/*
 * RTAI/Xenomai nucleus -- PowerPC arch-dependent system header.
 * (Original copyright/license banner elided in this extract.)
 */
00020 #ifndef _RTAI_ASM_PPC_SYSTEM_H
00021 #define _RTAI_ASM_PPC_SYSTEM_H
00022
00023 #include <nucleus/asm-generic/system.h>
00024
00025 #ifdef __KERNEL__
00026
00027 #include <linux/config.h>
00028 #include <linux/ptrace.h>
00029
00030 #ifdef CONFIG_ADEOS_CORE
00031 #if ADEOS_RELEASE_NUMBER < 0x02060703
00032 #error "Adeos 2.6r7c3/ppc or above is required to run this software; please upgrade."
00033 #error "See http://download.gna.org/adeos/patches/v2.6/ppc/"
00034 #endif
00035 #endif
00036
00037 #define XNARCH_DEFAULT_TICK 1000000
00038 #define XNARCH_HOST_TICK (1000000000UL/HZ)
00039
00040 #define XNARCH_THREAD_STACKSZ 4096
00041
00042 #define xnarch_stack_size(tcb) ((tcb)->stacksize)
00043 #define xnarch_user_task(tcb) ((tcb)->user_task)
00044 #define xnarch_user_pid(tcb) ((tcb)->user_task->pid)
00045
00046 #define xnarch_alloc_stack xnmalloc
00047 #define xnarch_free_stack xnfree
00048
00049 struct xnthread;
00050 struct task_struct;
00051
/*
 * Architecture-dependent TCB (thread control block) for PowerPC.
 * Holds the per-thread context needed to switch threads in and out,
 * alongside the generic nucleus descriptor (see `self' back-pointer).
 */
typedef struct xnarchtcb {

#ifdef CONFIG_RTAI_HW_FPU
    /* FPU backup area for kernel-based threads; 16-byte aligned for
       the FP save/restore sequences. */
    rthal_fpenv_t fpuenv __attribute__ ((aligned (16)));
    rthal_fpenv_t *fpup;		/* FPU area in use: &fpuenv for kernel
					   threads, or the mated Linux task's FP
					   register area (see init routines below). */
    struct task_struct *user_fpu_owner;	/* Linux task owning the FPU state,
					   if any (NULL otherwise). */
#define xnarch_fpu_ptr(tcb) ((tcb)->fpup)
#else
#define xnarch_fpu_ptr(tcb) NULL
#endif

    unsigned stacksize;		/* Size of the kernel stack, in bytes. */
    unsigned long *stackbase;	/* Base address of the kernel stack. */
    unsigned long ksp;		/* Saved kernel stack pointer slot. */
    unsigned long *kspp;	/* Points at &ksp, or at the Linux task's
				   thread.ksp for shadow threads. */

    struct task_struct *user_task;	/* Mated Linux task, if any. */
    struct task_struct *active_task;	/* Linux task currently active on
					   this context. */

    struct xnthread *self;	/* Back-pointer to the nucleus thread. */
    int imask;			/* Initial interrupt mask at thread start. */
    const char *name;		/* Symbolic name of the thread. */
    void (*entry)(void *cookie);	/* Thread entry point. */
    void *cookie;		/* Opaque argument passed to entry(). */

} xnarchtcb_t;
00089
/* Arch-dependent fault descriptor handed to the nucleus fault handler;
   decoded through the xnarch_fault_* accessors below. */
typedef struct xnarch_fltinfo {

    unsigned exception;		/* Trap event code (Adeos or I-pipe). */
    struct pt_regs *regs;	/* Register frame captured at fault time. */

} xnarch_fltinfo_t;
00096
/* Accessors decoding the fault descriptor fields from the saved
   register frame. */
#define xnarch_fault_trap(fi)   ((unsigned int)(fi)->regs->trap)
#define xnarch_fault_code(fi)   ((fi)->regs->dar)
#define xnarch_fault_pc(fi)     ((fi)->regs->nip)

/* FPU faults are never reported as such on this architecture. */
#define xnarch_fault_fpu_p(fi)  (0)
00103
00104
/* Fault classification predicates.  Two variants, depending on whether
   the legacy Adeos core or the I-pipe layer supplies the trap codes. */
#ifdef CONFIG_ADEOS_CORE
/* Non-zero if the fault is a memory access fault (page fault). */
#define xnarch_fault_pf_p(fi) ((fi)->exception == ADEOS_ACCESS_TRAP)
/* Non-zero if the fault comes from a debugger acting on a ptraced
   task (breakpoint, single-step or debug trap). */
#define xnarch_fault_bp_p(fi) ((current->ptrace & PT_PTRACED) && \
   ((fi)->exception == ADEOS_IABR_TRAP || \
    (fi)->exception == ADEOS_SSTEP_TRAP || \
    (fi)->exception == ADEOS_DEBUG_TRAP))
#else
#define xnarch_fault_pf_p(fi) ((fi)->exception == IPIPE_TRAP_ACCESS)
#define xnarch_fault_bp_p(fi) ((current->ptrace & PT_PTRACED) && \
   ((fi)->exception == IPIPE_TRAP_IABR || \
    (fi)->exception == IPIPE_TRAP_SSTEP || \
    (fi)->exception == IPIPE_TRAP_DEBUG))
#endif

/* Debugger-originated traps are not notified to the nucleus thread. */
#define xnarch_fault_notify(fi) (!xnarch_fault_bp_p(fi))
00120
00121 #ifdef __cplusplus
00122 extern "C" {
00123 #endif
00124
/* Allocate a chunk of kernel memory on behalf of the nucleus.
   Returns NULL on failure (kmalloc semantics). */
static inline void *xnarch_sysalloc (u_long bytes)

{
#if 0	/* vmalloc() path for large (>= 128Kb) requests is disabled. */
    if (bytes >= 128*1024)
	return vmalloc(bytes);
#endif

    return kmalloc(bytes,GFP_KERNEL);
}
00135
/* Release memory obtained from xnarch_sysalloc().  `bytes' must match
   the original request (only used by the disabled vmalloc path). */
static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
#if 0	/* Mirrors the disabled vmalloc() path in xnarch_sysalloc(). */
    if (bytes >= 128*1024)
	vfree(chunk);
    else
#endif
	kfree(chunk);
}
00146
/* Relay the periodic tick to Linux by posting the host timer IRQ to
   the host domain. */
static inline void xnarch_relay_tick (void)

{
    rthal_irq_host_pend(RTHAL_TIMER_IRQ);
}
00152
00153 #ifdef XENO_POD_MODULE
00154
00155 void xnpod_welcome_thread(struct xnthread *);
00156
00157 void xnpod_delete_thread(struct xnthread *);
00158
/* Grab the hardware timer, routing ticks to `tickhandler'.
   NOTE(review): `ns' is presumably the tick period in nanoseconds,
   with 0 selecting oneshot mode -- confirm against the HAL's
   rthal_timer_request() contract.  Returns the HAL status code. */
static inline int xnarch_start_timer (unsigned long ns,
				      void (*tickhandler)(void))
{
    return rthal_timer_request(tickhandler,ns);
}
00164
/* Called when the nucleus preempts the Linux (root) domain on this
   CPU: records the preempted Linux context into the root TCB so it
   can be properly resumed and its FPU state saved later. */
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    rthal_declare_cpuid;

    rthal_load_cpuid();

    /* Flag this CPU as running real-time activity; cleared again by
       xnarch_enter_root(). */
    __set_bit(cpuid,&rthal_cpu_realtime);

    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_current_host_task(cpuid);
#ifdef CONFIG_RTAI_HW_FPU
    rootcb->user_fpu_owner = rthal_get_fpu_owner(rootcb->user_task);
    /* Point fpup at the owner's FP register area so xnarch_save_fpu()
       operates on the right backup area (NULL if no task owns the FPU). */
    rootcb->fpup = (rootcb->user_fpu_owner
		    ? (rthal_fpenv_t *)&rootcb->user_fpu_owner->thread.fpr[0]
		    : NULL);
#endif
}
00185
/* Called when Linux is about to resume on this CPU: clear the per-cpu
   realtime flag set by xnarch_leave_root(). */
static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}
00189
/*
 * Switch context from out_tcb to in_tcb.  If the incoming thread is
 * mated to a Linux task different from the outgoing one, a full Linux
 * task switch (MMU context change + _switch) is performed; otherwise a
 * lightweight kernel-to-kernel stack switch is enough.
 */
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
				     xnarchtcb_t *in_tcb)
{
    struct task_struct *prev = out_tcb->active_task;
    struct task_struct *next = in_tcb->user_task;

    /* Kernel threads have no user_task: keep the previous Linux
       context active across them. */
    in_tcb->active_task = next ?: prev;

    if (next && next != prev)
	{
	struct mm_struct *mm = next->active_mm;

	/* Switch the memory context to the incoming task's mm. */

#ifdef CONFIG_ALTIVEC
	/* Stop any pending AltiVec data streams before switching.
	   NOTE(review): cur_cpu_spec feature test is used instead of
	   runtime FTR fixups -- presumably the fixup mechanism is not
	   usable in this context; confirm. */
	if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC) {
	    asm volatile (
		"dssall;\n"
#ifndef CONFIG_POWER4
		"sync;\n"
#endif
		: : );
	}
#endif

	next->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context,mm->pgd);

	/* NOTE(review): `current' is forced to the outgoing task
	   before _switch(), which appears to rely on a valid current
	   pointer -- confirm against the kernel's _switch() code. */
	current = prev;
	_switch(&prev->thread, &next->thread);

	barrier();
	}
    else
	/* Same (or no) Linux mate: plain kernel stack switch. */
	rthal_switch_context(out_tcb->kspp,in_tcb->kspp);
}
00232
/* Switch away from a deleted thread; no arch-specific finalization is
   needed on PowerPC beyond the context switch itself. */
static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
					       xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}
00238
/* Finalize a deleted thread when no context switch is pending;
   intentionally empty on this architecture. */
static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)

{
}
00244
00245 static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
00246 struct xnthread *thread,
00247 const char *name)
00248 {
00249 tcb->user_task = current;
00250 tcb->active_task = NULL;
00251 tcb->ksp = 0;
00252 tcb->kspp = &tcb->ksp;
00253 #ifdef CONFIG_RTAI_HW_FPU
00254 tcb->user_fpu_owner = NULL;
00255 tcb->fpup = NULL;
00256 #endif
00257 tcb->entry = NULL;
00258 tcb->cookie = NULL;
00259 tcb->self = thread;
00260 tcb->imask = 0;
00261 tcb->name = name;
00262 }
00263
/* First code executed by a new kernel-based thread (its address is
   seeded into the initial switch frame by xnarch_init_thread()):
   restore the requested interrupt state, signal the nucleus, run the
   thread body, then self-delete when the body returns. */
asmlinkage static void xnarch_thread_trampoline (xnarchtcb_t *tcb)

{
    rthal_local_irq_restore(!!tcb->imask);
    xnpod_welcome_thread(tcb->self);
    tcb->entry(tcb->cookie);
    /* The entry point returned: delete the thread. */
    xnpod_delete_thread(tcb->self);
}
00272
/* Prepare the initial stack and switch frame of an emerging
   kernel-based thread so the first context switch to it resumes in
   xnarch_thread_trampoline() with `tcb' as argument. */
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
				       void (*entry)(void *),
				       void *cookie,
				       int imask,
				       struct xnthread *thread,
				       char *name)
{
    unsigned long *ksp, flags;

    rthal_local_irq_flags_hw(flags);

    /* Zero back-chain word at the stack base, then carve the initial
       switch frame at the (16-byte aligned) top of the stack. */
    *tcb->stackbase = 0;
    ksp = (unsigned long *)((((unsigned long)tcb->stackbase + tcb->stacksize - 0x10) & ~0xf)
			    - RTHAL_SWITCH_FRAME_SIZE);
    tcb->ksp = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
    /* Seed the frame slots the low-level switch code restores.
       NOTE(review): slots 19/25/26 presumably hold r3 (trampoline
       argument), the resume PC and the MSR image respectively --
       confirm against the rthal switch frame layout. */
    ksp[19] = (unsigned long)tcb;
    ksp[25] = (unsigned long)&xnarch_thread_trampoline;
    ksp[26] = flags & ~(MSR_EE | MSR_FP);	/* IRQs off, FPU disabled. */

    tcb->entry = entry;
    tcb->cookie = cookie;
    tcb->self = thread;
    tcb->imask = imask;
    tcb->name = name;
}
00298
00299
/* FPU initialization is always required for emerging threads. */
#define xnarch_fpu_init_p(task) (1)

/* Enable FPU access for the incoming thread.  Only kernel-based
   threads (no mated Linux task) need explicit enabling here. */
static inline void xnarch_enable_fpu (xnarchtcb_t *current_tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    if(!current_tcb->user_task)
	rthal_enable_fpu();
#endif
}
00310
/* Initialize the FPU state of an emerging kernel-based thread: zero
   the private backup area, then load a clean FPU context from it. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    memset(&tcb->fpuenv,0,sizeof(tcb->fpuenv));
    rthal_init_fpu(&tcb->fpuenv);
#endif
}
00321
/* Save the current FPU state into the area designated by tcb->fpup
   (no-op when the TCB tracks no FPU state). */
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if(tcb->fpup)
	{
	rthal_save_fpu(tcb->fpup);

	/* When the state belongs to a Linux task, clear MSR_FP in its
	   saved regs so Linux knows the live FP registers no longer
	   hold that task's state. */
	if(tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
	    tcb->user_fpu_owner->thread.regs->msr &= ~MSR_FP;
	}

#endif
}
00337
/* Restore the FPU state designated by tcb->fpup for the incoming
   thread, undoing the effects of xnarch_save_fpu(). */
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if(tcb->fpup)
	{
	rthal_restore_fpu(tcb->fpup);

	/* Re-flag the owning Linux task as holding live FP state
	   (mirrors the MSR_FP clearing done at save time). */
	if(tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
	    tcb->user_fpu_owner->thread.regs->msr |= MSR_FP;
	}

    /* NOTE(review): FPU access is disabled again when resuming a
       user-space mate -- presumably so the next FP use traps and lets
       Linux manage FPU ownership lazily; confirm. */
    if(tcb->user_task)
	rthal_disable_fpu();

#endif
}
00358
00359 #endif
00360
00361 #ifdef XENO_THREAD_MODULE
00362
/* Minimal TCB setup for a kernel-based thread; stack and entry
   information are filled in later by xnarch_init_thread(). */
static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {

    tcb->user_task = NULL;	/* Kernel thread: no Linux mate. */
    tcb->active_task = NULL;
    tcb->kspp = &tcb->ksp;	/* Switch through the private ksp slot. */
#ifdef CONFIG_RTAI_HW_FPU
    tcb->user_fpu_owner = NULL;
    tcb->fpup = &tcb->fpuenv;	/* Use the private FPU backup area. */
#endif
    /* Must be followed by xnarch_init_thread(). */
}
00374
00375 #endif
00376
00377 #ifdef XENO_SHADOW_MODULE
00378
00379 static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
00380 struct xnthread *thread,
00381 const char *name)
00382 {
00383 struct task_struct *task = current;
00384
00385 tcb->user_task = task;
00386 tcb->active_task = NULL;
00387 tcb->ksp = 0;
00388 tcb->kspp = &task->thread.ksp;
00389 #ifdef CONFIG_RTAI_HW_FPU
00390 tcb->user_fpu_owner = task;
00391 tcb->fpup = (rthal_fpenv_t *)&task->thread.fpr[0];
00392 #endif
00393 tcb->entry = NULL;
00394 tcb->cookie = NULL;
00395 tcb->self = thread;
00396 tcb->imask = 0;
00397 tcb->name = name;
00398 }
00399
00400 static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))
00401
00402 {
00403 unsigned irq;
00404
00405 for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
00406 rthal_virtualize_irq(rthal_current_domain,
00407 irq,
00408 handler,
00409 NULL,
00410 IPIPE_DYNAMIC_MASK);
00411
00412
00413
00414
00415
00416 rthal_virtualize_irq(rthal_current_domain,
00417 RTHAL_TIMER_IRQ,
00418 handler,
00419 NULL,
00420 IPIPE_DYNAMIC_MASK);
00421 }
00422
00423 static inline void xnarch_lock_xirqs (rthal_pipeline_stage_t *ipd, int cpuid)
00424
00425 {
00426 unsigned irq;
00427
00428 for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
00429 {
00430 switch (irq)
00431 {
00432 #ifdef CONFIG_SMP
00433 case RTHAL_CRITICAL_IPI:
00434
00435
00436 continue;
00437 #endif
00438
00439 default:
00440
00441 rthal_lock_irq(ipd,cpuid,irq);
00442 }
00443 }
00444
00445 rthal_lock_irq(ipd,cpuid,RTHAL_TIMER_IRQ);
00446 }
00447
00448 static inline void xnarch_unlock_xirqs (rthal_pipeline_stage_t *ipd, int cpuid)
00449
00450 {
00451 unsigned irq;
00452
00453 for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
00454 {
00455 switch (irq)
00456 {
00457 #ifdef CONFIG_SMP
00458 case RTHAL_CRITICAL_IPI:
00459
00460 continue;
00461 #endif
00462
00463 default:
00464
00465 rthal_unlock_irq(ipd,irq);
00466 }
00467 }
00468
00469 rthal_unlock_irq(ipd,RTHAL_TIMER_IRQ);
00470 }
00471
00472 #endif
00473
00474 #ifdef XENO_TIMER_MODULE
00475
/* Program the next one-shot timer interrupt.
   NOTE(review): `delay' is presumably expressed in CPU ticks (TSC
   units) and no lower-bound clamping is done here -- confirm the HAL
   handles very small/zero delays. */
static inline void xnarch_program_timer_shot (unsigned long delay) {
    rthal_timer_program_shot(delay);
}
00485
/* Release the hardware timer grabbed by xnarch_start_timer(). */
static inline void xnarch_stop_timer (void) {
    rthal_timer_release();
}
00489
/* Relay the timer tick to remote CPUs.  Not implemented on this
   architecture: returns -1 on SMP builds, 0 (nothing to do) on UP. */
static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)

{
#ifdef CONFIG_SMP
    return -1;
#else
    return 0;
#endif
}
00499
00500 #endif
00501
00502 #ifdef XENO_MAIN_MODULE
00503
00504 #include <linux/init.h>
00505 #include <nucleus/asm/calibration.h>
00506
00507 extern u_long nkschedlat;
00508
00509 extern u_long nktimerlat;
00510
00511 int xnarch_escalation_virq;
00512
00513 int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);
00514
00515 void xnpod_schedule_handler(void);
00516
00517 static rthal_trap_handler_t xnarch_old_trap_handler;
00518
00519 static int xnarch_trap_fault (unsigned event, unsigned domid, void *data)
00520 {
00521 xnarch_fltinfo_t fltinfo;
00522 fltinfo.exception = event;
00523 fltinfo.regs = (struct pt_regs *)data;
00524 return xnpod_trap_fault(&fltinfo);
00525 }
00526
/* Return the timer programming latency in TSC units, never 0 (the
   GNU `?: ' form substitutes 1 when the conversion yields 0, since 0
   is reserved to mean calibration failure). */
unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    /* User-provided latency, converted from nanoseconds. */
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
#else
    /* Measure the default timer latency on this hardware. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif
}
00538
00539 int xnarch_calibrate_sched (void)
00540
00541 {
00542 nktimerlat = xnarch_calibrate_timer();
00543
00544 if (!nktimerlat)
00545 return -ENODEV;
00546
00547 nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());
00548
00549 return 0;
00550 }
00551
/* Arch-level initialization of the nucleus: calibrate latencies,
   reserve the escalation virtual IRQ, divert traps, and mount the
   shadow support when fusion is enabled.  Returns 0 on success or a
   negative error code, undoing partial setup on failure. */
static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* Pin the initialization on CPU #0.
       NOTE(review): presumably because the calibration below is
       CPU-local -- confirm. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif

    err = xnarch_calibrate_sched();

    if (err)
	return err;

    /* Reserve a virtual IRQ used to escalate rescheduling requests
       to the real-time domain. */
    xnarch_escalation_virq = rthal_alloc_virq();

    if (xnarch_escalation_virq == 0)
	return -ENOSYS;

    rthal_virtualize_irq(&rthal_domain,
			 xnarch_escalation_virq,
			 (void (*)(unsigned))&xnpod_schedule_handler,
			 NULL,
			 IPIPE_HANDLE_MASK);

    /* Divert faults to the nucleus, keeping the previous handler for
       restoration at cleanup time. */
    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif
    /* Without fusion, err is still 0 here from the calibration step. */
    if (err)
	{
	rthal_trap_catch(xnarch_old_trap_handler);
	rthal_free_virq(xnarch_escalation_virq);
	}

    return err;
}
00593
/* Arch-level cleanup: exact converse of xnarch_init(). */
static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif
    /* Restore the previous trap handler and release the escalation
       virtual IRQ acquired at init time. */
    rthal_trap_catch(xnarch_old_trap_handler);
    rthal_free_virq(xnarch_escalation_virq);
}
00603
00604 #endif
00605
00606 #ifdef __cplusplus
00607 }
00608 #endif
00609
00610 #else
00611
00612 #include <nucleus/system.h>
00613 #include <bits/local_lim.h>
00614
00615 #endif
00616
00617 #endif