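/*
 * Architecture-dependent system layer for the real-time nucleus on
 * PowerPC64, built over the Adeos/RTHAL services.
 */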
#ifndef _RTAI_ASM_PPC64_SYSTEM_H
#define _RTAI_ASM_PPC64_SYSTEM_H

#include <nucleus/asm-generic/system.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/ptrace.h>

#if ADEOS_RELEASE_NUMBER < 0x02060201
#error "Adeos 2.6r2c1/ppc64 or above is required to run this software; please upgrade."
#error "See http://download.gna.org/adeos/patches/v2.6/ppc64/"
#endif

#define XNARCH_DEFAULT_TICK     1000000 /* ns, i.e. 1ms */
#define XNARCH_HOST_TICK        (1000000000UL/HZ)

#define XNARCH_THREAD_STACKSZ   16384

#define xnarch_stack_size(tcb)  ((tcb)->stacksize)
#define xnarch_user_task(tcb)   ((tcb)->user_task)
#define xnarch_user_pid(tcb)    ((tcb)->user_task->pid)

#define xnarch_alloc_stack      xnmalloc
#define xnarch_free_stack       xnfree

struct xnthread;
struct task_struct;

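/* Architecture-dependent part of the per-thread control block (TCB). */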
typedef struct xnarchtcb {

    /* Kernel mode side */

#ifdef CONFIG_RTAI_HW_FPU
    /* FPU backup area for kernel-based threads; 16-byte alignment is
       required by the HAL save/restore primitives. */
    rthal_fpenv_t fpuenv __attribute__ ((aligned (16)));
    rthal_fpenv_t *fpup;                /* Pointer to the FPU area to save/restore */
    struct task_struct *user_fpu_owner;
    /* Linux task owning the FPU context, if any:
       - NULL for kernel-based nucleus threads,
       - the mated task for shadow (user-space) threads,
       - the preempted FPU owner for the root (Linux) thread. */

#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
#else /* !CONFIG_RTAI_HW_FPU */
#define xnarch_fpu_ptr(tcb)     NULL
#endif /* CONFIG_RTAI_HW_FPU */

    unsigned stacksize;         /* Aligned size of the stack (bytes) */
    unsigned long *stackbase;   /* Stack space */
    unsigned long ksp;          /* Saved KSP for kernel-based threads */
    unsigned long *kspp;        /* &ksp, or &user_task->thread.ksp for shadows */

    /* User mode side */
    struct task_struct *user_task;      /* Shadowed user-space task */
    struct task_struct *active_task;    /* Active user-space task */

    /* Init block */
    struct xnthread *self;
    int imask;
    const char *name;
    void (*entry)(void *cookie);
    void *cookie;

} xnarchtcb_t;

typedef struct xnarch_fltinfo {

    unsigned exception;
    struct pt_regs *regs;

} xnarch_fltinfo_t;

#define xnarch_fault_trap(fi)   ((unsigned int)(fi)->regs->trap)
#define xnarch_fault_code(fi)   ((fi)->regs->dar)
#define xnarch_fault_pc(fi)     ((fi)->regs->nip)

/* FPU faults are never discriminated on this architecture. */
#define xnarch_fault_fpu_p(fi)  (0)

#define xnarch_fault_pf_p(fi)   ((fi)->exception == ADEOS_ACCESS_TRAP)

/* Fault notification is elided for debug traps (hardware breakpoint,
   single-step, performance monitor) raised against a ptraced task. */
#define xnarch_fault_notify(fi) (!(current->ptrace & PT_PTRACED) || \
                                 ((fi)->exception != ADEOS_IABR_TRAP && \
                                  (fi)->exception != ADEOS_SSTEP_TRAP && \
                                  (fi)->exception != ADEOS_PERFMON_TRAP))
#ifdef __cplusplus
extern "C" {
#endif

static inline void *xnarch_sysalloc (u_long bytes)

{
#if 0
    if (bytes >= 128*1024)
        return vmalloc(bytes);
#endif

    return kmalloc(bytes,GFP_KERNEL);
}

static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
#if 0
    if (bytes >= 128*1024)
        vfree(chunk);
    else
#endif
        kfree(chunk);
}
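/*
 * Minimal usage sketch (hypothetical caller) for the helpers above,
 * assuming a nucleus service needs a temporary kernel buffer:
 *
 *      void *buf = xnarch_sysalloc(1024);
 *      if (buf != NULL) {
 *              ... use the buffer ...
 *              xnarch_sysfree(buf, 1024);
 *      }
 */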

/* Relay the timer tick to the Linux (host) domain. */
static inline void xnarch_relay_tick (void)

{
    rthal_irq_host_pend(ADEOS_TIMER_VIRQ);
}

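/*
 * The inlines below are instantiated by the pod (core scheduler)
 * module only; they cover timer start-up, root/real-time domain
 * switching, thread context switching and FPU management.
 */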
#ifdef XENO_POD_MODULE

void xnpod_welcome_thread(struct xnthread *);

void xnpod_delete_thread(struct xnthread *);

static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void))
{
    return rthal_timer_request(tickhandler,ns);
}

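/*
 * xnarch_leave_root() runs when the nucleus preempts the Linux (root)
 * domain on the current CPU, xnarch_enter_root() when control is
 * about to return to it; together they maintain the per-CPU
 * rthal_cpu_realtime flag and snapshot the preempted Linux context.
 */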
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    rthal_declare_cpuid;

    rthal_load_cpuid();

    /* Remember that this CPU now runs real-time activity. */
    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_current_host_task(cpuid);
#ifdef CONFIG_RTAI_HW_FPU
    rootcb->user_fpu_owner = rthal_get_fpu_owner(rootcb->user_task);
    /* So that xnarch_save_fpu() will operate on the right FPU area. */
    rootcb->fpup = (rootcb->user_fpu_owner
                    ? (rthal_fpenv_t *)&rootcb->user_fpu_owner->thread.fpr[0]
                    : NULL);
#endif /* CONFIG_RTAI_HW_FPU */
}

static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}

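/*
 * Context switch helper.  Two cases are handled below:
 * - the incoming thread shadows a Linux task (in_tcb->user_task is
 *   set): the MMU context is switched and the kernel's _switch()
 *   routine performs the register switch, much like the Linux
 *   scheduler would do;
 * - both threads run on nucleus-private stacks: only the minimal
 *   RTHAL switch frame is swapped via rthal_switch_context().
 */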
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *prev = out_tcb->active_task;
    struct task_struct *next = in_tcb->user_task;

    in_tcb->active_task = next ?: prev;

    if (next && next != prev)
        {
        /* Switch to a Linux task context; this mirrors the kernel's
           own context switch path for ppc64. */
        struct mm_struct *mm = next->active_mm;

#ifdef CONFIG_ALTIVEC
        /* Terminate any pending Altivec data streams (dssall) before
           switching the MMU context. */
        if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC) {
            asm volatile (
                "dssall;\n"
                : : );
        }
#endif /* CONFIG_ALTIVEC */

        if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
            cpu_set(smp_processor_id(), mm->cpu_vm_mask);
        }

        /* Switch the segment descriptors: SLB if available, STAB
           otherwise. */
        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
            switch_slb(next, mm);
        }
        else {
            switch_stab(next, mm);
        }

        flush_tlb_pending();

        _switch(&prev->thread, &next->thread);

        barrier();
        }
    else
        /* Kernel-to-kernel switch: swap the minimal RTHAL frames. */
        rthal_switch_context(out_tcb->kspp,in_tcb->kspp);
}

static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}

static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)

{
    /* Empty */
}

static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
                                         struct xnthread *thread,
                                         const char *name)
{
    tcb->user_task = current;
    tcb->active_task = NULL;
    tcb->ksp = 0;
    tcb->kspp = &tcb->ksp;
#ifdef CONFIG_RTAI_HW_FPU
    tcb->user_fpu_owner = NULL;
    tcb->fpup = NULL;
#endif /* CONFIG_RTAI_HW_FPU */
    tcb->entry = NULL;
    tcb->cookie = NULL;
    tcb->self = thread;
    tcb->imask = 0;
    tcb->name = name;
}

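/*
 * Thread trampoline: every kernel-based nucleus thread starts here
 * with its TCB as argument, sets the interrupt state according to the
 * thread's initial interrupt mask, then runs the user-supplied entry
 * routine and self-deletes on return.
 */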
asmlinkage static void xnarch_thread_trampoline (xnarchtcb_t *tcb)

{
    rthal_local_irq_restore(!!tcb->imask);
    xnpod_welcome_thread(tcb->self);
    tcb->entry(tcb->cookie);
    xnpod_delete_thread(tcb->self);
}

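/*
 * Build the initial stack frame of a kernel-based thread so that the
 * first rthal_switch_context() to it resumes in
 * xnarch_thread_trampoline().  On ppc64 (ELFv1 ABI) a function
 * pointer designates a descriptor whose first word is the entry
 * address and whose second word is the TOC pointer; both words are
 * seeded into the saved switch frame below.  The exact slot layout
 * follows the HAL's switch-frame format (RTHAL_SWITCH_FRAME_SIZE).
 */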
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long *ksp, flags;

    rthal_local_irq_flags_hw(flags);

    if (tcb->stackbase) {
        *tcb->stackbase = 0;

        /* Carve the initial frames from the top of the stack, 16-byte
           aligned, then record the saved stack pointer the first
           rthal_switch_context() will pick up. */
        ksp = (unsigned long *)(((unsigned long)tcb->stackbase + tcb->stacksize - 16) & ~0xf);
        *ksp = 0L;
        ksp = ksp - STACK_FRAME_OVERHEAD;
        *ksp = (unsigned long)ksp+STACK_FRAME_OVERHEAD;
        ksp = ksp - RTHAL_SWITCH_FRAME_SIZE;
        tcb->ksp = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
        *((unsigned long *)tcb->ksp) = (unsigned long)ksp + 224;

        ksp[18] = (unsigned long)get_paca();    /* PACA pointer */
        ksp[19] = (unsigned long)tcb;           /* trampoline argument */
        ksp[20] = ((unsigned long *)&xnarch_thread_trampoline)[1]; /* TOC (descriptor word 1) */
        ksp[25] = ((unsigned long *)&xnarch_thread_trampoline)[0]; /* entry (descriptor word 0) */
        ksp[26] = flags & ~(MSR_EE | MSR_FP);   /* initial MSR: IRQs and FPU off */
    }
    else {
        printk(KERN_ERR "xnarch_init_thread: NULL stackbase!\n");
    }

    tcb->entry = entry;
    tcb->cookie = cookie;
    tcb->self = thread;
    tcb->imask = imask;
    tcb->name = name;
}

#define xnarch_fpu_init_p(task) (1)

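/*
 * FPU management.  Kernel-based nucleus threads keep their FPU state
 * in the TCB's fpuenv area; shadow threads reuse the FPU save area of
 * their mated Linux task (thread.fpr[]), and MSR_FP is toggled in the
 * task's saved registers so Linux stays consistent with what was
 * actually saved or restored here.
 */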
static inline void xnarch_enable_fpu (xnarchtcb_t *current_tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    if (!current_tcb->user_task)
        rthal_enable_fpu();
#endif /* CONFIG_RTAI_HW_FPU */
}

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    /* Initialize the FPU of a kernel-based thread from a pristine,
       all-zero environment. */
    memset(&tcb->fpuenv,0,sizeof(tcb->fpuenv));
    rthal_init_fpu(&tcb->fpuenv);
#endif /* CONFIG_RTAI_HW_FPU */
}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if (tcb->fpup)
        {
        rthal_save_fpu(tcb->fpup);

        /* If the area belongs to a Linux task, clear MSR_FP in its
           saved registers so it reloads the FPU lazily next time. */
        if (tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
            tcb->user_fpu_owner->thread.regs->msr &= ~MSR_FP;
        }

#endif /* CONFIG_RTAI_HW_FPU */
}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if (tcb->fpup)
        {
        rthal_restore_fpu(tcb->fpup);

        /* Mark the Linux owner's registers as holding a live FPU
           context again. */
        if (tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
            tcb->user_fpu_owner->thread.regs->msr |= MSR_FP;
        }

    /* Leave FPU management to the kernel's lazy switching for Linux
       tasks: keep the FPU disabled until they fault it back in. */
    if (tcb->user_task)
        rthal_disable_fpu();

#endif /* CONFIG_RTAI_HW_FPU */
}

#endif /* XENO_POD_MODULE */

#ifdef XENO_THREAD_MODULE

static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {

    tcb->user_task = NULL;
    tcb->active_task = NULL;
    tcb->kspp = &tcb->ksp;
#ifdef CONFIG_RTAI_HW_FPU
    tcb->user_fpu_owner = NULL;
    tcb->fpup = &tcb->fpuenv;
#endif /* CONFIG_RTAI_HW_FPU */
    /* Must be followed by xnarch_init_thread(). */
}

#endif /* XENO_THREAD_MODULE */

#ifdef XENO_SHADOW_MODULE

static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
                                           struct xnthread *thread,
                                           const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->ksp = 0;
    tcb->kspp = &task->thread.ksp;
#ifdef CONFIG_RTAI_HW_FPU
    tcb->user_fpu_owner = task;
    tcb->fpup = (rthal_fpenv_t *)&task->thread.fpr[0];
#endif /* CONFIG_RTAI_HW_FPU */
    tcb->entry = NULL;
    tcb->cookie = NULL;
    tcb->self = thread;
    tcb->imask = 0;
    tcb->name = name;
}

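/*
 * Helpers used by the shadow support to virtualize, lock and unlock
 * a domain's external interrupts in one go.
 */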
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        rthal_virtualize_irq(rthal_current_domain,
                             irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);

    /* On this architecture the timer tick is relayed through a
       virtual IRQ, so it must be grabbed in addition to the external
       IRQ range above. */
    rthal_virtualize_irq(rthal_current_domain,
                         ADEOS_TIMER_VIRQ,
                         handler,
                         NULL,
                         IPIPE_DYNAMIC_MASK);
}

static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:

                /* Never lock out the critical IPI. */
                continue;
#endif /* CONFIG_SMP */

            default:

                rthal_lock_irq(adp,cpuid,irq);
            }
        }

    rthal_lock_irq(adp,cpuid,ADEOS_TIMER_VIRQ);
}

static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:

                continue;
#endif /* CONFIG_SMP */

            default:

                rthal_unlock_irq(adp,irq);
            }
        }

    rthal_unlock_irq(adp,ADEOS_TIMER_VIRQ);
}

#endif /* XENO_SHADOW_MODULE */

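/*
 * Timer management entry points for the nucleus timer module:
 * program the next one-shot tick, release the timer, and (on SMP)
 * relay the tick to the other CPUs.
 */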
#ifdef XENO_TIMER_MODULE

static inline void xnarch_program_timer_shot (unsigned long delay) {
    /* Program the decrementer for the next shot through the HAL. */
    rthal_timer_program_shot(delay);
}

static inline void xnarch_stop_timer (void) {
    rthal_timer_release();
}

static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)

{
#ifdef CONFIG_SMP
    /* Not supported yet on SMP. */
    return -1;
#else /* !CONFIG_SMP */
    return 0;
#endif /* CONFIG_SMP */
}

#endif /* XENO_TIMER_MODULE */

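/*
 * Nucleus bootstrap/cleanup code (main module): calibrates the timer
 * and scheduler latencies, sets up the IRQ escalation channel and the
 * fault handler, then mounts the optional user-space shadow support.
 */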
#ifdef XENO_MAIN_MODULE

#include <linux/init.h>
#include <nucleus/asm/calibration.h>

extern u_long nkschedlat;

extern u_long nktimerlat;

int xnarch_escalation_virq;

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

static rthal_trap_handler_t xnarch_old_trap_handler;

static int xnarch_trap_fault (unsigned event, unsigned domid, void *data)
{
    xnarch_fltinfo_t fltinfo;
    fltinfo.exception = event;
    fltinfo.regs = (struct pt_regs *)data;
    return xnpod_trap_fault(&fltinfo);
}

unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified */
    /* Ask the HAL to measure the time needed to program the timer;
       the result is converted to CPU ticks and is never zero. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}

int xnarch_calibrate_sched (void)

{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
        return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}

static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* The init chores are expected to be performed on behalf of
       CPU #0 only. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    err = xnarch_calibrate_sched();

    if (err)
        return err;

    /* Allocate the virtual IRQ the nucleus uses to escalate
       rescheduling requests to the real-time domain. */
    xnarch_escalation_virq = rthal_alloc_virq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    rthal_virtualize_irq(&rthal_domain,
                         xnarch_escalation_virq,
                         (void (*)(unsigned))&xnpod_schedule_handler,
                         NULL,
                         IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    if (err)
        {
        rthal_trap_catch(xnarch_old_trap_handler);
        rthal_free_virq(xnarch_escalation_virq);
        }

    return err;
}

static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_trap_catch(xnarch_old_trap_handler);
    rthal_free_virq(xnarch_escalation_virq);
}

#endif /* XENO_MAIN_MODULE */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#else /* !__KERNEL__ */

#include <nucleus/system.h>
#include <bits/local_lim.h>

#endif /* __KERNEL__ */

#endif /* !_RTAI_ASM_PPC64_SYSTEM_H */