00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045 #ifndef _RTAI_ASM_PPC_SYSTEM_H
00046 #define _RTAI_ASM_PPC_SYSTEM_H
00047
00048 #ifdef __KERNEL__
00049
00050 #include <linux/kernel.h>
00051 #include <linux/version.h>
00052 #include <linux/module.h>
00053 #include <linux/slab.h>
00054 #include <linux/errno.h>
00055 #include <linux/adeos.h>
00056 #include <linux/vmalloc.h>
00057 #include <asm/uaccess.h>
00058 #include <asm/param.h>
00059 #include <asm/mmu_context.h>
00060 #include <rtai_config.h>
00061 #include <nucleus/asm/hal.h>
00062 #include <nucleus/asm/atomic.h>
00063 #include <nucleus/shadow.h>
00064
00065 #if ADEOS_RELEASE_NUMBER < 0x02060609
00066 #error "Adeos 2.6r6c9/ppc or above is required to run this software; please upgrade."
00067 #error "See http://download.gna.org/adeos/patches/v2.6/ppc/"
00068 #endif
00069
00070 #define MODULE_PARM_VALUE(parm) (parm)
00071
00072 typedef unsigned long spl_t;
00073
00074 #define splhigh(x) rthal_local_irq_save(x)
00075 #ifdef CONFIG_SMP
00076 #define splexit(x) rthal_local_irq_restore((x) & 1)
00077 #else
00078 #define splexit(x) rthal_local_irq_restore(x)
00079 #endif
00080 #define splnone() rthal_sti()
00081 #define spltest() rthal_local_irq_test()
00082 #define splget(x) rthal_local_irq_flags(x)
00083 #define splsync(x) rthal_local_irq_sync(x)
00084
00085 typedef unsigned long xnlock_t;
00086
00087 #define XNARCH_LOCK_UNLOCKED 0
00088
00089 #ifdef CONFIG_SMP
00090
00091 #define xnlock_get_irqsave(lock,x) ((x) = __xnlock_get_irqsave(lock))
00092 #define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock,1)
00093 #define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock,0)
00094
/* Reset a nucleus spinlock to its released state. */
static inline void xnlock_init (xnlock_t *lock) {

    *lock = XNARCH_LOCK_UNLOCKED;
}
00099
/*
 * Acquire the nucleus lock with hardware interrupts disabled.
 *
 * Lock word layout: bit <cpuid> records that this CPU already holds
 * (or is waiting on) the lock; the top bit (BITS_PER_LONG - 1) is the
 * actual mutual-exclusion bit spun on below.
 *
 * Returned flags: the saved IRQ state from rthal_local_irq_save(),
 * with bit 1 (value 2) set when the lock was already owned by this
 * CPU, so that the matching xnlock_put_irqrestore() skips the release
 * (recursive acquisition).
 */
static inline spl_t __xnlock_get_irqsave (xnlock_t *lock)

{
    adeos_declare_cpuid;
    spl_t flags;

    rthal_local_irq_save(flags);

    adeos_load_cpuid();

    if (!test_and_set_bit(cpuid,lock))
	{
	/* First acquisition by this CPU: spin for the global bit. */
	while (test_and_set_bit(BITS_PER_LONG - 1,lock))
	    rthal_cpu_relax(cpuid);
	}
    else
	/* Nested acquisition: tell the release path not to unlock. */
	flags |= 2;

    return flags;
}
00120
/*
 * Release the nucleus lock and restore the IRQ state saved by
 * __xnlock_get_irqsave().  If bit 1 of @flags is set, this was a
 * nested acquisition and the lock itself is left held; only the
 * outermost release actually clears the lock bits.
 */
static inline void xnlock_put_irqrestore (xnlock_t *lock, spl_t flags)

{
    if (!(flags & 2))
	{
	adeos_declare_cpuid;

	rthal_cli();

	adeos_load_cpuid();

	if (test_bit(cpuid,lock))
	    {
	    /* Drop ownership first, then the global lock bit. */
	    clear_bit(cpuid,lock);
	    clear_bit(BITS_PER_LONG - 1,lock);
	    /* NOTE(review): relax after release looks like a memory
	       barrier/delay hook on this port — confirm against
	       rthal_cpu_relax() semantics. */
	    rthal_cpu_relax(cpuid);
	    }
	}

    /* Bit 0 carries the saved hardware IRQ state. */
    rthal_local_irq_restore(flags & 1);
}
00142
00143 #define XNARCH_PASSTHROUGH_IRQS
00144
00145 #else
00146
00147 #define xnlock_init(lock) do { } while(0)
00148 #define xnlock_get_irqsave(lock,x) rthal_local_irq_save(x)
00149 #define xnlock_put_irqrestore(lock,x) rthal_local_irq_restore(x)
00150 #define xnlock_clear_irqoff(lock) rthal_cli()
00151 #define xnlock_clear_irqon(lock) rthal_sti()
00152
00153 #endif
00154
00155 #define XNARCH_NR_CPUS RTHAL_NR_CPUS
00156
00157 #define XNARCH_DEFAULT_TICK 1000000
00158 #define XNARCH_IRQ_MAX IPIPE_NR_XIRQS
00159 #define XNARCH_HOST_TICK (1000000000UL/HZ)
00160
00161
00162
00163
00164
00165 #define xnarch_adjust_calibration(x) ((x) * 2 / 3)
00166
00167 #define XNARCH_THREAD_STACKSZ 4096
00168 #define XNARCH_ROOT_STACKSZ 0
00169
00170 #define XNARCH_PROMPT "RTAI[nucleus]: "
00171 #define xnarch_loginfo(fmt,args...) printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
00172 #define xnarch_logwarn(fmt,args...) printk(KERN_WARNING XNARCH_PROMPT fmt, ##args)
00173 #define xnarch_logerr(fmt,args...) printk(KERN_ERR XNARCH_PROMPT fmt, ##args)
00174 #define xnarch_printf(fmt,args...) printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
00175
/* Modulo/divide helpers layered over the rthal arithmetic primitives
   (see the xnarch_ulldiv/xnarch_uldivrem aliases below). */
#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
/* Fixed: original expansion called the misspelled "xnarh_uldivrem" and
   referenced "uld", which is not a parameter of this macro — any use of
   xnarch_ulmod() failed to compile. */
#define xnarch_ulmod(ull, d)         ({ u_long _rem; \
                                        xnarch_uldivrem(ull,d,&_rem); _rem; })
00180
00181 #define xnarch_ullmul rthal_ullmul
00182 #define xnarch_uldivrem rthal_uldivrem
00183 #define xnarch_ulldiv rthal_ulldiv
00184 #define xnarch_imuldiv rthal_imuldiv
00185 #define xnarch_llimd rthal_llimd
00186 #define xnarch_get_cpu_tsc rthal_rdtsc
00187
00188 typedef cpumask_t xnarch_cpumask_t;
00189 #ifdef CONFIG_SMP
00190 #define xnarch_cpu_online_map cpu_online_map
00191 #else
00192 #define xnarch_cpu_online_map cpumask_of_cpu(0)
00193 #endif
00194 #define xnarch_num_online_cpus() num_online_cpus()
00195 #define xnarch_cpu_set(cpu, mask) cpu_set(cpu, mask)
00196 #define xnarch_cpu_clear(cpu, mask) cpu_clear(cpu, mask)
00197 #define xnarch_cpus_clear(mask) cpus_clear(mask)
00198 #define xnarch_cpu_isset(cpu, mask) cpu_isset(cpu, mask)
00199 #define xnarch_cpus_and(dst, src1, src2) cpus_and(dst, src1, src2)
00200 #define xnarch_cpus_equal(mask1, mask2) cpus_equal(mask1, mask2)
00201 #define xnarch_cpus_empty(mask) cpus_empty(mask)
00202 #define xnarch_cpumask_of_cpu(cpu) cpumask_of_cpu(cpu)
00203 #define xnarch_first_cpu(mask) first_cpu(mask)
00204 #define XNARCH_CPU_MASK_ALL CPU_MASK_ALL
00205
00206 struct xnthread;
00207 struct xnheap;
00208 struct task_struct;
00209
00210 #define xnarch_stack_size(tcb) ((tcb)->stacksize)
00211
00212 typedef struct xnarchtcb {
00213
00214
00215
00216 #ifdef CONFIG_RTAI_HW_FPU
00217
00218
00219 rthal_fpenv_t fpuenv __attribute__ ((aligned (16)));
00220 rthal_fpenv_t *fpup;
00221 #define xnarch_fpu_ptr(tcb) ((tcb)->fpup)
00222 #else
00223 #define xnarch_fpu_ptr(tcb) NULL
00224 #endif
00225
00226 unsigned stacksize;
00227 unsigned long *stackbase;
00228 unsigned long ksp;
00229 unsigned long *kspp;
00230
00231
00232 struct task_struct *user_task;
00233 struct task_struct *active_task;
00234
00235
00236 struct xnthread *self;
00237 int imask;
00238 const char *name;
00239 void (*entry)(void *cookie);
00240 void *cookie;
00241
00242 } xnarchtcb_t;
00243
00244 typedef struct xnarch_fltinfo {
00245
00246 struct pt_regs *regs;
00247
00248 } xnarch_fltinfo_t;
00249
00250 #define xnarch_fault_trap(fi) ((unsigned int)(fi)->regs->trap)
00251 #define xnarch_fault_code(fi) ((fi)->regs->dar)
00252 #define xnarch_fault_pc(fi) ((fi)->regs->nip)
00253
00254 typedef struct xnarch_heapcb {
00255
00256 atomic_t numaps;
00257
00258 int kmflags;
00259
00260 void *heapbase;
00261
00262 void *shmbase;
00263
00264 } xnarch_heapcb_t;
00265
/* Initialize a heap control block: no live mappings, default kmalloc
   flags, and no attached heap/shared-memory bases yet. */
static inline void xnarch_init_heapcb (xnarch_heapcb_t *hcb)

{
    atomic_set(&hcb->numaps,0);
    hcb->kmflags = 0;
    hcb->heapbase = NULL;
    hcb->shmbase = NULL;
}
00274
00275 #ifdef __cplusplus
00276 extern "C" {
00277 #endif
00278
/* Convert a timestamp-counter value to nanoseconds (ts * 1e9 / CPU freq). */
static inline unsigned long long xnarch_tsc_to_ns (unsigned long long ts) {
    return xnarch_llimd(ts,1000000000,RTHAL_CPU_FREQ);
}
00282
/* Convert nanoseconds to timestamp-counter units (ns * CPU freq / 1e9). */
static inline unsigned long long xnarch_ns_to_tsc (unsigned long long ns) {
    return xnarch_llimd(ns,RTHAL_CPU_FREQ,1000000000);
}
00286
/* Current CPU time in nanoseconds, derived from the hardware TSC. */
static inline unsigned long long xnarch_get_cpu_time (void) {
    return xnarch_tsc_to_ns(xnarch_get_cpu_tsc());
}
00290
/* CPU clock frequency in Hz, as calibrated by the HAL. */
static inline unsigned long long xnarch_get_cpu_freq (void) {
    return RTHAL_CPU_FREQ;
}
00294
/* Identifier of the CPU this code is currently running on. */
static inline unsigned xnarch_current_cpu (void) {
    return adeos_processor_id();
}
00298
00299 static inline void *xnarch_sysalloc (u_long bytes)
00300
00301 {
00302 if (bytes >= 128*1024)
00303 return vmalloc(bytes);
00304
00305 return kmalloc(bytes,GFP_KERNEL);
00306 }
00307
00308 static inline void xnarch_sysfree (void *chunk, u_long bytes)
00309
00310 {
00311 if (bytes >= 128*1024)
00312 vfree(chunk);
00313 else
00314 kfree(chunk);
00315 }
00316
00317 #define xnarch_declare_cpuid adeos_declare_cpuid
00318 #define xnarch_get_cpu(flags) adeos_get_cpu(flags)
00319 #define xnarch_put_cpu(flags) adeos_put_cpu(flags)
00320
00321 #define xnarch_halt(emsg) \
00322 do { \
00323 adeos_set_printk_sync(adp_current); \
00324 xnarch_logerr("fatal: %s\n",emsg); \
00325 show_stack(NULL,NULL); \
00326 for (;;) ; \
00327 } while(0)
00328
00329 #define xnarch_alloc_stack xnmalloc
00330 #define xnarch_free_stack xnfree
00331
/*
 * Force the interrupt mask state: non-zero @imask disables interrupts,
 * zero enables them.  Returns non-zero if interrupts were disabled
 * before the call (the previous state, normalized to 0/1).
 */
static inline int xnarch_setimask (int imask)

{
    spl_t s;
    splhigh(s);
    splexit(!!imask);
    return !!s;
}
00340
00341 #ifdef XENO_INTR_MODULE
00342
/*
 * Install @handler on hardware interrupt @irq through the HAL, passing
 * @cookie back on each invocation, and unmask the line on success.
 * Returns 0, or the error code from rthal_request_irq().
 */
static inline int xnarch_hook_irq (unsigned irq,
				   void (*handler)(unsigned irq,
						   void *cookie),
				   void *cookie)
{
    int err = rthal_request_irq(irq,handler,cookie);

    if (err)
	return err;

    rthal_enable_irq(irq);

    return 0;
}
00355
/* Uninstall the handler previously hooked on @irq; returns the HAL status. */
static inline int xnarch_release_irq (unsigned irq) {

    return rthal_release_irq(irq);
}
00360
00361 static inline int xnarch_enable_irq (unsigned irq)
00362
00363 {
00364 if (irq >= XNARCH_IRQ_MAX)
00365 return -EINVAL;
00366
00367 rthal_enable_irq(irq);
00368
00369 return 0;
00370 }
00371
00372 static inline int xnarch_disable_irq (unsigned irq)
00373
00374 {
00375 if (irq >= XNARCH_IRQ_MAX)
00376 return -EINVAL;
00377
00378 rthal_disable_irq(irq);
00379
00380 return 0;
00381 }
00382
/* Propagate @irq down the pipeline so Linux also handles it. */
static inline void xnarch_isr_chain_irq (unsigned irq) {
    rthal_pend_linux_irq(irq);
}
00386
/* Re-enable @irq from ISR context (end-of-interrupt). */
static inline void xnarch_isr_enable_irq (unsigned irq) {
    rthal_enable_irq(irq);
}
00390
/* Forward a timer tick to Linux via the Adeos virtual timer IRQ. */
static inline void xnarch_relay_tick (void) {

    rthal_pend_linux_irq(ADEOS_TIMER_VIRQ);
}
00395
/* Bind @irq delivery to the CPUs in @affinity; returns the previous mask. */
static inline cpumask_t xnarch_set_irq_affinity (unsigned irq,
						 cpumask_t affinity) {
    return adeos_set_irq_affinity(irq,affinity);
}
00400
00401 #endif
00402
00403 #ifdef XENO_POD_MODULE
00404
00405 void xnpod_welcome_thread(struct xnthread *);
00406
00407 void xnpod_delete_thread(struct xnthread *);
00408
/*
 * Return the timer programming latency in TSC units, either from the
 * compile-time CONFIG_RTAI_HW_TIMER_LATENCY override or from a runtime
 * calibration pass.
 *
 * NOTE(review): deliberately not static/inline — this is defined once in
 * the single translation unit that sets XENO_POD_MODULE.
 */
unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY);
#else

    /* Measure the shot latency empirically through the HAL. */
    return xnarch_ns_to_tsc(rthal_calibrate_timer());
#endif
}
00420
/* Grab the hardware timer, arming @tickhandler with period @ns
   (0 presumably selects one-shot mode — confirm against the HAL). */
static inline int xnarch_start_timer (unsigned long ns,
				      void (*tickhandler)(void)) {
    return rthal_request_timer(tickhandler,ns);
}
00425
/*
 * Called when the root (Linux) thread is about to be preempted by a
 * real-time thread: mark this CPU as running real-time code and record
 * the outgoing Linux task in the root TCB so we can switch back later.
 */
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    adeos_declare_cpuid;

    adeos_load_cpuid();

    /* Flag the CPU so the HAL knows the primary domain is active. */
    __set_bit(cpuid,&rthal_cpu_realtime);

    /* Remember the per-CPU Linux current task we are suspending. */
    rootcb->user_task = rootcb->active_task = rthal_get_current(cpuid);

#ifdef CONFIG_RTAI_HW_FPU
    /* The root thread's FPU context lives in the Linux task struct. */
    rootcb->fpup = (rthal_fpenv_t *)&rootcb->user_task->thread.fpr[0];
#endif
}
00443
/* Resuming the root (Linux) thread: clear this CPU's real-time flag. */
static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}
00447
/*
 * Switch context from @out_tcb to @in_tcb.
 *
 * Two cases:
 *  - Both TCBs are backed by Linux tasks (shadow threads): perform a
 *    Linux-style task switch, including MMU context and lazy FPU/vector
 *    state handling, ending in the PPC _switch() primitive.
 *  - Otherwise (kernel-only nucleus threads, or same task): a bare
 *    stack-pointer swap through rthal_switch_context().
 *
 * Statement order here mirrors the Linux PPC context switch path and is
 * order-critical; runs with hardware interrupts locked.
 */
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
				     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;
    unsigned long flags;

    rthal_hw_lock(flags);

    /* A kernel-only thread keeps borrowing the outgoing task context. */
    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc)
	{
	struct mm_struct *prev = outproc->active_mm;
	struct mm_struct *next = inproc->active_mm;

	/* Keep the thread's page directory pointer in sync with its mm. */
	inproc->thread.pgdir = next->pgd;

	if (prev != next)
	    {
	    /* Activate the incoming address space. */
	    get_mmu_context(next);
	    set_context(next->context, next->pgd);
	    }

	if (!inproc->mm)
	    enter_lazy_tlb(prev,inproc);

	/* On SMP the FPU/AltiVec/SPE state cannot stay lazily owned by a
	   task that may migrate: give it up now if the outgoing task was
	   using it. */
#ifdef CONFIG_SMP
	if (outproc->thread.regs && (outproc->thread.regs->msr & MSR_FP))
	    giveup_fpu(outproc);
#ifdef CONFIG_ALTIVEC
	if ((outproc->thread.regs && (outproc->thread.regs->msr & MSR_VEC)))
	    giveup_altivec(outproc);
#endif
#ifdef CONFIG_SPE
	if ((outproc->thread.regs && (outproc->thread.regs->msr & MSR_SPE)))
	    giveup_spe(outproc);
#endif
#endif

	/* If the incoming task still owns the vector unit, re-arm the
	   corresponding MSR bit so its state is live on return. */
#ifdef CONFIG_ALTIVEC
	if (inproc->thread.regs && last_task_used_altivec == inproc)
	    inproc->thread.regs->msr |= MSR_VEC;
#endif

#ifdef CONFIG_SPE
	if (inproc->thread.regs && last_task_used_spe == inproc)
	    inproc->thread.regs->msr |= MSR_SPE;
#endif

	_switch(&outproc->thread,&inproc->thread);
	}
    else

	/* Lightweight switch between nucleus stacks. */
	rthal_switch_context(out_tcb->kspp,in_tcb->kspp);

    rthal_hw_unlock(flags);
}
00511
/* Tear down @dead_tcb's thread and switch to @next_tcb.  No per-thread
   cleanup is needed on this arch, so it is a plain switch. */
static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
					       xnarchtcb_t *next_tcb) {
    xnarch_switch_to(dead_tcb,next_tcb);
}
00516
/* Finalize a deleted thread without switching away: nothing to do here. */
static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb) {

}
00520
00521 static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
00522 struct xnthread *thread,
00523 const char *name)
00524 {
00525 tcb->user_task = current;
00526 tcb->active_task = NULL;
00527 tcb->ksp = 0;
00528 tcb->kspp = &tcb->ksp;
00529 #ifdef CONFIG_RTAI_HW_FPU
00530 tcb->fpup = NULL;
00531 #endif
00532 tcb->entry = NULL;
00533 tcb->cookie = NULL;
00534 tcb->self = thread;
00535 tcb->imask = 0;
00536 tcb->name = name;
00537 }
00538
00539 static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {
00540
00541 tcb->user_task = NULL;
00542 tcb->active_task = NULL;
00543 tcb->kspp = &tcb->ksp;
00544 #ifdef CONFIG_RTAI_HW_FPU
00545 tcb->fpup = &tcb->fpuenv;
00546 #endif
00547
00548 }
00549
/*
 * First code executed on a freshly created nucleus thread's stack:
 * restore the requested interrupt mask, announce the thread to the pod,
 * run its entry routine, then self-delete when the entry returns.
 */
asmlinkage static void xnarch_thread_trampoline (xnarchtcb_t *tcb)

{
    rthal_local_irq_restore(!!tcb->imask);
    xnpod_welcome_thread(tcb->self);
    tcb->entry(tcb->cookie);
    xnpod_delete_thread(tcb->self);
}
00558
/*
 * Build the initial stack frame of a new nucleus thread so that the
 * first switch-in lands in xnarch_thread_trampoline() with @tcb as its
 * argument and interrupts disabled (MSR_EE cleared).
 *
 * The frame layout (slots 19/25/26 and the STACK_FRAME_OVERHEAD bias)
 * must match the rthal switch code; presumably slot 19 is the argument
 * register save, 25 the resume PC and 26 the saved MSR — confirm
 * against RTHAL_SWITCH_FRAME_SIZE's definition.
 */
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
				       void (*entry)(void *),
				       void *cookie,
				       int imask,
				       struct xnthread *thread,
				       char *name)
{
    unsigned long *ksp, flags;

    adeos_hw_local_irq_flags(flags);

    /* Zero the bottom word: terminates stack back-traces. */
    *tcb->stackbase = 0;
    /* Top of stack, 16-byte aligned, minus one switch frame. */
    ksp = (unsigned long *)((((unsigned long)tcb->stackbase + tcb->stacksize - 0x10) & ~0xf) - RTHAL_SWITCH_FRAME_SIZE);
    tcb->ksp = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
    ksp[19] = (unsigned long)tcb;	/* r3 <= trampoline argument */
    ksp[25] = (unsigned long)&xnarch_thread_trampoline; /* resume PC */
    ksp[26] = flags & ~MSR_EE;		/* MSR with interrupts off */

    tcb->entry = entry;
    tcb->cookie = cookie;
    tcb->self = thread;
    tcb->imask = imask;
    tcb->name = name;
}
00583
/* Reset a kernel thread's private FPU save area to a clean state. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    /* Zero the environment, then let the HAL load sane defaults. */
    memset(&tcb->fpuenv,0,sizeof(tcb->fpuenv));
    rthal_init_fpu(&tcb->fpuenv);
#endif
}
00594
/*
 * Save the FPU state of the thread described by @tcb.
 *
 * For a Linux-backed (shadow/root) thread, the state lives in the task
 * struct: force the kernel to flush it there via giveup_fpu() if the
 * task currently owns the FPU.  For a kernel-only thread, dump the
 * registers into the TCB's private save area.
 */
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    struct task_struct *task = tcb->user_task;

    if (task)
	{
	if (task->thread.regs && (task->thread.regs->msr & MSR_FP))
	    giveup_fpu(task);

	return;
	}

    /* Kernel-only thread: save into the private environment. */
    rthal_save_fpu(tcb->fpup);

#endif
}
00616
/*
 * Restore the FPU state of the thread described by @tcb.
 *
 * Linux-backed threads get their FPU state reloaded lazily by the
 * kernel on the next FP trap, so nothing is done here.  Kernel-only
 * threads reload eagerly from the TCB's private save area.
 */
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if (tcb->user_task)
	/* Lazy reload handled by Linux. */
	return;

    rthal_restore_fpu(tcb->fpup);

#endif
}
00634
/*
 * Busy-wait (with 1-jiffy sleeps) until *flagp becomes non-zero.
 * In the debug build the wait is interruptible and bails out with
 * -ERESTARTSYS on a pending signal; otherwise it sleeps
 * uninterruptibly.  Returns 0 once the flag is raised.
 *
 * NOTE(review): non-static definition in a header — relies on a single
 * including TU (XENO_POD_MODULE).  Also `#if !CONFIG_…` assumes the
 * option is always defined to 0/1 by the build system.
 */
int xnarch_sleep_on (int *flagp) {

    while (!*flagp)
	{
#if !CONFIG_RTAI_OPT_DEBUG
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(1);
#else
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(1);
	if(signal_pending(current))
	    return -ERESTARTSYS;
#endif
	}
    return 0;
}
00651
00652 #ifdef CONFIG_SMP
00653
/* Kick the nucleus service IPI on every CPU in @cpumask (SMP build). */
static inline int xnarch_send_ipi (cpumask_t cpumask) {

    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}
00658
/* Install @handler on the nucleus service IPI within the RTAI domain. */
static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return adeos_virtualize_irq_from(&rthal_domain,
				     ADEOS_SERVICE_IPI0,
				     (void (*)(unsigned)) handler,
				     NULL,
				     IPIPE_HANDLE_MASK);
}
00668
/* Detach the nucleus handler from the service IPI, reverting the line
   to pass-through mode. */
static inline int xnarch_release_ipi (void)

{
    return adeos_virtualize_irq_from(&rthal_domain,
				     ADEOS_SERVICE_IPI0,
				     NULL,
				     NULL,
				     IPIPE_PASS_MASK);
}
00678
/* Synchronization barrier at halt time: entering and immediately
   leaving the Adeos critical section drains in-flight IPIs on all CPUs. */
static inline void xnarch_notify_halt(void)

{
    unsigned long flags = adeos_critical_enter(NULL);
    adeos_critical_exit(flags);
}
00685
00686 #else
00687
/* Uniprocessor stub: there is no other CPU to signal. */
static inline int xnarch_send_ipi (cpumask_t cpumask) {

    return 0;
}
00692
/* Uniprocessor stub: no IPI to hook. */
static inline int xnarch_hook_ipi (void (*handler)(void)) {

    return 0;
}
00697
/* Uniprocessor stub: no IPI to release. */
static inline int xnarch_release_ipi (void) {

    return 0;
}
00702
00703 #define xnarch_notify_halt()
00704
00705 #endif
00706
/*
 * Prepare for nucleus shutdown: pin the caller to CPU 0 on SMP, detach
 * the shadow event hooks, give pending events a grace period to drain
 * (50 jiffies), then release the service IPI.
 */
static inline void xnarch_notify_shutdown(void)

{
#ifdef CONFIG_SMP
    /* Keep the teardown path on a single, known CPU. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif
    /* Wait for the slave timelines to echo the last processed events. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}
00723
/*
 * If running over the root (Linux) domain, escalate to the real-time
 * domain by triggering the escalation virtual IRQ, whose handler runs
 * the rescheduling procedure from the proper domain context.
 * Returns 1 when escalation was triggered, 0 when already in the
 * real-time domain (caller may proceed directly).
 */
static inline int xnarch_escalate (void)

{
    extern int xnarch_escalation_virq;

    if (adp_current == adp_root)
	{
	spl_t s;
	splsync(s);
	adeos_trigger_irq(xnarch_escalation_virq);
	splexit(s);
	return 1;
	}

    return 0;
}
00740
/* Nucleus is up: let the shadow layer start intercepting events. */
static void xnarch_notify_ready (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif
}
00748
00749 #endif
00750
00751 #ifdef XENO_SHADOW_MODULE
00752
00753 static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
00754 struct xnthread *thread,
00755 const char *name)
00756 {
00757 struct task_struct *task = current;
00758
00759 tcb->user_task = task;
00760 tcb->active_task = NULL;
00761 tcb->ksp = 0;
00762 tcb->kspp = &task->thread.ksp;
00763 #ifdef CONFIG_RTAI_HW_FPU
00764 tcb->fpup = (rthal_fpenv_t *)&task->thread.fpr[0];
00765 #endif
00766 tcb->entry = NULL;
00767 tcb->cookie = NULL;
00768 tcb->self = thread;
00769 tcb->imask = 0;
00770 tcb->name = name;
00771 }
00772
/*
 * Virtualize every external IRQ plus the Adeos timer virtual IRQ,
 * routing them all through @handler with dynamic masking, so the
 * shadow layer can interpose on interrupt delivery.
 */
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	adeos_virtualize_irq(irq,
			     handler,
			     NULL,
			     IPIPE_DYNAMIC_MASK);

    /* The timer tick arrives through a virtual IRQ, outside the
       external IRQ range scanned above — grab it explicitly. */
    adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
			 handler,
			 NULL,
			 IPIPE_DYNAMIC_MASK);
}
00793
/*
 * Lock delivery of all external IRQs (and the timer virtual IRQ) to
 * domain @adp on CPU @cpuid.  The critical IPI is skipped on SMP: it
 * must always flow so cross-CPU critical sections keep working.
 */
static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	{
	switch (irq)
	    {
#ifdef CONFIG_SMP
	    case ADEOS_CRITICAL_IPI:

		/* Never lock out the critical IPI. */
		continue;
#endif

	    default:

		__adeos_lock_irq(adp,cpuid,irq);
	    }
	}

    __adeos_lock_irq(adp,cpuid,ADEOS_TIMER_VIRQ);
}
00818
/*
 * Converse of xnarch_lock_xirqs(): re-enable delivery of all external
 * IRQs (and the timer virtual IRQ) to domain @adp.  The critical IPI
 * was never locked, so it is skipped here too.
 */
static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	{
	switch (irq)
	    {
#ifdef CONFIG_SMP
	    case ADEOS_CRITICAL_IPI:

		continue;
#endif

	    default:

		__adeos_unlock_irq(adp,irq);
	    }
	}

    __adeos_unlock_irq(adp,ADEOS_TIMER_VIRQ);
}
00842
00843 #endif
00844
00845 #ifdef XENO_TIMER_MODULE
00846
/* Program the decrementer for a one-shot expiry after @delay
   (in TSC/timebase units — confirm against rthal_set_timer_shot). */
static inline void xnarch_program_timer_shot (unsigned long long delay) {

    rthal_set_timer_shot(delay);
}
00855
/* Hand the hardware timer back to Linux. */
static inline void xnarch_stop_timer (void) {
    rthal_release_timer();
}
00859
/*
 * Report the last tick IRQ timestamps for latency accounting: @shot is
 * when the tick was handled, @delivery when it was synced to its
 * domain.  Without Adeos profiling support both report @defval.
 */
static inline void xnarch_read_timings (unsigned long long *shot,
					unsigned long long *delivery,
					unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_synced;
#else
    *shot = defval;
    *delivery = defval;
#endif
}
00873
00874 #endif
00875
00876 #ifdef XENO_MAIN_MODULE
00877
00878 int xnarch_escalation_virq;
00879
00880 int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);
00881
00882 void xnpod_schedule_handler(void);
00883
00884 static rthal_trap_handler_t xnarch_old_trap_handler;
00885
/* Adeos trap-event handler: wrap the faulting pt_regs into the arch
   fault descriptor and let the pod decide how to handle it. */
static int xnarch_trap_fault (adevinfo_t *evinfo)

{
    xnarch_fltinfo_t fltinfo;
    fltinfo.regs = (struct pt_regs *)evinfo->evdata;
    return xnpod_trap_fault(&fltinfo);
}
00893
/*
 * Bring up the arch-dependent nucleus layer: allocate and hook the
 * escalation virtual IRQ onto the scheduling handler, install the
 * fault trap handler, and mount the shadow interface when fusion
 * support is enabled.  Returns 0 on success, -ENOSYS if no virtual
 * IRQ is available, or the shadow mount error.
 */
static inline int xnarch_init (void)

{
    int err = 0;

#ifdef CONFIG_SMP
    /* Run the init sequence on a single, known CPU. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
	return -ENOSYS;

    adeos_virtualize_irq_from(&rthal_domain,
			      xnarch_escalation_virq,
			      (void (*)(unsigned))&xnpod_schedule_handler,
			      NULL,
			      IPIPE_HANDLE_MASK);

    /* Keep the previous trap handler for restoration in xnarch_exit(). */
    xnarch_old_trap_handler = rthal_set_trap_handler(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif

    if (err)
	adeos_free_irq(xnarch_escalation_virq);

    return err;
}
00927
/* Undo xnarch_init(): unmount the shadow interface, restore the
   previous trap handler, and release the escalation virtual IRQ. */
static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif
    rthal_set_trap_handler(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
}
00937
00938 #endif
00939
00940 #ifdef __cplusplus
00941 }
00942 #endif
00943
00944
00945 #define XNARCH_DECL_DISPLAY_CONTEXT();
00946 #define xnarch_init_display_context(obj)
00947 #define xnarch_create_display(obj,name,tag)
00948 #define xnarch_delete_display(obj)
00949 #define xnarch_post_graph(obj,state)
00950 #define xnarch_post_graph_if(obj,state,cond)
00951
00952 #else
00953
00954 #include <nucleus/system.h>
00955
00956 #endif
00957
00958 #define XNARCH_CALIBRATION_PERIOD 1000000
00959
00960 #endif