00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046 #ifndef _RTAI_ASM_I386_SYSTEM_H
00047 #define _RTAI_ASM_I386_SYSTEM_H
00048
00049 #ifdef __KERNEL__
00050
00051 #include <linux/kernel.h>
00052 #include <linux/version.h>
00053 #include <linux/module.h>
00054 #include <linux/slab.h>
00055 #include <linux/errno.h>
00056 #include <linux/adeos.h>
00057 #include <linux/vmalloc.h>
00058 #include <asm/uaccess.h>
00059 #include <asm/param.h>
00060 #include <asm/mmu_context.h>
00061 #include <rtai_config.h>
00062 #include <nucleus/asm/hal.h>
00063 #include <nucleus/asm/atomic.h>
00064 #include <nucleus/shadow.h>
00065
00066 #if ADEOS_RELEASE_NUMBER < 0x02060901
00067 #error "Adeos 2.6r9c1/x86 or above is required to run this software; please upgrade."
00068 #error "See http://download.gna.org/adeos/patches/v2.6/i386/"
00069 #endif
00070
#define MODULE_PARM_VALUE(parm) (parm)

/* Interrupt-state word saved/restored by the spl*() macros. */
typedef unsigned long spl_t;

/* Disable hw interrupts, saving the previous state into x. */
#define splhigh(x) rthal_local_irq_save(x)
#ifdef CONFIG_SMP
/* On SMP, x may also carry the xnlock recursion flag (bit 1); only
   bit 0 holds the hw interrupt state, so mask before restoring. */
#define splexit(x) rthal_local_irq_restore((x) & 1)
#else /* !CONFIG_SMP */
#define splexit(x) rthal_local_irq_restore(x)
#endif /* CONFIG_SMP */
/* Unconditionally re-enable hw interrupts. */
#define splnone() rthal_sti()
/* Test the current hw interrupt state (see rthal_local_irq_test). */
#define spltest() rthal_local_irq_test()
/* Read the current interrupt state without changing it. */
#define splget(x) rthal_local_irq_flags(x)
/* Restore state x through the HAL's synchronizing variant. */
#define splsync(x) rthal_local_irq_sync(x)
00085
/*
 * Nucleus lock descriptor.  The low bits of `lock' record which CPU(s)
 * claimed the lock; the most significant bit is the global owner bit
 * (see __xnlock_get_irqsave()).
 *
 * Consistency fix: this file tests CONFIG_RTAI_OPT_DEBUG with
 * #ifndef elsewhere (defined/undefined semantics), so use #ifdef here
 * rather than #if to keep the struct layout and the macro selection in
 * agreement under every possible definition of the symbol.
 */
typedef struct {

    volatile unsigned long lock;
#ifdef CONFIG_RTAI_OPT_DEBUG
    /* Debug bookkeeping: last acquisition site, for stuck-lock reports. */
    const char *file;
    const char *function;
    unsigned line;
    int cpu;
#endif /* CONFIG_RTAI_OPT_DEBUG */
} xnlock_t;
00096
#ifndef CONFIG_RTAI_OPT_DEBUG
/* Static initializer matching xnlock_t's non-debug layout. */
#define XNARCH_LOCK_UNLOCKED (xnlock_t) { 0 }
#else
/*
 * Bug fix: the original initializer supplied only four values for the
 * five-field debug layout (lock, file, function, line, cpu), so `line'
 * received -1 and `cpu' silently defaulted to 0 instead of -1.
 */
#define XNARCH_LOCK_UNLOCKED (xnlock_t) { \
    0,    /* lock */ \
    NULL, /* file */ \
    NULL, /* function */ \
    0,    /* line */ \
    -1    /* cpu */ \
}
#endif
00107
00108 #ifdef CONFIG_SMP
00109
#ifndef CONFIG_RTAI_OPT_DEBUG
/* Grab the nucleus lock, saving the hw interrupt state into x. */
#define xnlock_get_irqsave(lock,x) ((x) = __xnlock_get_irqsave(lock))
#else /* CONFIG_RTAI_OPT_DEBUG */
/* Debug variant: also record the acquisition site in the lock. */
#define xnlock_get_irqsave(lock,x) \
    ((x) = __xnlock_get_irqsave(lock, __FILE__, __LINE__,__FUNCTION__))
#endif /* !CONFIG_RTAI_OPT_DEBUG */
/* Release the lock leaving hw interrupts off (restore flags = 1). */
#define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock,1)
/* Release the lock re-enabling hw interrupts (restore flags = 0). */
#define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock,0)
00118
/* Reset an xnlock to its pristine, unlocked state. */
static inline void xnlock_init (xnlock_t *lock) {

    *lock = XNARCH_LOCK_UNLOCKED;
}
00123
#if CONFIG_RTAI_OPT_DEBUG
/* Abort the spin after ~3M iterations and report a stuck lock instead
   of hanging silently. */
#define XNARCH_DEBUG_SPIN_LIMIT 3000000

/*
 * Acquire the global nucleus lock with hw interrupts disabled.
 * Returns the saved interrupt state in bit 0 of the result; bit 1 is
 * set when this CPU already owned the lock (recursive entry), in which
 * case the matching xnlock_put_irqrestore() will not release it.
 */
static inline spl_t
__xnlock_get_irqsave (xnlock_t *lock, const char *file, unsigned line, const char *function)
{
    unsigned spin_count = 0;
#else /* !CONFIG_RTAI_OPT_DEBUG */
static inline spl_t __xnlock_get_irqsave (xnlock_t *lock)
{
#endif /* CONFIG_RTAI_OPT_DEBUG */
    adeos_declare_cpuid;
    unsigned long flags;

    rthal_local_irq_save(flags);

    adeos_load_cpuid();

    /* Our per-CPU bit was clear: this is a first (non-recursive)
       acquisition attempt from this CPU. */
    if (!test_and_set_bit(cpuid,&lock->lock))
        {
        /* Spin until the global owner bit (MSB) can be taken. */
        while (test_and_set_bit(BITS_PER_LONG - 1,&lock->lock))
            {
            rthal_cpu_relax(cpuid);

#if CONFIG_RTAI_OPT_DEBUG
            if (++spin_count == XNARCH_DEBUG_SPIN_LIMIT)
                {
                adeos_set_printk_sync(adp_current);
                printk(KERN_ERR
                       "RTAI: stuck on nucleus lock %p\n"
                       " waiter = %s:%u (%s(), CPU #%d)\n"
                       " owner = %s:%u (%s(), CPU #%d)\n",
                       lock,file,line,function,cpuid,
                       lock->file,lock->line,lock->function,lock->cpu);
                show_stack(NULL,NULL);
                for (;;)
                    safe_halt();
                }
#endif /* CONFIG_RTAI_OPT_DEBUG */
            }

#if CONFIG_RTAI_OPT_DEBUG
        /* Record the acquisition site for stuck-lock diagnostics. */
        lock->file = file;
        lock->function = function;
        lock->line = line;
        lock->cpu = cpuid;
#endif /* CONFIG_RTAI_OPT_DEBUG */
        }
    else
        /* Recursive entry: flag it so the release becomes a no-op. */
        flags |= 2;

    return flags;
}
00177
/*
 * Release the nucleus lock and restore the interrupt state previously
 * returned by __xnlock_get_irqsave().  A recursive acquisition (bit 1
 * set in flags) leaves the lock held by the outermost owner.
 */
static inline void xnlock_put_irqrestore (xnlock_t *lock, spl_t flags)

{
    if (!(flags & 2))
        {
        adeos_declare_cpuid;

        rthal_cli();

        adeos_load_cpuid();

        /* Drop our per-CPU claim first, then the global owner bit. */
        if (test_and_clear_bit(cpuid,&lock->lock))
            clear_bit(BITS_PER_LONG - 1,&lock->lock);
        }

    rthal_local_irq_restore(flags & 1);
}
00195
/* Case labels for the SMP housekeeping IPIs that must keep flowing to
   Linux; presumably spliced into a switch over IRQ numbers -- confirm
   against users of this macro. */
#define XNARCH_PASSTHROUGH_IRQS \
case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR: \
case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR: \
case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:
00200
00201 #else
00202
/* UP builds: the nucleus lock degenerates to plain interrupt masking. */
#define xnlock_init(lock)              do { } while(0)
#define xnlock_get_irqsave(lock,x)     rthal_local_irq_save(x)
#define xnlock_put_irqrestore(lock,x)  rthal_local_irq_restore(x)
#define xnlock_clear_irqoff(lock)      rthal_cli()
#define xnlock_clear_irqon(lock)       rthal_sti()
00208
00209 #endif
00210
#define XNARCH_NR_CPUS RTHAL_NR_CPUS

/* Default tick period (presumably ns: 1000000 = 1ms) -- confirm. */
#define XNARCH_DEFAULT_TICK 1000000
#define XNARCH_IRQ_MAX IPIPE_NR_XIRQS
#ifdef CONFIG_X86_LOCAL_APIC
/* APIC timer in use: no host tick period to relay. */
#define XNARCH_HOST_TICK 0
#else /* !CONFIG_X86_LOCAL_APIC */
/* Linux tick period in ns, relayed to the host domain. */
#define XNARCH_HOST_TICK (1000000000UL/HZ)
#endif /* CONFIG_X86_LOCAL_APIC */
/* No arch-specific correction applied to the calibration value. */
#define xnarch_adjust_calibration(x) (x)

/* Default thread stack size (bytes); the root thread borrows Linux's
   stack, hence 0. */
#define XNARCH_THREAD_STACKSZ 4096
#define XNARCH_ROOT_STACKSZ 0

/* Logging helpers, all tagged with the nucleus prompt. */
#define XNARCH_PROMPT "RTAI[nucleus]: "
#define xnarch_loginfo(fmt,args...) printk(KERN_INFO XNARCH_PROMPT fmt , ##args)
#define xnarch_logwarn(fmt,args...) printk(KERN_WARNING XNARCH_PROMPT fmt , ##args)
#define xnarch_logerr(fmt,args...) printk(KERN_ERR XNARCH_PROMPT fmt , ##args)
#define xnarch_printf(fmt,args...) printk(KERN_INFO XNARCH_PROMPT fmt , ##args)
00234
/* Remainder of an unsigned long long divided by an unsigned long;
   `rem' must be a pointer receiving the remainder. */
#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
/* Quotient only, remainder discarded. */
#define xnarch_uldiv(ull, d)  rthal_uldivrem(ull, d, NULL)
/* Remainder only.  Bug fix: the expansion referenced `uld', which is
   not a parameter of this macro (copy/paste from xnarch_ullmod), so
   any use failed to compile -- it must use `d'. */
#define xnarch_ulmod(ull, d)  ({ u_long _rem; \
                                 rthal_uldivrem(ull,d,&_rem); _rem; })

/* Thin aliases over the HAL arithmetic helpers. */
#define xnarch_ullmul            rthal_ullmul
#define xnarch_uldivrem          rthal_uldivrem
#define xnarch_ulldiv            rthal_ulldiv
#define xnarch_imuldiv           rthal_imuldiv
#define xnarch_llimd             rthal_llimd
#define xnarch_get_cpu_tsc       rthal_rdtsc
00246
/* Thin wrappers over the Linux cpumask API, keeping the nucleus code
   arch-neutral. */
typedef cpumask_t xnarch_cpumask_t;
#ifdef CONFIG_SMP
#define xnarch_cpu_online_map            cpu_online_map
#else /* !CONFIG_SMP */
#define xnarch_cpu_online_map            cpumask_of_cpu(0)
#endif /* CONFIG_SMP */
#define xnarch_num_online_cpus()         num_online_cpus()
#define xnarch_cpu_set(cpu, mask)        cpu_set(cpu, mask)
#define xnarch_cpu_clear(cpu, mask)      cpu_clear(cpu, mask)
#define xnarch_cpus_clear(mask)          cpus_clear(mask)
#define xnarch_cpu_isset(cpu, mask)      cpu_isset(cpu, mask)
#define xnarch_cpus_and(dst, src1, src2) cpus_and(dst, src1, src2)
#define xnarch_cpus_equal(mask1, mask2)  cpus_equal(mask1, mask2)
#define xnarch_cpus_empty(mask)          cpus_empty(mask)
#define xnarch_cpumask_of_cpu(cpu)       cpumask_of_cpu(cpu)
#define xnarch_first_cpu(mask)           first_cpu(mask)
#define XNARCH_CPU_MASK_ALL              CPU_MASK_ALL
00264
00265 struct xnthread;
00266 struct xnheap;
00267 struct task_struct;
00268
00269 #define xnarch_stack_size(tcb) ((tcb)->stacksize)
00270 #define xnarch_fpu_ptr(tcb) ((tcb)->fpup)
00271
/* Architecture-level thread control block (i386). */
typedef struct xnarchtcb {

    /* FPU save area; 16-byte aligned as required by fxsave/fxrstor. */
    union i387_union fpuenv __attribute__ ((aligned (16)));
    unsigned stacksize;        /* stack size in bytes */
    unsigned long *stackbase;  /* base of the private stack -- presumably
                                  unused for Linux-backed threads; confirm */
    unsigned long esp;         /* saved stack pointer (kernel threads) */
    unsigned long eip;         /* saved resume address (kernel threads) */

    struct task_struct *user_task;   /* backing Linux task, or NULL */
    struct task_struct *active_task; /* Linux task current on this CPU */

    unsigned long *espp;  /* &esp, or &task->thread.esp for shadows */
    unsigned long *eipp;  /* &eip, or &task->thread.eip for shadows */
    union i387_union *fpup; /* FPU context: &fpuenv or &task->thread.i387 */

} xnarchtcb_t;
00290
/* Arch-neutral fault descriptor handed to xnpod_trap_fault(). */
typedef struct xnarch_fltinfo {

    unsigned vector;      /* trap/exception vector number */
    long errcode;         /* hw error code (orig_eax of the frame) */
    struct pt_regs *regs; /* register frame at the time of the fault */

} xnarch_fltinfo_t;

/* Accessors used by the nucleus fault handling code. */
#define xnarch_fault_trap(fi)  ((fi)->vector)
#define xnarch_fault_code(fi)  ((fi)->errcode)
#define xnarch_fault_pc(fi)    ((fi)->regs->eip)
00302
/* Arch-level heap control block. */
typedef struct xnarch_heapcb {

    atomic_t numaps;   /* number of active mappings -- presumably
                          userland mmaps of the heap; confirm */

    int kmflags;       /* kernel memory allocation flags */

    void *heapbase;    /* kernel address of the heap memory */

    void *shmbase;     /* shared-memory base -- verify against users */

} xnarch_heapcb_t;
00314
00315 static inline void xnarch_init_heapcb (xnarch_heapcb_t *hcb)
00316
00317 {
00318 atomic_set(&hcb->numaps,0);
00319 hcb->kmflags = 0;
00320 hcb->heapbase = NULL;
00321 hcb->shmbase = NULL;
00322 }
00323
00324 #ifdef __cplusplus
00325 extern "C" {
00326 #endif
00327
/* Convert a tsc count to nanoseconds using the calibrated CPU freq. */
static inline unsigned long long xnarch_tsc_to_ns (unsigned long long ts) {
    return xnarch_llimd(ts,1000000000,RTHAL_CPU_FREQ);
}

/* Convert nanoseconds to a tsc count. */
static inline unsigned long long xnarch_ns_to_tsc (unsigned long long ns) {
    return xnarch_llimd(ns,RTHAL_CPU_FREQ,1000000000);
}

/* Current CPU time in nanoseconds, derived from the tsc. */
static inline unsigned long long xnarch_get_cpu_time (void) {
    return xnarch_tsc_to_ns(xnarch_get_cpu_tsc());
}

/* CPU clock frequency in Hz, as calibrated by the HAL. */
static inline unsigned long long xnarch_get_cpu_freq (void) {
    return RTHAL_CPU_FREQ;
}

/* Identifier of the CPU we are currently running on. */
static inline unsigned xnarch_current_cpu (void) {
    return adeos_processor_id();
}
00347
00348 static inline void *xnarch_sysalloc (u_long bytes)
00349
00350 {
00351 if (bytes >= 128*1024)
00352 return vmalloc(bytes);
00353
00354 return kmalloc(bytes,GFP_KERNEL);
00355 }
00356
00357 static inline void xnarch_sysfree (void *chunk, u_long bytes)
00358
00359 {
00360 if (bytes >= 128*1024)
00361 vfree(chunk);
00362 else
00363 kfree(chunk);
00364 }
00365
/* CPU id helpers, mapped onto the Adeos primitives. */
#define xnarch_declare_cpuid  adeos_declare_cpuid
#define xnarch_get_cpu(flags) adeos_get_cpu(flags)
#define xnarch_put_cpu(flags) adeos_put_cpu(flags)

/* Fatal error: switch printk to synchronous mode, log the message,
   dump the stack and halt the CPU forever. */
#define xnarch_halt(emsg) \
do { \
    adeos_set_printk_sync(adp_current); \
    xnarch_logerr("fatal: %s\n",emsg); \
    show_stack(NULL,NULL); \
    for (;;) safe_halt(); \
} while(0)

/* Thread stacks are carved out of the nucleus heap. */
#define xnarch_alloc_stack xnmalloc
#define xnarch_free_stack xnfree
00380
/*
 * Set the interrupt mask level: any non-zero imask masks interrupts.
 * Returns the previous state as a boolean (non-zero if interrupts
 * were masked).
 */
static inline int xnarch_setimask (int imask)

{
    spl_t s;
    splhigh(s);
    splexit(!!imask);
    return !!s;
}
00389
00390 #ifdef XENO_INTR_MODULE
00391
/*
 * Install `handler' on `irq' through the HAL and enable the line.
 * Returns 0 on success, or the HAL error code on failure (in which
 * case the line is left untouched).
 */
static inline int xnarch_hook_irq (unsigned irq,
				   void (*handler)(unsigned irq,
						   void *cookie),
				   void *cookie)
{
    int err = rthal_request_irq(irq,handler,cookie);

    if (err != 0)
	return err;

    rthal_enable_irq(irq);

    return 0;
}
00404
/* Uninstall the handler previously set by xnarch_hook_irq(). */
static inline int xnarch_release_irq (unsigned irq) {

    return rthal_release_irq(irq);
}
00409
00410 static inline int xnarch_enable_irq (unsigned irq)
00411
00412 {
00413 if (irq >= XNARCH_IRQ_MAX)
00414 return -EINVAL;
00415
00416 rthal_enable_irq(irq);
00417
00418 return 0;
00419 }
00420
00421 static inline int xnarch_disable_irq (unsigned irq)
00422
00423 {
00424 if (irq >= XNARCH_IRQ_MAX)
00425 return -EINVAL;
00426
00427 rthal_disable_irq(irq);
00428
00429 return 0;
00430 }
00431
/* Propagate an interrupt down the pipeline to Linux from an ISR. */
static inline void xnarch_isr_chain_irq (unsigned irq) {
    rthal_pend_linux_irq(irq);
}

/* Re-enable an interrupt line from ISR context. */
static inline void xnarch_isr_enable_irq (unsigned irq) {
    rthal_enable_irq(irq);
}

/* Pend the 8254 timer interrupt to Linux so the host tick survives. */
static inline void xnarch_relay_tick (void) {

    rthal_pend_linux_irq(RTHAL_8254_IRQ);
}

/* Set the CPU affinity of an irq line, returning the previous mask. */
static inline cpumask_t xnarch_set_irq_affinity (unsigned irq,
						 cpumask_t affinity) {
    return adeos_set_irq_affinity(irq,affinity);
}
00449
00450 #endif
00451
00452 #ifdef XENO_POD_MODULE
00453
00454 void xnpod_welcome_thread(struct xnthread *);
00455
00456 void xnpod_delete_thread(struct xnthread *);
00457
/*
 * Estimate the timer programming latency, in tsc units.  A non-zero
 * CONFIG_RTAI_HW_TIMER_LATENCY overrides the dynamic calibration.
 * NOTE(review): not declared `static inline' like its neighbours --
 * presumably relies on XENO_POD_MODULE being defined by exactly one
 * translation unit; confirm before including elsewhere.
 */
unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY);
#else /* CONFIG_RTAI_HW_TIMER_LATENCY == 0 */
    /* Measure the latency dynamically through the HAL. */
    return xnarch_ns_to_tsc(rthal_calibrate_timer());
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}

/* Start the hw timer with period `ns' (0 presumably means oneshot --
   confirm against rthal_request_timer), firing `tickhandler'. */
static inline int xnarch_start_timer (unsigned long ns,
				      void (*tickhandler)(void)) {
    return rthal_request_timer(tickhandler,ns);
}
00478
/*
 * Called when the nucleus scheduler preempts the root (Linux) thread:
 * flag the CPU as running real-time code and snapshot the Linux task
 * context we are leaving.
 */
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    adeos_declare_cpuid;

    adeos_load_cpuid();

    __set_bit(cpuid,&rthal_cpu_realtime);

    /* Remember the per-CPU Linux task we are leaving. */
    rootcb->user_task = rootcb->active_task = rthal_get_current(cpuid);

    /* Point the FPU save area at the outgoing task's i387 context. */
    rootcb->fpup = &rootcb->user_task->thread.i387;
}

/* Called when the root thread resumes: clear the real-time CPU flag. */
static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}
00498
/*
 * Low-level context switch: saves the outgoing thread's esp/eip through
 * out_tcb->espp/eipp, loads the incoming thread's, and either jumps to
 * __switch_to() (when switching Linux register state, inproc != NULL in
 * %edx) or returns into the incoming context.  Label "1:" is where a
 * previously switched-out thread resumes.
 */
static inline void __switch_threads(xnarchtcb_t *out_tcb,
				    xnarchtcb_t *in_tcb,
				    struct task_struct *outproc,
				    struct task_struct *inproc
				    )
{
    /* gcc < 3.2: no register-clobber support for the regs we trash, so
       save/restore them explicitly around the switch. */
#if __GNUC__ < 3 || __GNUC__ == 3 && __GNUC_MINOR__ < 2

    __asm__ __volatile__( \
        "pushl %%ecx\n\t" \
        "pushl %%edi\n\t" \
        "pushl %%ebp\n\t" \
        "movl %0,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %1,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %2,%%ecx\n\t" \
        "movl %3,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne __switch_to\n\t" \
        "ret\n\t" \
"1:      popl %%ebp\n\t" \
        "popl %%edi\n\t" \
        "popl %%ecx\n\t" \
      : /* no output */ \
      : "m" (out_tcb->espp), \
        "m" (out_tcb->eipp), \
        "m" (in_tcb->espp), \
        "m" (in_tcb->eipp), \
        "b" (out_tcb), \
        "S" (in_tcb), \
        "a" (outproc), \
        "d" (inproc));

#else /* GCC version >= 3.2 */

    /* Let the compiler know ebx/ecx/esi/edi are clobbered via dummy
       outputs; eax/edx carry outproc/inproc into __switch_to(). */
    long ebx_out, ecx_out, edi_out, esi_out;

    __asm__ __volatile__( \
        "pushl %%ebp\n\t" \
        "movl %6,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %7,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %8,%%ecx\n\t" \
        "movl %9,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne __switch_to\n\t" \
        "ret\n\t" \
"1:      popl %%ebp\n\t" \
      : "=b" (ebx_out), \
        "=&c" (ecx_out), \
        "=S" (esi_out), \
        "=D" (edi_out), \
        "+a" (outproc), \
        "+d" (inproc) \
      : "m" (out_tcb->espp), \
        "m" (out_tcb->eipp), \
        "m" (in_tcb->espp), \
        "m" (in_tcb->eipp));

#endif /* GCC version < 3.2 */
}
00566
/*
 * Switch execution from out_tcb's context to in_tcb's, handling FPU
 * ownership and the Linux mm when the incoming thread is backed by a
 * Linux task.
 */
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
				     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;

    /* Outgoing task owns the FPU: clear TS so its state remains
       accessible -- NOTE(review): presumably to keep lazy FPU handling
       coherent across the nucleus switch; confirm. */
    if (inproc && outproc->thread_info->status & TS_USEDFPU)
	clts();

    in_tcb->active_task = inproc ?: outproc;

    /* Switching in a different Linux task: install its mm context. */
    if (inproc && inproc != outproc)
	{
	struct mm_struct *oldmm = outproc->active_mm;

	switch_mm(oldmm,inproc->active_mm,inproc);

	if (!inproc->mm)
	    enter_lazy_tlb(oldmm,inproc);
	}

    __switch_threads(out_tcb,in_tcb,outproc,inproc);

    /* Re-arm the TS trap so the next FPU use faults and triggers a
       lazy FPU context restore. */
    stts();
}
00594
/* Tear down a dying thread's context and switch to the next one; the
   i386 port needs no extra finalization beyond the plain switch. */
static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
					       xnarchtcb_t *next_tcb) {
    xnarch_switch_to(dead_tcb,next_tcb);
}

/* Finalize a dying thread without switching; nothing to do on i386. */
static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb) {

}
00603
00604 static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
00605 struct xnthread *thread,
00606 const char *name)
00607 {
00608 tcb->user_task = current;
00609 tcb->active_task = NULL;
00610 tcb->esp = 0;
00611 tcb->espp = &tcb->esp;
00612 tcb->eipp = &tcb->eip;
00613 tcb->fpup = NULL;
00614 }
00615
00616 static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {
00617
00618 tcb->user_task = NULL;
00619 tcb->active_task = NULL;
00620 tcb->espp = &tcb->esp;
00621 tcb->eipp = &tcb->eip;
00622 tcb->fpup = &tcb->fpuenv;
00623
00624 }
00625
/*
 * Trampoline every emerging kernel-based thread first runs through:
 * re-arms the FPU trap, sets the requested interrupt mask, notifies the
 * nucleus, runs the thread body, then self-deletes when it returns.
 */
asmlinkage static void xnarch_thread_redirect (struct xnthread *self,
					       int imask,
					       void(*entry)(void *),
					       void *cookie)
{
    stts();
    rthal_local_irq_restore(!!imask);
    xnpod_welcome_thread(self);
    entry(cookie);
    xnpod_delete_thread(self);
}
00638
/*
 * Build the initial stack frame of a kernel-based thread so that the
 * first switch-in "returns" into xnarch_thread_redirect() with the
 * proper arguments.
 */
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
				       void (*entry)(void *),
				       void *cookie,
				       int imask,
				       struct xnthread *thread,
				       char *name)
{
    unsigned long **psp = (unsigned long **)&tcb->esp;

    tcb->eip = (unsigned long)&xnarch_thread_redirect;
    tcb->esp = (unsigned long)tcb->stackbase;
    /* Clear the first stack word -- presumably an end-of-stack marker;
       confirm. */
    **psp = 0;
    /* Start at the top of the stack area, 16-byte aligned. */
    *psp = (unsigned long *)(((unsigned long)*psp + tcb->stacksize - 0x10) & ~0xf);
    /* Fake the C call frame of
       xnarch_thread_redirect(thread,imask,entry,cookie): arguments
       pushed right-to-left, then a null return address. */
    *--(*psp) = (unsigned long)cookie;
    *--(*psp) = (unsigned long)entry;
    *--(*psp) = (unsigned long)imask;
    *--(*psp) = (unsigned long)thread;
    *--(*psp) = 0;
}
00658
00659 #ifdef CONFIG_RTAI_HW_FPU
00660
/* Give a fresh, usable FPU state to an emerging thread. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
    /* Clear TS and reset the x87 state. */
    __asm__ __volatile__ ("clts; fninit");

    if (cpu_has_xmm)
	{
	/* 0x1f80 is the MXCSR power-on default (all SIMD exceptions
	   masked); the 0xffbf mask clears bit 6 (DAZ) defensively --
	   TODO confirm, 0x1f80 already has that bit clear. */
	unsigned long __mxcsr = 0x1f80UL & 0xffbfUL;
	__asm__ __volatile__ ("ldmxcsr %0": : "m" (__mxcsr));
	}
}
00675
/*
 * Save the FPU state of a kernel-based thread into its TCB.  Threads
 * backed by a Linux task are skipped -- their FPU state is presumably
 * handled through the host context switch machinery; confirm.
 */
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
    if (!tcb->user_task)
	{
	clts();

	if (cpu_has_fxsr)
	    __asm__ __volatile__ ("fxsave %0; fnclex" : "=m" (*tcb->fpup));
	else
	    __asm__ __volatile__ ("fnsave %0; fwait" : "=m" (*tcb->fpup));
	}
}
00689
/*
 * Restore the FPU state recorded in the TCB.  For Linux-backed threads
 * that never used the FPU, simply re-arm the TS trap and bail out.
 */
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    if (task)
	{
	if (!task->used_math)
	    {
	    /* No FPU state to restore: keep the trap armed. */
	    stts();
	    return;
	    }

	/* Mark the task as owning the FPU again so Linux saves its
	   state at the next host context switch. */
	task->thread_info->status |= TS_USEDFPU;
	}

    clts();

    if (cpu_has_fxsr)
	__asm__ __volatile__ ("fxrstor %0": : "m" (*tcb->fpup));
    else
	__asm__ __volatile__ ("frstor %0": : "m" (*tcb->fpup));
}
00718
00719 #else
00720
/* FPU support compiled out: all FPU management hooks are no-ops. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{}
00732
00733 #endif
00734
/*
 * Poll *flagp once per jiffy until it becomes non-zero.  Returns 0 on
 * success; in debug builds the sleep is interruptible and -ERESTARTSYS
 * is returned when a signal is pending.
 * NOTE(review): defined without `static inline' in a header -- relies
 * on XENO_POD_MODULE being defined by a single translation unit.
 */
int xnarch_sleep_on (int *flagp) {

    while (!*flagp)
	{
#if !CONFIG_RTAI_OPT_DEBUG
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(1);
#else /* CONFIG_RTAI_OPT_DEBUG */
	/* Debug builds allow signals to break a stuck wait. */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(1);
	if(signal_pending(current))
	    return -ERESTARTSYS;
#endif /* !CONFIG_RTAI_OPT_DEBUG */
	}

    return 0;
}
00752
00753 #ifdef CONFIG_SMP
00754
/* Kick the nucleus scheduler IPI on the CPUs in cpumask. */
static inline int xnarch_send_ipi (cpumask_t cpumask) {

    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}

/* Install `handler' on the nucleus service IPI in the RTAI domain. */
static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return adeos_virtualize_irq_from(&rthal_domain,
				     ADEOS_SERVICE_IPI0,
				     (void (*)(unsigned)) handler,
				     NULL,
				     IPIPE_HANDLE_MASK);
}

/* Detach the nucleus handler, letting the IPI flow down the pipeline. */
static inline int xnarch_release_ipi (void)

{
    return adeos_virtualize_irq_from(&rthal_domain,
				     ADEOS_SERVICE_IPI0,
				     NULL,
				     NULL,
				     IPIPE_PASS_MASK);
}
00779
/* Rendezvous used by xnarch_notify_halt() to wait for all peer CPUs. */
static struct semaphore xnarch_finalize_sync;

/* IPI2 handler: signal that this CPU reached the finalization point. */
static void xnarch_finalize_cpu(unsigned irq)
{
    up(&xnarch_finalize_sync);
}
00786
/*
 * Synchronize with every other online CPU before the nucleus halts:
 * IPI them all, then wait until each one has acknowledged.
 */
static inline void xnarch_notify_halt(void)

{
    unsigned cpu, nr_cpus = num_online_cpus();
    cpumask_t other_cpus = cpu_online_map;
    unsigned long flags;
    adeos_declare_cpuid;

    init_MUTEX_LOCKED(&xnarch_finalize_sync);

    /* Route the finalization IPI to our rendezvous handler. */
    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2,
			      xnarch_finalize_cpu, NULL, IPIPE_HANDLE_MASK);

    /* IPI every online CPU but ourselves. */
    adeos_lock_cpu(flags);
    cpu_clear(cpuid, other_cpus);
    adeos_send_ipi(ADEOS_SERVICE_IPI2, other_cpus);
    adeos_unlock_cpu(flags);

    /* Wait for each of the nr_cpus-1 peers to check in. */
    for(cpu=0; cpu < nr_cpus-1; ++cpu)
	down(&xnarch_finalize_sync);

    /* Restore pass-through behavior on the service IPI. */
    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2, NULL, NULL,
			      IPIPE_PASS_MASK);
}
00814
00815 #else
00816
/* UP builds: the IPI machinery degenerates to successful no-ops. */
static inline int xnarch_send_ipi (cpumask_t cpumask) {

    return 0;
}

static inline int xnarch_hook_ipi (void (*handler)(void)) {

    return 0;
}

static inline int xnarch_release_ipi (void) {

    return 0;
}

/* Nothing to synchronize with on UP. */
#define xnarch_notify_halt()
00833
00834 #endif
00835
/* Prepare the nucleus for unloading: pin to CPU0 on SMP, detach the
   fusion event hooks, give pending events time to drain, then release
   the service IPI. */
static inline void xnarch_notify_shutdown(void)

{
#ifdef CONFIG_SMP
    /* Keep the teardown sequence on a single CPU. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif /* CONFIG_RTAI_OPT_FUSION */

    /* Wait a bit (50 jiffies) -- presumably for in-flight events to
       drain; confirm. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}
00852
/*
 * If running over the Linux domain, trigger the escalation virq so the
 * rescheduling request is replayed from the RTAI domain; returns 1 when
 * the escalation was triggered, 0 when already in the RTAI domain.
 */
static inline int xnarch_escalate (void)

{
    extern int xnarch_escalation_virq;

    if (adp_current == adp_root)
	{
	spl_t s;
	splsync(s);
	adeos_trigger_irq(xnarch_escalation_virq);
	splexit(s);
	return 1;
	}

    return 0;
}
00869
/* Nucleus is up: start intercepting Linux events for fusion shadows. */
static void xnarch_notify_ready (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
}
00877
00878 #endif
00879
00880 #ifdef XENO_SHADOW_MODULE
00881
/*
 * Initialize the TCB of a shadow thread: the register and FPU contexts
 * live in the backing Linux task's thread struct, so the TCB only
 * carries pointers into it.
 */
static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
					   struct xnthread *thread,
					   const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &task->thread.esp;
    tcb->eipp = &task->thread.eip;
    tcb->fpup = &task->thread.i387;
}
00895
/* Virtualize every external IRQ onto `handler' in the current domain. */
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	adeos_virtualize_irq(irq,
			     handler,
			     NULL,
			     IPIPE_DYNAMIC_MASK);
}
00907
/*
 * Lock out all external IRQs from domain adp on the given CPU, except
 * the SMP housekeeping IPIs which must keep flowing.
 */
static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	{
	switch (irq)
	    {
#ifdef CONFIG_SMP
	    case ADEOS_CRITICAL_IPI:
	    case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
	    case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
	    case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:

		/* Never lock out these ones. */
		continue;
#endif /* CONFIG_SMP */

	    default:

		__adeos_lock_irq(adp,cpuid,irq);
	    }
	}
}
00933
/* Converse of xnarch_lock_xirqs(): re-admit all external IRQs into
   domain adp, skipping the always-unlocked SMP housekeeping IPIs. */
static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	{
	switch (irq)
	    {
#ifdef CONFIG_SMP
	    case ADEOS_CRITICAL_IPI:
	    case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
	    case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
	    case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:

		continue;
#endif /* CONFIG_SMP */

	    default:

		__adeos_unlock_irq(adp,irq);
	    }
	}
}
00958
00959 #endif
00960
00961 #ifdef XENO_TIMER_MODULE
00962
/* Program the next oneshot, converting a tsc-unit delay into timer
   frequency units. */
static inline void xnarch_program_timer_shot (unsigned long long delay) {

    rthal_set_timer_shot(rthal_imuldiv(delay,RTHAL_TIMER_FREQ,RTHAL_CPU_FREQ));
}

/* Give the hw timer back to Linux. */
static inline void xnarch_stop_timer (void) {
    rthal_release_timer();
}

/* Report the last tick's programming/delivery timestamps from the
   Adeos profiling data, or defval when profiling is compiled out. */
static inline void xnarch_read_timings (unsigned long long *shot,
					unsigned long long *delivery,
					unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_synced;
#else /* !CONFIG_ADEOS_PROFILING */
    *shot = defval;
    *delivery = defval;
#endif /* CONFIG_ADEOS_PROFILING */
}
00987
00988 #endif
00989
00990 #ifdef XENO_MAIN_MODULE
00991
00992 int xnarch_escalation_virq;
00993
00994 int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);
00995
00996 void xnpod_schedule_handler(void);
00997
00998 static rthal_trap_handler_t xnarch_old_trap_handler;
00999
01000 static int xnarch_trap_fault (adevinfo_t *evinfo)
01001 {
01002 xnarch_fltinfo_t fltinfo;
01003
01004 fltinfo.vector = evinfo->event;
01005 fltinfo.errcode = ((struct pt_regs *)evinfo->evdata)->orig_eax;
01006 fltinfo.regs = (struct pt_regs *)evinfo->evdata;
01007
01008 return xnpod_trap_fault(&fltinfo);
01009 }
01010
/*
 * Bring the arch layer up: allocate the escalation virq, route it to
 * the nucleus scheduler in the RTAI domain, install the trap handler,
 * and mount the fusion shadow layer when enabled.  Returns 0 on
 * success or a negative error code.
 */
static inline int xnarch_init (void)

{
    int err = 0;

#ifdef CONFIG_SMP
    /* Run the init sequence on a single, predictable CPU. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
	return -ENOSYS;

    adeos_virtualize_irq_from(&rthal_domain,
			      xnarch_escalation_virq,
			      (void (*)(unsigned))&xnpod_schedule_handler,
			      NULL,
			      IPIPE_HANDLE_MASK);

    /* Keep the previous handler so xnarch_exit() can restore it. */
    xnarch_old_trap_handler = rthal_set_trap_handler(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    if (err)
	adeos_free_irq(xnarch_escalation_virq);

    return err;
}
01044
/* Undo xnarch_init(): unmount fusion, restore the previous trap
   handler and release the escalation virq. */
static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_set_trap_handler(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
}
01054
01055 #endif
01056
01057 #ifdef XENO_TRACES_MODULE
01058
01059 #include <asm/timex.h>
01060 #include <linux/kernel_stat.h>
01061
/* Consistency fix: the rest of this file tests CONFIG_X86_LOCAL_APIC
   with #ifdef (defined/undefined semantics); use the same form here. */
#ifdef CONFIG_X86_LOCAL_APIC

/* APIC timer in use: trace its IPI and count its per-CPU ticks. */
#define RTAI_TRACE_TIMER_IRQ           RTHAL_APIC_TIMER_IPI
#define linux_timer_irq_count(cpu)     (irq_stat[(cpu)].apic_timer_irqs)

#else /* !CONFIG_X86_LOCAL_APIC */

/* 8254 PIT in use: trace its IRQ and count its ticks. */
#define RTAI_TRACE_TIMER_IRQ           RTHAL_8254_IRQ
#define linux_timer_irq_count(cpu)     (kstat_cpu(cpu).irqs[RTHAL_8254_IRQ])

#endif /* CONFIG_X86_LOCAL_APIC */
01073
01074 #define tsc2ms(timestamp) rthal_ulldiv((timestamp), cpu_khz, NULL)
01075
01076 #endif
01077
01078 #ifdef __cplusplus
01079 }
01080 #endif
01081
01082
/* Execution-graph display hooks: compiled out on this arch. */
#define XNARCH_DECL_DISPLAY_CONTEXT();
#define xnarch_init_display_context(obj)
#define xnarch_create_display(obj,name,tag)
#define xnarch_delete_display(obj)
#define xnarch_post_graph(obj,state)
#define xnarch_post_graph_if(obj,state,cond)
01089
01090 #else
01091
01092 #include <nucleus/system.h>
01093
01094 #endif
01095
/* Calibration period -- presumably in ns (100us); confirm against the
   timer calibration users. */
#define XNARCH_CALIBRATION_PERIOD 100000
01097
01098 #endif