00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022 #ifndef _RTAI_ASM_I386_SYSTEM_H
00023 #define _RTAI_ASM_I386_SYSTEM_H
00024
00025 #include <nucleus/asm-generic/system.h>
00026
00027 #ifdef __KERNEL__
00028
00029 #include <linux/config.h>
00030 #include <linux/ptrace.h>
00031
00032 #if ADEOS_RELEASE_NUMBER < 0x02060b01
00033 #error "Adeos 2.6r11c1/x86 or above is required to run this software; please upgrade."
00034 #error "See http://download.gna.org/adeos/patches/v2.6/i386/"
00035 #endif
00036
#define XNARCH_DEFAULT_TICK 1000000 /* Default nucleus tick period, ns (1 ms). */
#ifdef CONFIG_X86_LOCAL_APIC
/* The local APIC drives the nucleus timer; no host tick has to be
   emulated from here. */
#define XNARCH_HOST_TICK 0
#else /* !CONFIG_X86_LOCAL_APIC */
/* 8254-based timing: relay the host tick to Linux at its regular HZ rate. */
#define XNARCH_HOST_TICK (1000000000UL/HZ)
#endif /* CONFIG_X86_LOCAL_APIC */

/* Default stack size (bytes) for nucleus kernel threads. */
#define XNARCH_THREAD_STACKSZ 4096

/* Accessors into the arch-dependent TCB. */
#define xnarch_stack_size(tcb) ((tcb)->stacksize)
#define xnarch_fpu_ptr(tcb) ((tcb)->fpup)
#define xnarch_user_task(tcb) ((tcb)->user_task)

/* Thread stacks are carved from the nucleus heap. */
#define xnarch_alloc_stack xnmalloc
#define xnarch_free_stack xnfree
00056
struct xnthread;
struct task_struct;

/* Arch-dependent part of a nucleus thread control block (TCB). */
typedef struct xnarchtcb {

    /* Per-thread FPU backup area; 16-byte alignment is required by
       the fxsave/fxrstor instructions. */
    union i387_union fpuenv __attribute__ ((aligned (16)));
    unsigned stacksize;		/* Stack size in bytes. */
    unsigned long *stackbase;	/* Base of the stack memory area. */
    unsigned long esp;		/* Saved ESP for kernel-based threads. */
    unsigned long eip;		/* Saved EIP for kernel-based threads. */

    /* Linux task the thread is mated to; NULL for kernel-based threads. */
    struct task_struct *user_task;
    /* Linux task actually active on the CPU while this thread runs. */
    struct task_struct *active_task;

    unsigned long *espp;	/* &esp above, or &user_task->thread.esp for shadows. */
    unsigned long *eipp;	/* &eip above, or &user_task->thread.eip for shadows. */
    union i387_union *fpup;	/* &fpuenv above, or &user_task->thread.i387 for shadows. */

} xnarchtcb_t;
00078
/* Fault information passed to the nucleus fault handler. */
typedef struct xnarch_fltinfo {

    unsigned vector;		/* x86 exception vector number. */
    long errcode;		/* Hardware error code (taken from orig_eax). */
    struct pt_regs *regs;	/* Register frame at fault time. */

} xnarch_fltinfo_t;

#define xnarch_fault_trap(fi) ((fi)->vector)
#define xnarch_fault_code(fi) ((fi)->errcode)
#define xnarch_fault_pc(fi) ((fi)->regs->eip)
/* Vector 7 is #NM (device not available), i.e. an FPU access trap. */
#define xnarch_fault_fpu_p(fi) ((fi)->vector == 7)
/* Propagate the fault to Linux unless it is a debug trap (#DB, vector 1)
   or a breakpoint (#BP, vector 3) raised on behalf of a ptraced task. */
#define xnarch_fault_notify(fi) (!(current->ptrace & PT_PTRACED) || \
((fi)->vector != 1 && (fi)->vector != 3))
/* Vector 14 is #PF (page fault). */
#define xnarch_fault_pf_p(fi) ((fi)->vector == 14)
00097
00098 #ifdef __cplusplus
00099 extern "C" {
00100 #endif
00101
00102 static inline void *xnarch_sysalloc (u_long bytes)
00103
00104 {
00105 if (bytes >= 128*1024)
00106 return vmalloc(bytes);
00107
00108 return kmalloc(bytes,GFP_KERNEL);
00109 }
00110
00111 static inline void xnarch_sysfree (void *chunk, u_long bytes)
00112
00113 {
00114 if (bytes >= 128*1024)
00115 vfree(chunk);
00116 else
00117 kfree(chunk);
00118 }
00119
00120 static inline int xnarch_shadow_p (xnarchtcb_t *tcb, struct task_struct *task)
00121 {
00122 return tcb->espp == &task->thread.esp;
00123 }
00124
/* Propagate the periodic timer tick to Linux by marking the 8254
   timer interrupt as pending for the host (root) domain. */
static inline void xnarch_relay_tick (void)
{
    rthal_irq_host_pend(RTHAL_8254_IRQ);
}
00130
#ifdef XENO_POD_MODULE

/* Pod services used by the thread trampoline below. */
void xnpod_welcome_thread(struct xnthread *);

void xnpod_delete_thread(struct xnthread *);

/* Grab the hardware timer from the HAL, firing 'tickhandler' with a
   period of 'ns' nanoseconds. Returns the HAL status code. */
static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void)) {
    return rthal_timer_request(tickhandler,ns);
}
00141
/* Called when the nucleus preempts Linux on the current CPU: flag the
   CPU as held by the real-time domain and latch the preempted host
   task into the root TCB. */
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)
{
    adeos_declare_cpuid;

    adeos_load_cpuid();

    /* rthal_cpu_realtime tracks which CPUs currently run real-time
       threads (cleared again in xnarch_enter_root()). */
    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_current_host_task(cpuid);
    /* The root thread's FPU state lives in the host task's i387 area. */
    rootcb->fpup = &rootcb->user_task->thread.i387;
}
00157
/* Linux is resuming on this CPU: clear its real-time flag. */
static inline void xnarch_enter_root (xnarchtcb_t *rootcb)
{
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}
00162
/* Low-level stack/register switch. The outgoing context saves its ESP
   through *out_tcb->espp and its resume address (label 1:) through
   *out_tcb->eipp, then the incoming ESP/EIP are loaded. When a Linux
   task is entered (inproc != NULL, held in %edx), control tail-jumps
   into Linux's __switch_to() with prev in %eax and next in %edx
   (i386 regparm convention -- NOTE(review): matches the "a"/"d"
   operand bindings below; confirm against the kernel version in use);
   otherwise a plain 'ret' resumes the incoming kernel-based thread.
   The outgoing thread eventually resumes at label 1:. */
static inline void __switch_threads(xnarchtcb_t *out_tcb,
                                    xnarchtcb_t *in_tcb,
                                    struct task_struct *outproc,
                                    struct task_struct *inproc
                                    )
{
#if __GNUC__ < 3 || __GNUC__ == 3 && __GNUC_MINOR__ < 2

    /* Old GCC: callee-saved registers are preserved explicitly with
       push/pop pairs bracketing the switch. */
    __asm__ __volatile__( \
        "pushl %%ecx\n\t" \
        "pushl %%edi\n\t" \
        "pushl %%ebp\n\t" \
        "movl %0,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %1,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %2,%%ecx\n\t" \
        "movl %3,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne __switch_to\n\t" \
        "ret\n\t" \
        "1: popl %%ebp\n\t" \
        "popl %%edi\n\t" \
        "popl %%ecx\n\t" \
        : \
        : "m" (out_tcb->espp), \
          "m" (out_tcb->eipp), \
          "m" (in_tcb->espp), \
          "m" (in_tcb->eipp), \
          "b" (out_tcb), \
          "S" (in_tcb), \
          "a" (outproc), \
          "d" (inproc));

#else /* GCC >= 3.2 */

    /* Newer GCC: register preservation is expressed through dummy
       outputs instead; only %ebp is still saved manually. */
    long ebx_out, ecx_out, edi_out, esi_out;

    __asm__ __volatile__( \
        "pushl %%ebp\n\t" \
        "movl %6,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %7,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %8,%%ecx\n\t" \
        "movl %9,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne __switch_to\n\t" \
        "ret\n\t" \
        "1: popl %%ebp\n\t" \
        : "=b" (ebx_out), \
          "=&c" (ecx_out), \
          "=S" (esi_out), \
          "=D" (edi_out), \
          "+a" (outproc), \
          "+d" (inproc) \
        : "m" (out_tcb->espp), \
          "m" (out_tcb->eipp), \
          "m" (in_tcb->espp), \
          "m" (in_tcb->eipp));

#endif
}
00230
/* Switch the CPU context (stack, registers, address space, FPU
   bookkeeping) from the thread tracked by out_tcb to the one tracked
   by in_tcb. */
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;

    /* When entering a Linux task while the outgoing one owns the FPU,
       clear CR0.TS so the FPU stays accessible across the switch --
       NOTE(review): presumably for the lazy FPU save path in
       __switch_to(); confirm. */
    if (inproc && outproc->thread_info->status & TS_USEDFPU)
        clts();

    /* Kernel-based threads keep running over the last active task. */
    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc)
        {
        struct mm_struct *oldmm = outproc->active_mm;

        /* Switch the MMU context over to the incoming task's mm. */
        switch_mm(oldmm,inproc->active_mm,inproc);

        /* Tasks without an mm (kernel threads) borrow the old mm. */
        if (!inproc->mm)
            enter_lazy_tlb(oldmm,inproc);
        }

    __switch_threads(out_tcb,in_tcb,outproc,inproc);

    /* We resume here on behalf of the outgoing context. If it is a
       shadow thread, __switch_to() was bypassed on the way out, so
       replay the lazy per-CPU I/O bitmap update Linux would normally
       perform for tasks using ioperm() -- TODO confirm this mirrors
       __switch_to() for the target kernel version. */
    if (xnarch_shadow_p(out_tcb,outproc)) {

        struct thread_struct *thread = &outproc->thread;

        barrier();

        if (thread->io_bitmap_ptr) {
            struct tss_struct *tss = &per_cpu(init_tss, adeos_processor_id());

            if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY) {

                /* Install the task's bitmap into the per-CPU TSS,
                   padding any trailing bytes with 0xff (access denied). */
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,thread->io_bitmap_max);

                if (thread->io_bitmap_max < tss->io_bitmap_max)
                    memset((char *) tss->io_bitmap +
                           thread->io_bitmap_max, 0xff,
                           tss->io_bitmap_max - thread->io_bitmap_max);

                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
            }
        }
    }

    /* Raise CR0.TS again: the first FPU use will trap (#NM) so the
       proper FPU context can be restored on demand. */
    stts();
}
00289
/* Switch away from a deleted thread; no extra cleanup is needed on
   this arch beyond the context switch itself. */
static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}
00295
/* Finalize a deleted thread that is not the current one: nothing to
   do on this arch. */
static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)
{
}
00300
00301 static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
00302 struct xnthread *thread,
00303 const char *name)
00304 {
00305 tcb->user_task = current;
00306 tcb->active_task = NULL;
00307 tcb->esp = 0;
00308 tcb->espp = &tcb->esp;
00309 tcb->eipp = &tcb->eip;
00310 tcb->fpup = NULL;
00311 }
00312
/* Trampoline every kernel-based nucleus thread starts on; its stack
   frame is hand-built by xnarch_init_thread(), hence asmlinkage so
   the arguments are fetched from the stack. Enables on-demand FPU
   handling, restores the interrupt state, greets the pod, runs the
   thread body, and self-deletes if the body ever returns. */
asmlinkage static void xnarch_thread_redirect (struct xnthread *self,
                                               int imask,
                                               void(*entry)(void *),
                                               void *cookie)
{
    /* Raise CR0.TS so the first FPU use traps (#NM). */
    stts();
    /* imask != 0 means "start with interrupts off". */
    rthal_local_irq_restore(!!imask);
    xnpod_welcome_thread(self);
    entry(cookie);
    xnpod_delete_thread(self);
}
00325
/* Build the initial stack frame of a kernel-based thread so that its
   first resumption "returns" into xnarch_thread_redirect() with
   (thread, imask, entry, cookie) as stacked arguments. */
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long **psp = (unsigned long **)&tcb->esp;

    tcb->eip = (unsigned long)&xnarch_thread_redirect;
    tcb->esp = (unsigned long)tcb->stackbase;
    /* Clear the bottom word of the stack area. */
    **psp = 0;
    /* Point the stack pointer near the top, 16-byte aligned. */
    *psp = (unsigned long *)(((unsigned long)*psp + tcb->stacksize - 0x10) & ~0xf);
    /* Push the trampoline arguments in reverse (cdecl) order... */
    *--(*psp) = (unsigned long)cookie;
    *--(*psp) = (unsigned long)entry;
    *--(*psp) = (unsigned long)imask;
    *--(*psp) = (unsigned long)thread;
    /* ...then a null fake return address. */
    *--(*psp) = 0;
}
00345
#ifdef CONFIG_RTAI_HW_FPU

/* Compatibility wrappers: the per-task "has used math" flag moved
   behind accessor helpers as of kernel 2.6.11. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
#define xnarch_fpu_init_p(task) ((task)->used_math)
#define xnarch_set_fpu_init(task) ((task)->used_math = 1)
#else
#define xnarch_fpu_init_p(task) tsk_used_math(task)
#define xnarch_set_fpu_init(task) set_stopped_child_used_math(task)
#endif
00355
/* Give the thread tracked by 'tcb' a freshly initialized FPU context,
   live in the hardware. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)
{
    struct task_struct *task = tcb->user_task;

    /* Enable FPU access and reset it to a clean state. */
    __asm__ __volatile__ ("clts; fninit");

    if (cpu_has_xmm)
        {
        /* Load MXCSR with its 0x1f80 power-on default, with bit 6
           masked off by 0xffbf -- NOTE(review): bit 6 is DAZ on SSE2+
           parts; confirm the mask's intent. */
        unsigned long __mxcsr = 0x1f80UL & 0xffbfUL;
        __asm__ __volatile__ ("ldmxcsr %0": : "m" (__mxcsr));
        }

    if(task)
        {
        /* Mark the mated Linux task as having a valid FPU context
           which currently owns the hardware. */
        xnarch_set_fpu_init(task);
        task->thread_info->status |= TS_USEDFPU;
        }
}
00380
/* Save the current FPU state into the backup area pointed at by
   tcb->fpup. */
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)
{
    struct task_struct *task = tcb->user_task;

    if(task)
        {
        /* Nothing to save unless the task actually used the FPU. */
        if(!(task->thread_info->status & TS_USEDFPU))
            return;

        /* The live FPU is being saved away; drop ownership. */
        task->thread_info->status &= ~TS_USEDFPU;
        }

    /* Clear CR0.TS so the save instructions do not fault. */
    clts();

    if (cpu_has_fxsr)
        __asm__ __volatile__ ("fxsave %0; fnclex" : "=m" (*tcb->fpup));
    else
        __asm__ __volatile__ ("fnsave %0; fwait" : "=m" (*tcb->fpup));
}
00403
/* Reload the FPU from the backup area pointed at by tcb->fpup. */
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)
{
    struct task_struct *task = tcb->user_task;

    if (task)
        {
        /* No saved context yet: leave CR0.TS raised so the first FPU
           use traps and a fresh context gets initialized instead. */
        if (!xnarch_fpu_init_p(task))
            {
            stts();
            return;
            }

        /* The task regains ownership of the live FPU. */
        task->thread_info->status |= TS_USEDFPU;
        }

    /* Enable FPU access for the restore instructions. */
    clts();

    if (cpu_has_fxsr)
        __asm__ __volatile__ ("fxrstor %0": : "m" (*tcb->fpup));
    else
        __asm__ __volatile__ ("frstor %0": : "m" (*tcb->fpup));
}
00431
/* Grant FPU access to the current thread by clearing CR0.TS. */
static inline void xnarch_enable_fpu(xnarchtcb_t *tcb)
{
    clts();

    /* Without fxsr, xnarch_save_fpu() used fnsave, which destroys the
       live FPU state -- NOTE(review): presumably why a full reload is
       required here for user-mapped threads; confirm. */
    if(!cpu_has_fxsr && tcb->user_task)
        xnarch_restore_fpu(tcb);
}
00442
00443 #else
00444
/* CONFIG_RTAI_HW_FPU disabled: FPU management collapses to no-ops. */

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)
{}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)
{}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)
{}

static inline void xnarch_enable_fpu (xnarchtcb_t *tcb)
{}
00460
00461 #endif
00462
00463 #ifdef CONFIG_SMP
00464
/* Send the nucleus service IPI (IPI0) to the CPUs in 'cpumask'. */
static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask) {

    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}

/* Attach 'handler' to the service IPI on behalf of the real-time
   domain. */
static inline int xnarch_hook_ipi (void (*handler)(void))
{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     (void (*)(unsigned)) handler,
                                     NULL,
                                     IPIPE_HANDLE_MASK);
}

/* Detach the handler, letting the service IPI flow down the pipeline
   again. */
static inline int xnarch_release_ipi (void)
{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     NULL,
                                     NULL,
                                     IPIPE_PASS_MASK);
}
00489
/* Rendezvous semaphore signaled by each remote CPU upon receipt of
   the halt IPI (see xnarch_notify_halt()). */
static struct linux_semaphore xnarch_finalize_sync;

static void xnarch_finalize_cpu(unsigned irq)
{
    up(&xnarch_finalize_sync);
}
00496
/* Synchronize with all other online CPUs before the nucleus halts:
   kick ADEOS_SERVICE_IPI2 on every peer CPU and wait until each has
   acknowledged through xnarch_finalize_sync. */
static inline void xnarch_notify_halt(void)
{
    xnarch_cpumask_t other_cpus = cpu_online_map;
    unsigned cpu, nr_cpus = num_online_cpus();
    unsigned long flags;
    adeos_declare_cpuid;

    sema_init(&xnarch_finalize_sync,0);

    /* Install the acknowledge handler on the current domain. */
    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2,
                              xnarch_finalize_cpu, NULL, IPIPE_HANDLE_MASK);

    /* Target every online CPU but the local one. */
    adeos_lock_cpu(flags);
    cpu_clear(cpuid, other_cpus);
    adeos_send_ipi(ADEOS_SERVICE_IPI2, other_cpus);
    adeos_unlock_cpu(flags);

    /* Wait for one acknowledge per remote CPU. */
    for(cpu=0; cpu < nr_cpus-1; ++cpu)
        down(&xnarch_finalize_sync);

    /* Restore default handling of the sync IPI. */
    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2, NULL, NULL,
                              IPIPE_PASS_MASK);
}
00524
00525 #else
00526
/* Uniprocessor build: inter-processor signaling is a no-op. */

static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask) {

    return 0;
}

static inline int xnarch_hook_ipi (void (*handler)(void)) {

    return 0;
}

static inline int xnarch_release_ipi (void) {

    return 0;
}

#define xnarch_notify_halt()
00543
00544 #endif
00545
/* Arch-level shutdown notification: quiesce, release shadow events,
   then give the system a short grace period before unhooking the IPI. */
static inline void xnarch_notify_shutdown(void)
{
#ifdef CONFIG_SMP
    /* Pin the shutdown path on CPU #0 -- NOTE(review): presumably to
       match the CPU xnarch_init() ran on; confirm. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif
    /* Grace period (50 jiffies) so in-flight events drain before the
       IPI handler is released. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}
00562
/* If currently running over the root (Linux) domain, trigger the
   escalation virq so rescheduling runs from the real-time domain
   instead. Returns 1 when escalation was performed (the caller must
   not reschedule locally), 0 otherwise. */
static inline int xnarch_escalate (void)
{
    extern int xnarch_escalation_virq;

    if (adp_current == adp_root)
        {
        spl_t s;
        splsync(s);
        adeos_trigger_irq(xnarch_escalation_virq);
        splexit(s);
        return 1;
        }

    return 0;
}
00579
/* Called once the nucleus is fully started: let the fusion shadow
   layer start intercepting Linux task events. */
static void xnarch_notify_ready (void)
{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif
}
00587
00588 #endif
00589
00590 #ifdef XENO_THREAD_MODULE
00591
00592 static inline void xnarch_init_tcb (xnarchtcb_t *tcb)
00593 {
00594 tcb->user_task = NULL;
00595 tcb->active_task = NULL;
00596 tcb->espp = &tcb->esp;
00597 tcb->eipp = &tcb->eip;
00598 tcb->fpup = &tcb->fpuenv;
00599
00600 }
00601
00602 #endif
00603
00604 #ifdef XENO_SHADOW_MODULE
00605
/* Set up the TCB of a shadow thread, i.e. a nucleus thread mated to
   the current Linux task: the register and FPU save slots alias the
   task's own thread struct (see xnarch_shadow_p()). */
static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
                                           struct xnthread *thread,
                                           const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &task->thread.esp;
    tcb->eipp = &task->thread.eip;
    tcb->fpup = &task->thread.i387;
}
00619
/* Attach 'handler' to every external IRQ on the current domain. */
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))
{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        adeos_virtualize_irq(irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);
}
00631
/* Lock all external IRQs out of domain 'adp' on CPU 'cpuid', except
   the IPIs Linux relies on for SMP housekeeping. */
static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)
{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            /* These must always flow through. */
            case ADEOS_CRITICAL_IPI:
            case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
            case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
            case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:
                continue;
#endif /* CONFIG_SMP */

            default:
                __adeos_lock_irq(adp,cpuid,irq);
            }
        }
}
00657
/* Converse of xnarch_lock_xirqs(): let external IRQs reach domain
   'adp' again (the SMP housekeeping IPIs were never locked). */
static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)
{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            /* Never touched by xnarch_lock_xirqs(). */
            case ADEOS_CRITICAL_IPI:
            case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
            case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
            case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:
                continue;
#endif /* CONFIG_SMP */

            default:
                __adeos_unlock_irq(adp,irq);
            }
        }
}
00682
00683 #endif
00684
00685 #ifdef XENO_TIMER_MODULE
00686
/* Program the next oneshot timer interrupt to fire 'delay' from now,
   rescaling by TIMER_FREQ/CPU_FREQ -- NOTE(review): this implies
   'delay' is expressed in CPU tsc units and the HAL expects timer
   ticks; confirm against rthal_timer_program_shot(). */
static inline void xnarch_program_timer_shot (unsigned long delay) {
    rthal_timer_program_shot(rthal_imuldiv(delay,RTHAL_TIMER_FREQ,RTHAL_CPU_FREQ));
}
00694
/* Give the hardware timer back to the HAL/Linux. */
static inline void xnarch_stop_timer (void) {
    rthal_timer_release();
}
00698
/* Relay the timer tick to the CPUs in 'mask' via the APIC timer IPI
   (no-op on uniprocessor builds). */
static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)
{
#ifdef CONFIG_SMP
    return adeos_send_ipi(RTHAL_APIC_TIMER_IPI, mask);
#else
    return 0;
#endif
}
00708
/* Report the last tick programming ('shot') and delivery timestamps
   from the Adeos profiling data; both fall back to 'defval' when
   profiling support is compiled out. */
static inline void xnarch_read_timings (unsigned long long *shot,
                                        unsigned long long *delivery,
                                        unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_synced;
#else
    *shot = defval;
    *delivery = defval;
#endif
}
00722
00723 #endif
00724
00725 #ifdef XENO_MAIN_MODULE
00726
00727 #include <linux/init.h>
00728 #include <nucleus/asm/calibration.h>
00729
extern u_long nkschedlat;	/* Scheduling latency, in tsc units (see xnarch_calibrate_sched()). */

extern u_long nktimerlat;	/* Timer programming latency, in tsc units. */

int xnarch_escalation_virq;	/* Virq used to escalate rescheduling to the RT domain. */

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

/* Previous fault handler, restored by xnarch_exit(). */
static rthal_trap_handler_t xnarch_old_trap_handler;
00741
/* Bridge an Adeos fault event to the nucleus: repackage it into an
   xnarch_fltinfo_t and let xnpod_trap_fault() decide whether the
   fault was handled. */
static int xnarch_trap_fault (adevinfo_t *evinfo)
{
    xnarch_fltinfo_t fltinfo;

    fltinfo.vector = evinfo->event;
    fltinfo.errcode = ((struct pt_regs *)evinfo->evdata)->orig_eax;
    fltinfo.regs = (struct pt_regs *)evinfo->evdata;

    return xnpod_trap_fault(&fltinfo);
}
00752
/* Return the timer programming latency in tsc units; the '?: 1'
   fallback guarantees a non-zero result. */
static inline unsigned long xnarch_calibrate_timer (void)
{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    /* User-provided calibration value (ns). */
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
#else
    /* Ask the HAL to measure the latency. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif
}
00768
/* Compute the timer and scheduling latencies used to bias timing
   computations. Returns 0 on success, -ENODEV on calibration failure.
   NOTE(review): xnarch_calibrate_timer() can never return 0 because
   of its '?: 1' fallback, so the -ENODEV path looks unreachable. */
int xnarch_calibrate_sched (void)
{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
        return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}
00781
/* Arch-dependent nucleus init: calibrate timings, reserve the
   escalation virq, install the fault handler, and mount the fusion
   shadow layer when enabled. Returns 0 on success or a negative
   error code, undoing partial setup on failure. */
static inline int xnarch_init (void)
{
    int err;

#ifdef CONFIG_SMP
    /* Run the setup on CPU #0 -- NOTE(review): presumably to match
       the CPU the nucleus timer is bound to; confirm. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif

    err = xnarch_calibrate_sched();

    if (err)
        return err;

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    /* Have the scheduler run from the RT domain when the virq fires. */
    adeos_virtualize_irq_from(&rthal_domain,
                              xnarch_escalation_virq,
                              (void (*)(unsigned))&xnpod_schedule_handler,
                              NULL,
                              IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif
    /* err is still 0 here when fusion support is compiled out. */
    if (err)
        {
        rthal_trap_catch(xnarch_old_trap_handler);
        adeos_free_irq(xnarch_escalation_virq);
        }

    return err;
}
00823
/* Undo xnarch_init(): unmount the shadow layer, restore the previous
   fault handler and release the escalation virq. */
static inline void xnarch_exit (void)
{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif
    rthal_trap_catch(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
}
00833
00834 #endif
00835
00836 #ifdef __cplusplus
00837 }
00838 #endif
00839
00840 #else
00841
00842 #include <nucleus/system.h>
00843
00844 #endif
00845
00846 #endif