system.h

00001 /*
00002  * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
00003  *
00004  * RTAI/fusion is free software; you can redistribute it and/or modify it
00005  * under the terms of the GNU General Public License as published by
00006  * the Free Software Foundation; either version 2 of the License, or
00007  * (at your option) any later version.
00008  *
00009  * RTAI/fusion is distributed in the hope that it will be useful, but
00010  * WITHOUT ANY WARRANTY; without even the implied warranty of
00011  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00012  * General Public License for more details.
00013  *
00014  * You should have received a copy of the GNU General Public License
00015  * along with RTAI/fusion; if not, write to the Free Software
00016  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
00017  * 02111-1307, USA.
00018  */
00019 
00020 #ifndef _RTAI_ASM_PPC_SYSTEM_H
00021 #define _RTAI_ASM_PPC_SYSTEM_H
00022 
00023 #include <nucleus/asm-generic/system.h>
00024 
00025 #ifdef __KERNEL__
00026 
00027 #include <linux/config.h>
00028 #include <linux/ptrace.h>
00029 
00030 #if ADEOS_RELEASE_NUMBER < 0x02060703
00031 #error "Adeos 2.6r7c3/ppc or above is required to run this software; please upgrade."
00032 #error "See http://download.gna.org/adeos/patches/v2.6/ppc/"
00033 #endif
00034 
#define XNARCH_DEFAULT_TICK     1000000 /* ns, i.e. 1ms */
#define XNARCH_HOST_TICK        (1000000000UL/HZ) /* Host (Linux) tick period, ns */

#define XNARCH_THREAD_STACKSZ   4096 /* Default kernel thread stack size, bytes */

/* Accessors over the arch-dependent TCB. */
#define xnarch_stack_size(tcb)  ((tcb)->stacksize)
#define xnarch_user_task(tcb)   ((tcb)->user_task)

/* Kernel thread stacks are carved from the nucleus heap. */
#define xnarch_alloc_stack xnmalloc
#define xnarch_free_stack  xnfree
00045 
00046 struct xnthread;
00047 struct task_struct;
00048 
typedef struct xnarchtcb {      /* Per-thread arch-dependent block */

    /* Kernel mode side */

#ifdef CONFIG_RTAI_HW_FPU
    /* We only care for basic FPU handling in kernel-space; Altivec
       and SPE are not available to kernel-based nucleus threads. */
    rthal_fpenv_t fpuenv  __attribute__ ((aligned (16)));
    rthal_fpenv_t *fpup;        /* Pointer to the FPU backup area */
    struct task_struct *user_fpu_owner;
    /* Pointer to the FPU owner in userspace:
       - NULL for RT K threads,
       - last_task_used_math for Linux US threads (only current or NULL when MP)
       - current for RT US threads.
    */
#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
#else /* !CONFIG_RTAI_HW_FPU */
#define xnarch_fpu_ptr(tcb)     NULL
#endif /* CONFIG_RTAI_HW_FPU */

    unsigned stacksize;         /* Aligned size of stack (bytes) */
    unsigned long *stackbase;   /* Stack space */
    unsigned long ksp;          /* Saved KSP for kernel-based threads */
    unsigned long *kspp;        /* Pointer to saved KSP (&ksp or &user->thread.ksp) */

    /* User mode side */
    struct task_struct *user_task;      /* Shadowed user-space task */
    struct task_struct *active_task;    /* Active user-space task */

    /* Init block */
    struct xnthread *self;      /* Nucleus thread back-pointer */
    int imask;                  /* Initial interrupt mask */
    const char *name;           /* Symbolic thread name */
    void (*entry)(void *cookie); /* Thread entry point */
    void *cookie;               /* Opaque argument passed to entry() */

} xnarchtcb_t;
00086 
/* Fault descriptor handed by xnarch_trap_fault() to the nucleus. */
typedef struct xnarch_fltinfo {

    unsigned exception;         /* Adeos event/trap number */
    struct pt_regs *regs;       /* Register frame captured at fault time */

} xnarch_fltinfo_t;
00093 
/* Accessors decoding an xnarch_fltinfo_t fault descriptor.
   (The duplicate definition of xnarch_fault_pc has been removed;
   it was defined twice with an identical body.) */
#define xnarch_fault_trap(fi)   ((unsigned int)(fi)->regs->trap)
#define xnarch_fault_code(fi)   ((fi)->regs->dar)
#define xnarch_fault_pc(fi)     ((fi)->regs->nip)
/* FIXME: FPU faults ignored by the nanokernel on PPC. */
#define xnarch_fault_fpu_p(fi)  (0)
/* The following predicate is guaranteed to be called over a regular
   Linux stack context. */
#define xnarch_fault_pf_p(fi)   ((fi)->exception == ADEOS_ACCESS_TRAP)
#define xnarch_fault_notify(fi) (!(current->ptrace & PT_PTRACED) || \
                                 ((fi)->exception != ADEOS_IABR_TRAP && \
                                  (fi)->exception != ADEOS_SSTEP_TRAP && \
                                  (fi)->exception != ADEOS_DEBUG_TRAP))
00107 #ifdef __cplusplus
00108 extern "C" {
00109 #endif
00110 
/* Allocate 'bytes' of system memory from the kernel heap. Must be
   called from Linux context; GFP_KERNEL allocations may sleep. */
static inline void *xnarch_sysalloc (u_long bytes)

{
#if 0   /* FIXME: likely on-demand mapping bug here */
    if (bytes >= 128*1024)
        return vmalloc(bytes);
#endif

    return kmalloc(bytes,GFP_KERNEL);
}
00121 
/* Release memory obtained from xnarch_sysalloc(). 'bytes' must match
   the original allocation size (used to pick the allocator when the
   disabled vmalloc path is re-enabled). */
static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
#if 0   /* FIXME: likely on-demand mapping bug here */
    if (bytes >= 128*1024)
        vfree(chunk);
    else
#endif
        kfree(chunk);
}
00132 
/* Propagate the host timer tick to Linux by posting the timer virq
   to the root domain. */
static inline void xnarch_relay_tick (void)

{
    rthal_irq_host_pend(ADEOS_TIMER_VIRQ);
}
00138 
00139 #ifdef XENO_POD_MODULE
00140 
00141 void xnpod_welcome_thread(struct xnthread *);
00142 
00143 void xnpod_delete_thread(struct xnthread *);
00144 
/* Grab the hardware timer, arming it with a 'ns' nanosecond period
   and 'tickhandler' as the tick callback. Returns the HAL request
   status. */
static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void))
{
    return rthal_timer_request(tickhandler,ns);
}
00150 
/* Called when the nucleus preempts Linux (the root domain): flag the
   current CPU as running real-time and snapshot the preempted Linux
   context (task and FPU ownership) into the root TCB. */
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    adeos_declare_cpuid;

    adeos_load_cpuid();

    /* rthal_cpu_realtime is only tested for the current processor,
       and always inside a critical section. */
    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_current_host_task(cpuid);
#ifdef CONFIG_RTAI_HW_FPU
    rootcb->user_fpu_owner = rthal_get_fpu_owner(rootcb->user_task);
    /* So that xnarch_save_fpu() will operate on the right FPU area. */
    rootcb->fpup = (rootcb->user_fpu_owner
                    ? (rthal_fpenv_t *)&rootcb->user_fpu_owner->thread.fpr[0]
                    : NULL);
#endif /* CONFIG_RTAI_HW_FPU */
}
00171 
/* Called when the nucleus resumes Linux (the root domain): clear the
   real-time flag for the current CPU. */
static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}
00175 
/* Context switch from out_tcb to in_tcb. Two paths:
   - the incoming thread shadows a user-space task different from the
     preempted one: switch the MMU context and the register state via
     the Linux _switch() primitive;
   - otherwise (kernel thread, or same user task): plain kernel stack
     switch via rthal_switch_context(). */
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *prev = out_tcb->active_task;
    struct task_struct *next = in_tcb->user_task;

    /* Kernel threads carry no user task; keep the last active one. */
    in_tcb->active_task = next ?: prev;

    if (next && next != prev) /* Switch to new user-space thread? */
        {
        struct mm_struct *mm = next->active_mm;

        /* Switch the mm context.*/

#ifdef CONFIG_ALTIVEC
        /* Don't rely on FTR fixups --
           they don't work properly in our context. */
        if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC) {
            asm volatile (
                "dssall;\n"
#ifndef CONFIG_POWER4
                "sync;\n"
#endif
                : : );
        }
#endif /* CONFIG_ALTIVEC */

        next->thread.pgdir = mm->pgd;
        get_mmu_context(mm);
        set_context(mm->context,mm->pgd);

        /* _switch expects a valid "current" (r2) for storing
         * ALTIVEC and SPE state. */
        current = prev;
        _switch(&prev->thread, &next->thread);

        barrier();
        }
    else
        /* Kernel-to-kernel context switch. */
        rthal_switch_context(out_tcb->kspp,in_tcb->kspp);
}
00218 
/* Switch away from a deleted thread; no arch-level cleanup is needed
   beyond the context switch itself on PPC. */
static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}
00224 
/* Arch-level finalization of a deleted thread when no switch-out is
   required; nothing to do on PPC. */
static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)

{
    /* Empty */
}
00230 
00231 static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
00232                                          struct xnthread *thread,
00233                                          const char *name)
00234 {
00235     tcb->user_task = current;
00236     tcb->active_task = NULL;
00237     tcb->ksp = 0;
00238     tcb->kspp = &tcb->ksp;
00239 #ifdef CONFIG_RTAI_HW_FPU
00240     tcb->user_fpu_owner = NULL;
00241     tcb->fpup = NULL;
00242 #endif /* CONFIG_RTAI_HW_FPU */
00243     tcb->entry = NULL;
00244     tcb->cookie = NULL;
00245     tcb->self = thread;
00246     tcb->imask = 0;
00247     tcb->name = name;
00248 }
00249 
/* First code executed by an emerging kernel-based thread: restore the
   requested interrupt state, let the nucleus welcome the thread, run
   the thread body, then self-delete if the body ever returns. */
asmlinkage static void xnarch_thread_trampoline (xnarchtcb_t *tcb)

{
    rthal_local_irq_restore(!!tcb->imask);
    xnpod_welcome_thread(tcb->self);
    tcb->entry(tcb->cookie);
    xnpod_delete_thread(tcb->self);
}
00258 
/* Build the initial stack frame of a kernel-based thread so that the
   first switch-in lands in xnarch_thread_trampoline() with the TCB
   as its argument, interrupts and FPU access disabled in the MSR. */
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long *ksp, flags;

    adeos_hw_local_irq_flags(flags);

    /* Zero the backchain word, then carve a 16-byte aligned switch
       frame near the top of the stack. */
    *tcb->stackbase = 0;
    ksp = (unsigned long *)((((unsigned long)tcb->stackbase + tcb->stacksize - 0x10) & ~0xf) - RTHAL_SWITCH_FRAME_SIZE);
    tcb->ksp = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
    /* NOTE(review): the slot indices below must match the switch
       frame layout assumed by the HAL context-switch code. */
    ksp[19] = (unsigned long)tcb; /* r3 */
    ksp[25] = (unsigned long)&xnarch_thread_trampoline; /* lr */
    ksp[26] = flags & ~(MSR_EE | MSR_FP); /* msr */

    tcb->entry = entry;
    tcb->cookie = cookie;
    tcb->self = thread;
    tcb->imask = imask;
    tcb->name = name;
}
00283 
/* No lazy FPU init on PPC. */
#define xnarch_fpu_init_p(task) (1)

/* Grant FPU access to the incoming thread. Only needed for
   kernel-based threads (no shadowed user task); user-space tasks are
   handled by the regular Linux FPU management. */
static inline void xnarch_enable_fpu (xnarchtcb_t *current_tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    if(!current_tcb->user_task)
        rthal_enable_fpu();
#endif /* CONFIG_RTAI_HW_FPU */
}
00295 
/* Reset the FPU backup area of an emerging kernel-based RT thread. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    /* Initialize the FPU for an emerging kernel-based RT thread. This
       must be run on behalf of the emerging thread. */
    memset(&tcb->fpuenv,0,sizeof(tcb->fpuenv));
    rthal_init_fpu(&tcb->fpuenv);
#endif /* CONFIG_RTAI_HW_FPU */
}
00306 
/* Save the outgoing thread's FPU state into its backup area. When the
   state belongs to a user-space task, also clear MSR_FP in its
   register frame so Linux traps and reloads on its next FP use. */
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if(tcb->fpup)
        {
        rthal_save_fpu(tcb->fpup);

        if(tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
            tcb->user_fpu_owner->thread.regs->msr &= ~MSR_FP;
        }

#endif /* CONFIG_RTAI_HW_FPU */
}
00322 
/* Restore the incoming thread's FPU state from its backup area,
   re-flagging MSR_FP for a user-space owner, then disable kernel FPU
   access when resuming a user task. */
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if(tcb->fpup)
        {
        rthal_restore_fpu(tcb->fpup);

        if(tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
            tcb->user_fpu_owner->thread.regs->msr |= MSR_FP;
        }

    /* FIXME: We restore FPU "as it was" when RTAI preempted Linux, whereas we
       could be much lazier. */
    if(tcb->user_task)
        rthal_disable_fpu();

#endif /* CONFIG_RTAI_HW_FPU */
}
00343 
00344 #ifdef CONFIG_SMP
00345 
/* Kick the nucleus service IPI on the CPUs in 'cpumask' (SMP). */
static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask) {

    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}
00350 
/* Install 'handler' on the nucleus service IPI within the real-time
   domain (SMP). */
static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     (void (*)(unsigned)) handler,
                                     NULL,
                                     IPIPE_HANDLE_MASK);
}
00360 
/* Detach the nucleus handler from the service IPI, letting it flow
   down the pipeline again (SMP). */
static inline int xnarch_release_ipi (void)

{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     NULL,
                                     NULL,
                                     IPIPE_PASS_MASK);
}
00370 
/* Synchronize with all CPUs by entering and immediately leaving an
   Adeos critical section before halting. */
static inline void xnarch_notify_halt(void)

{
    unsigned long flags = adeos_critical_enter(NULL);
    adeos_critical_exit(flags);
}
00377 
00378 #else /* !CONFIG_SMP */
00379 
00380 static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask)
00381 
00382 {
00383     return 0;
00384 }
00385 
/* Uniprocessor stub: no IPI to hook, always succeeds. */
static inline int xnarch_hook_ipi (void (*handler)(void))
{
    return 0;
}
00391 
/* Uniprocessor stub: nothing to release, always succeeds. */
static inline int xnarch_release_ipi (void)
{
    return 0;
}

#define xnarch_notify_halt() /* Nullified */
00399 
00400 #endif /* CONFIG_SMP */
00401 
/* Prepare for nucleus shutdown: pin on CPU #0 (mirroring the HAL),
   release the shadow event hooks, let in-flight events drain, then
   drop the service IPI. */
static inline void xnarch_notify_shutdown(void)

{
#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
    /* Wait for the currently processed events to drain. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}
00418 
/* Escalate a rescheduling request to the real-time domain. When
   called from the root (Linux) domain, fire the escalation virq and
   return 1; return 0 when already running in the real-time domain. */
static inline int xnarch_escalate (void)

{
    extern int xnarch_escalation_virq;

    if (adp_current == adp_root)
        {
        spl_t s;
        splsync(s);
        adeos_trigger_irq(xnarch_escalation_virq);
        splexit(s);
        return 1;
        }

    return 0;
}
00435 
/* Notify the arch layer that the nucleus is up; start routing events
   to the user-space shadow support when enabled. */
static void xnarch_notify_ready (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
}
00443 
00444 #endif /* XENO_POD_MODULE */
00445 
00446 #ifdef XENO_THREAD_MODULE
00447 
00448 static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {
00449 
00450     tcb->user_task = NULL;
00451     tcb->active_task = NULL;
00452     tcb->kspp = &tcb->ksp;
00453 #ifdef CONFIG_RTAI_HW_FPU
00454     tcb->user_fpu_owner = NULL;
00455     tcb->fpup = &tcb->fpuenv;
00456 #endif /* CONFIG_RTAI_HW_FPU */
00457     /* Must be followed by xnarch_init_thread(). */
00458 }
00459 
00460 #endif /* XENO_THREAD_MODULE */
00461 
00462 #ifdef XENO_SHADOW_MODULE
00463 
00464 static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
00465                                            struct xnthread *thread,
00466                                            const char *name)
00467 {
00468     struct task_struct *task = current;
00469 
00470     tcb->user_task = task;
00471     tcb->active_task = NULL;
00472     tcb->ksp = 0;
00473     tcb->kspp = &task->thread.ksp;
00474 #ifdef CONFIG_RTAI_HW_FPU
00475     tcb->user_fpu_owner = task;
00476     tcb->fpup = (rthal_fpenv_t *)&task->thread.fpr[0];
00477 #endif /* CONFIG_RTAI_HW_FPU */
00478     tcb->entry = NULL;
00479     tcb->cookie = NULL;
00480     tcb->self = thread;
00481     tcb->imask = 0;
00482     tcb->name = name;
00483 }
00484 
/* Virtualize every external IRQ (plus the decrementer virq) so that
   'handler' receives them while the shadow support is active. */
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        adeos_virtualize_irq(irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);

    /* On this arch, the decrementer trap is not an external IRQ but
       it is instead mapped to a virtual IRQ, so we must grab it
       individually. */

    adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
                         handler,
                         NULL,
                         IPIPE_DYNAMIC_MASK);
}
00505 
/* Lock out all external IRQs (and the timer virq) from domain 'adp'
   on CPU 'cpuid', except the critical IPI which must always flow. */
static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:

                /* Never lock out this one. */
                continue;
#endif /* CONFIG_SMP */

            default:

                __adeos_lock_irq(adp,cpuid,irq);
            }
        }

    __adeos_lock_irq(adp,cpuid,ADEOS_TIMER_VIRQ);
}
00530 
/* Converse of xnarch_lock_xirqs(): re-enable external IRQ flow (and
   the timer virq) for domain 'adp'; the critical IPI was never
   locked. */
static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:

                continue;
#endif /* CONFIG_SMP */

            default:

                __adeos_unlock_irq(adp,irq);
            }
        }

    __adeos_unlock_irq(adp,ADEOS_TIMER_VIRQ);
}
00554 
00555 #endif /* XENO_SHADOW_MODULE */
00556 
00557 #ifdef XENO_TIMER_MODULE
00558 
/* Program the decrementer for a one-shot tick 'delay' timebase ticks
   from now. */
static inline void xnarch_program_timer_shot (unsigned long delay) {
    /* Even though some architectures may use a 64 bits delay here, we
       voluntarily limit to 32 bits, 4 billions ticks should be enough
       for now. Would a timer needs more, an extra call to the tick
       handler would simply occur after 4 billions ticks.  Since the
       timebase value is used to express CPU ticks on the PowerPC
       port, there is no need to rescale the delay value. */
    rthal_timer_program_shot(delay);
}
00568 
/* Hand the hardware timer back to Linux. */
static inline void xnarch_stop_timer (void) {
    rthal_timer_release();
}
00572 
/* Relay the timer tick to remote CPUs. Not implemented on SMP PPC
   yet (returns -1); uniprocessor builds have nothing to do. */
static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)

{
#ifdef CONFIG_SMP
    return -1;          /* FIXME */
#else /* ! CONFIG_SMP */
    return 0;
#endif /* CONFIG_SMP */
}
00582 
/* Report the last timer shot/delivery timestamps from the Adeos
   profiling data, or 'defval' for both when profiling is disabled. */
static inline void xnarch_read_timings (unsigned long long *shot,
                                        unsigned long long *delivery,
                                        unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[RTHAL_TIMER_IRQ].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[RTHAL_TIMER_IRQ].t_synced;
#else /* !CONFIG_ADEOS_PROFILING */
    *shot = defval;
    *delivery = defval;
#endif /* CONFIG_ADEOS_PROFILING */
}
00596 
00597 #endif /* XENO_TIMER_MODULE */
00598 
00599 #ifdef XENO_MAIN_MODULE
00600 
00601 #include <linux/init.h>
00602 #include <nucleus/asm/calibration.h>
00603 
extern u_long nkschedlat;       /* Scheduler latency, in tsc units */

extern u_long nktimerlat;       /* Timer programming latency, in tsc units */

int xnarch_escalation_virq;     /* Virq used to escalate rescheduling requests */

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

static rthal_trap_handler_t xnarch_old_trap_handler; /* Saved handler, restored at exit */
00615 
00616 static int xnarch_trap_fault (adevinfo_t *evinfo)
00617 
00618 {
00619     xnarch_fltinfo_t fltinfo;
00620     fltinfo.exception = evinfo->event;
00621     fltinfo.regs = (struct pt_regs *)evinfo->evdata;
00622     return xnpod_trap_fault(&fltinfo);
00623 }
00624 
/* Return the timer programming latency in timebase ticks, never 0
   (the `?: 1` fallback lets callers use 0 as an error value). Uses
   the compile-time override when provided, otherwise calibrates at
   run time. */
unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified. */
    /* Compute the time needed to program the decrementer in aperiodic
       mode. The return value is expressed in timebase ticks. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}
00636 
/* Compute the timer and scheduler latencies used by the nucleus.
   Returns 0 on success, -ENODEV when the timer latency could not be
   obtained. */
int xnarch_calibrate_sched (void)

{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
        return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}
00649 
/* One-time arch-level init: calibrate latencies, allocate and
   virtualize the escalation virq, install the fault handler, then
   mount the user-space shadow support when enabled. Partial setup is
   rolled back on error. */
static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    err = xnarch_calibrate_sched();

    if (err)
        return err;

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    adeos_virtualize_irq_from(&rthal_domain,
                              xnarch_escalation_virq,
                              (void (*)(unsigned))&xnpod_schedule_handler,
                              NULL,
                              IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    /* When fusion is disabled, err is still 0 from the calibration
       step above, so this rollback only triggers on mount failure. */
    if (err)
        {
        rthal_trap_catch(xnarch_old_trap_handler);
        adeos_free_irq(xnarch_escalation_virq);
        }

    return err;
}
00691 
/* Undo xnarch_init(): unmount the shadow support, restore the
   original trap handler and release the escalation virq. */
static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_trap_catch(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
}
00701 
00702 #endif /* XENO_MAIN_MODULE */
00703 
00704 #ifdef __cplusplus
00705 }
00706 #endif
00707 
00708 #else /* !__KERNEL__ */
00709 
00710 #include <nucleus/system.h>
00711 
00712 #endif /* __KERNEL__ */
00713 
00714 #endif /* !_RTAI_ASM_PPC_SYSTEM_H */

Generated on Wed Jun 22 22:54:02 2005 for RTAI Fusion API by  doxygen 1.4.1