system.h

/*
 * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
 * Copyright (C) 2004 The HYADES Project (http://www.hyades-itea.org).
 * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@laposte.net>.
 *
 * RTAI/fusion is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * RTAI/fusion is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with RTAI/fusion; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#ifndef _RTAI_ASM_I386_SYSTEM_H
#define _RTAI_ASM_I386_SYSTEM_H

#include <nucleus/asm-generic/system.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/ptrace.h>

#ifdef CONFIG_ADEOS_CORE
#if ADEOS_RELEASE_NUMBER < 0x02060b01
#error "Adeos 2.6r11c1/x86 or above is required to run this software; please upgrade."
#error "See http://download.gna.org/adeos/patches/v2.6/i386/"
#endif
#endif /* CONFIG_ADEOS_CORE */

#define XNARCH_DEFAULT_TICK          1000000 /* ns, i.e. 1ms */
#ifdef CONFIG_X86_LOCAL_APIC
/* When the local APIC is enabled, we do not need to relay the host
   tick, since 8254 interrupts keep flowing normally to Linux (i.e.
   the nucleus does not intercept them, but uses a dedicated
   APIC-based timer interrupt instead, namely RTHAL_APIC_TIMER_IPI). */
#define XNARCH_HOST_TICK             0
#else /* !CONFIG_X86_LOCAL_APIC */
#define XNARCH_HOST_TICK             (1000000000UL/HZ)
#endif /* CONFIG_X86_LOCAL_APIC */

#define XNARCH_THREAD_STACKSZ 4096

#define xnarch_stack_size(tcb)  ((tcb)->stacksize)
#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
#define xnarch_user_task(tcb)   ((tcb)->user_task)
#define xnarch_user_pid(tcb)    ((tcb)->user_task->pid)

#define xnarch_alloc_stack xnmalloc
#define xnarch_free_stack  xnfree

struct xnthread;
struct task_struct;

typedef struct xnarchtcb {      /* Per-thread arch-dependent block */

    /* Kernel mode side */
    union i387_union fpuenv __attribute__ ((aligned (16))); /* FPU backup area */
    unsigned stacksize;         /* Aligned size of stack (bytes) */
    unsigned long *stackbase;   /* Stack space */
    unsigned long esp;          /* Saved ESP for kernel-based threads */
    unsigned long eip;          /* Saved EIP for kernel-based threads */

    /* User mode side */
    struct task_struct *user_task;      /* Shadowed user-space task */
    struct task_struct *active_task;    /* Active user-space task */

    unsigned long *espp;        /* Pointer to ESP backup area (&esp or &user->thread.esp) */
    unsigned long *eipp;        /* Pointer to EIP backup area (&eip or &user->thread.eip) */
    union i387_union *fpup;     /* Pointer to the FPU backup area (&fpuenv or &user->thread.i387.f[x]save) */

} xnarchtcb_t;

typedef struct xnarch_fltinfo {

    unsigned vector;
    long errcode;
    struct pt_regs *regs;

} xnarch_fltinfo_t;

#define xnarch_fault_trap(fi)   ((fi)->vector)
#define xnarch_fault_code(fi)   ((fi)->errcode)
#define xnarch_fault_pc(fi)     ((fi)->regs->eip)
/* Fault caused by using the FPU while it is disabled (#NM, vector 7). */
#define xnarch_fault_fpu_p(fi)  ((fi)->vector == 7)
/* The following predicates are only usable over a regular Linux stack
   context. */
#define xnarch_fault_pf_p(fi)   ((fi)->vector == 14)
#define xnarch_fault_bp_p(fi)   ((current->ptrace & PT_PTRACED) && \
                                 ((fi)->vector == 1 || (fi)->vector == 3))
#define xnarch_fault_notify(fi) (!xnarch_fault_bp_p(fi))

#ifdef __cplusplus
extern "C" {
#endif

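/* System memory allocation helpers. The 128 KB threshold below is a
   heuristic: kmalloc() hands out physically contiguous memory and is
   ill-suited to large requests, so bigger chunks are taken from the
   vmalloc() arena instead. Callers must pass the same size to
   xnarch_sysfree() so that the matching release routine is picked. */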
static inline void *xnarch_sysalloc (u_long bytes)

{
    if (bytes >= 128*1024)
        return vmalloc(bytes);

    return kmalloc(bytes,GFP_KERNEL);
}

static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
    if (bytes >= 128*1024)
        vfree(chunk);
    else
        kfree(chunk);
}

static inline int xnarch_shadow_p (xnarchtcb_t *tcb, struct task_struct *task)
{
    return tcb->espp == &task->thread.esp; /* Sign of shadow... */
}

static inline void xnarch_relay_tick (void)

{
    rthal_irq_host_pend(RTHAL_8254_IRQ);
}

#ifdef XENO_POD_MODULE

void xnpod_welcome_thread(struct xnthread *);

void xnpod_delete_thread(struct xnthread *);

static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void)) {
    return rthal_timer_request(tickhandler,ns);
}

static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    rthal_declare_cpuid;

    rthal_load_cpuid();

    /* rthal_cpu_realtime is only tested for the current processor,
       and always inside a critical section. */
    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_current_host_task(cpuid);
    /* So that xnarch_save_fpu() will operate on the right FPU area. */
    rootcb->fpup = &rootcb->user_task->thread.i387;
}

static inline void xnarch_enter_root (xnarchtcb_t *rootcb)
{
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}

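/* Low-level stack switch. Both variants below implement the same
   sequence: save the flags and callee-saved registers of the outgoing
   context, store its stack pointer through out_tcb->espp and a resume
   address (label 1:) through out_tcb->eipp, then load the incoming
   context's stack pointer and push its saved EIP. If the incoming
   context belongs to a user-space task (inproc != NULL, held in %edx),
   control branches to the kernel's __switch_to() with prev/next in
   %eax/%edx as per the i386 register-based calling convention;
   otherwise a plain "ret" resumes the incoming kernel thread. */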
static inline void __switch_threads(xnarchtcb_t *out_tcb,
                                    xnarchtcb_t *in_tcb,
                                    struct task_struct *outproc,
                                    struct task_struct *inproc
                                    )
{
#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 2)

    __asm__ __volatile__( \
        "pushfl\n\t" \
        "pushl %%ecx\n\t" \
        "pushl %%edi\n\t" \
        "pushl %%ebp\n\t" \
        "movl %0,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %1,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %2,%%ecx\n\t" \
        "movl %3,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne  __switch_to\n\t" \
        "ret\n\t" \
"1:      popl %%ebp\n\t" \
        "popl %%edi\n\t" \
        "popl %%ecx\n\t" \
        "popfl\n\t" \
      : /* no output */ \
      : "m" (out_tcb->espp), \
        "m" (out_tcb->eipp), \
        "m" (in_tcb->espp), \
        "m" (in_tcb->eipp), \
        "b" (out_tcb), \
        "S" (in_tcb), \
        "a" (outproc), \
        "d" (inproc));

#else /* GCC version >= 3.2 */

    long ebx_out, ecx_out, edi_out, esi_out;

    __asm__ __volatile__( \
        "pushfl\n\t" \
        "pushl %%ebp\n\t" \
        "movl %6,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %7,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %8,%%ecx\n\t" \
        "movl %9,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne  __switch_to\n\t" \
        "ret\n\t" \
"1:      popl %%ebp\n\t" \
        "popfl\n\t" \
      : "=b" (ebx_out), \
        "=&c" (ecx_out), \
        "=S" (esi_out), \
        "=D" (edi_out), \
        "+a" (outproc), \
        "+d" (inproc) \
      : "m" (out_tcb->espp), \
        "m" (out_tcb->eipp), \
        "m" (in_tcb->espp), \
        "m" (in_tcb->eipp));

#endif /* GCC version < 3.2 */
}

static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;
    unsigned long fs, gs;

    if (inproc && outproc->thread_info->status & TS_USEDFPU)
        /* __switch_to() will try to use __unlazy_fpu(), so we need to
           clear the TS bit first. */
        clts();

    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc)
        {
        struct mm_struct *oldmm = outproc->active_mm;

        switch_mm(oldmm,inproc->active_mm,inproc);

        if (!inproc->mm)
            enter_lazy_tlb(oldmm,inproc);
        }

    if (out_tcb->user_task) {
       /* Make sure that __switch_to() will always reload the correct
          %fs and %gs registers, even if we happen to migrate the task
          across domains in the meantime. */
        asm volatile("mov %%fs,%0":"=m" (fs));
        asm volatile("mov %%gs,%0":"=m" (gs));
    }

    __switch_threads(out_tcb,in_tcb,outproc,inproc);

    if (xnarch_shadow_p(out_tcb,outproc)) {

        struct thread_struct *thread = &outproc->thread;

        loadsegment(fs, fs);
        loadsegment(gs, gs);

        barrier();

        /* Eagerly reinstate the I/O bitmap of any incoming shadow
           thread which has previously requested I/O permissions. We
           don't want the unexpected latencies induced by lazy update
           from the GPF handler to bite shadow threads that have
           explicitly told the kernel that they would need to perform
           raw I/O ops. */

        if (thread->io_bitmap_ptr) {
            struct tss_struct *tss = &per_cpu(init_tss, rthal_processor_id());

            if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY) {

                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,thread->io_bitmap_max);

                if (thread->io_bitmap_max < tss->io_bitmap_max)
                    memset((char *) tss->io_bitmap +
                           thread->io_bitmap_max, 0xff,
                           tss->io_bitmap_max - thread->io_bitmap_max);

                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
            }
        }
    }

    stts();
}

static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}

static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)
{
    /* Empty */
}

static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
                                         struct xnthread *thread,
                                         const char *name)
{
    tcb->user_task = current;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &tcb->esp;
    tcb->eipp = &tcb->eip;
    tcb->fpup = NULL;
}

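/* Trampoline for newly created kernel-based threads. It is entered
   with its arguments laid out on the thread's nascent stack by
   xnarch_init_thread() below; "asmlinkage" forces GCC to fetch them
   from the stack rather than from registers, which is what makes that
   hand-crafted frame work. */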
asmlinkage static void xnarch_thread_redirect (struct xnthread *self,
                                               int imask,
                                               void(*entry)(void *),
                                               void *cookie)
{
    /* xnpod_welcome_thread() will do clts() if needed. */
    stts();
    rthal_local_irq_restore(!!imask);
    xnpod_welcome_thread(self);
    entry(cookie);
    xnpod_delete_thread(self);
}

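/* Build the initial stack frame of a kernel-based thread so that the
   first switch to it "returns" into xnarch_thread_redirect(). The
   frame pushed below the aligned stack top reads, from higher to
   lower addresses: cookie, entry, imask, thread, and a NULL fake
   return address, matching the stack-based parameter passing the
   trampoline expects. */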
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long **psp = (unsigned long **)&tcb->esp;

    tcb->eip = (unsigned long)&xnarch_thread_redirect;
    tcb->esp = (unsigned long)tcb->stackbase;
    **psp = 0;  /* Commit bottom stack memory */
    *psp = (unsigned long *)(((unsigned long)*psp + tcb->stacksize - 0x10) & ~0xf);
    *--(*psp) = (unsigned long)cookie;
    *--(*psp) = (unsigned long)entry;
    *--(*psp) = (unsigned long)imask;
    *--(*psp) = (unsigned long)thread;
    *--(*psp) = 0;      /* Fake return address */
}

#ifdef CONFIG_RTAI_HW_FPU

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
#define xnarch_fpu_init_p(task)   ((task)->used_math)
#define xnarch_set_fpu_init(task) ((task)->used_math = 1)
#else
#define xnarch_fpu_init_p(task)   tsk_used_math(task)
#define xnarch_set_fpu_init(task) set_stopped_child_used_math(task)
#endif

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;
    /* Initialize the FPU for a task. This must be run on behalf of the
       task. */

    __asm__ __volatile__ ("clts; fninit");

    if (cpu_has_xmm)
        {
        /* 0x1f80 is the power-on default MXCSR value (all exceptions
           masked); masking with 0xffbf keeps the DAZ bit (bit 6) clear. */
        unsigned long __mxcsr = 0x1f80UL & 0xffbfUL;
        __asm__ __volatile__ ("ldmxcsr %0": : "m" (__mxcsr));
        }

    if(task)
        {
        /* Real-time shadow FPU initialization: tell Linux that this thread
           initialized its FPU hardware. The TS_USEDFPU bit is necessary for
           xnarch_save_fpu() to save the FPU state at the next switch. */
        xnarch_set_fpu_init(task);
        task->thread_info->status |= TS_USEDFPU;
        }
}

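/* Save the current FPU context into the TCB's backup area. Note the
   different epilogues: fxsave leaves the FPU state intact, so pending
   exceptions are cleared by hand with fnclex; fnsave reinitializes the
   FPU as a side effect, so only a serializing fwait is needed. */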
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    if(task)
        {
        if(!(task->thread_info->status & TS_USEDFPU))
            return;

        /* Tell Linux that we already saved the state of the FPU hardware
           of this task. */
        task->thread_info->status &= ~TS_USEDFPU;
        }

    clts();

    if (cpu_has_fxsr)
        __asm__ __volatile__ ("fxsave %0; fnclex" : "=m" (*tcb->fpup));
    else
        __asm__ __volatile__ ("fnsave %0; fwait" : "=m" (*tcb->fpup));
}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    if (task)
        {
        if (!xnarch_fpu_init_p(task))
            {
            stts();
            return;     /* Uninitialized FPU area -- do not restore. */
            }

        /* Tell Linux that this task has altered the state of the FPU
           hardware. */
        task->thread_info->status |= TS_USEDFPU;
        }

    /* Restore the FPU hardware with valid fp registers from a
       user-space or kernel thread. */
    clts();

    if (cpu_has_fxsr)
        __asm__ __volatile__ ("fxrstor %0": /* no output */ : "m" (*tcb->fpup));
    else
        __asm__ __volatile__ ("frstor %0": /* no output */ : "m" (*tcb->fpup));
}

static inline void xnarch_enable_fpu(xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    if(task && !(task->thread_info->status & TS_USEDFPU))
        return;

    clts();

    if(!cpu_has_fxsr && tcb->user_task)
        /* fnsave also reinitializes the FPU state as a side effect, so
           on CPUs prior to the PII (i.e. without fxsr) we need to
           restore the saved state. */
        xnarch_restore_fpu(tcb);
}

#else /* !CONFIG_RTAI_HW_FPU */

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_enable_fpu (xnarchtcb_t *tcb)

{}

#endif /* CONFIG_RTAI_HW_FPU */

#endif /* XENO_POD_MODULE */

#ifdef XENO_THREAD_MODULE

static inline void xnarch_init_tcb (xnarchtcb_t *tcb)
{
    tcb->user_task = NULL;
    tcb->active_task = NULL;
    tcb->espp = &tcb->esp;
    tcb->eipp = &tcb->eip;
    tcb->fpup = &tcb->fpuenv;
    /* Must be followed by xnarch_init_thread(). */
}

#endif /* XENO_THREAD_MODULE */

#ifdef XENO_SHADOW_MODULE

static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
                                           struct xnthread *thread,
                                           const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &task->thread.esp;
    tcb->eipp = &task->thread.eip;
    tcb->fpup = &task->thread.i387;
}

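/* Hand over all external IRQs to the given handler in the current
   pipeline domain, so that the nucleus can funnel them through its
   own dispatching logic. */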
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        rthal_virtualize_irq(rthal_current_domain,
                             irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);
}

static inline void xnarch_lock_xirqs (rthal_pipeline_stage_t *ipd, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case RTHAL_CRITICAL_IPI:
            case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
            case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
            case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:

                /* Never lock out these ones. */
                continue;
#endif /* CONFIG_SMP */

            default:

                rthal_lock_irq(ipd,cpuid,irq);
            }
        }
}

static inline void xnarch_unlock_xirqs (rthal_pipeline_stage_t *ipd, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case RTHAL_CRITICAL_IPI:
            case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
            case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
            case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:

                continue;
#endif /* CONFIG_SMP */

            default:

                rthal_unlock_irq(ipd,irq);
            }
        }
}

#endif /* XENO_SHADOW_MODULE */

#ifdef XENO_TIMER_MODULE

static inline void xnarch_program_timer_shot (unsigned long delay)
{
    /* Even though some architectures may use a 64-bit delay here, we
       voluntarily limit ourselves to 32 bits: 4 billion ticks should
       be enough for now. Should a timer need more, an extra call to
       the tick handler would simply occur after 4 billion ticks. */
    rthal_timer_program_shot(rthal_imuldiv(delay,RTHAL_TIMER_FREQ,RTHAL_CPU_FREQ));
}

static inline void xnarch_stop_timer (void)
{
    rthal_timer_release();
}

static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)
{
#ifdef CONFIG_SMP
    return rthal_send_ipi(RTHAL_APIC_TIMER_IPI, mask);
#else /* !CONFIG_SMP */
    return 0;
#endif /* CONFIG_SMP */
}

#endif /* XENO_TIMER_MODULE */

#ifdef XENO_MAIN_MODULE

#include <linux/init.h>
#include <nucleus/asm/calibration.h>

extern u_long nkschedlat;

extern u_long nktimerlat;

int xnarch_escalation_virq;

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

static rthal_trap_handler_t xnarch_old_trap_handler;

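/* Gateway from the HAL trap/fault event to the nucleus' generic fault
   handling code (xnpod_trap_fault()). */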
static int xnarch_trap_fault (unsigned event, unsigned domid, void *data)
{
    struct pt_regs *regs = (struct pt_regs *)data;
    xnarch_fltinfo_t fltinfo;

    fltinfo.vector = event;
    fltinfo.errcode = regs->orig_eax;
    fltinfo.regs = regs;

    return xnpod_trap_fault(&fltinfo);
}

static inline unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified. */
    /* Compute the time needed to program the PIT in aperiodic
       mode. The return value is expressed in CPU ticks. Depending on
       whether CONFIG_X86_LOCAL_APIC is enabled in the kernel
       configuration RTAI is compiled against,
       CONFIG_RTAI_HW_TIMER_LATENCY refers to either the local APIC or
       the 8254 timer latency value. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}

int xnarch_calibrate_sched (void)

{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
        return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}

static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    err = xnarch_calibrate_sched();

    if (err)
        return err;

    xnarch_escalation_virq = rthal_alloc_virq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    rthal_virtualize_irq(&rthal_domain,
                         xnarch_escalation_virq,
                         (void (*)(unsigned))&xnpod_schedule_handler,
                         NULL,
                         IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    if (err)
        {
        rthal_trap_catch(xnarch_old_trap_handler);
        rthal_free_virq(xnarch_escalation_virq);
        }

    return err;
}

static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_trap_catch(xnarch_old_trap_handler);
    rthal_free_virq(xnarch_escalation_virq);
}

#endif /* XENO_MAIN_MODULE */

#ifdef __cplusplus
}
#endif

#else /* !__KERNEL__ */

#include <nucleus/system.h>
#include <bits/local_lim.h>

#endif /* __KERNEL__ */

#endif /* !_RTAI_ASM_I386_SYSTEM_H */
