system.h

/*
 * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
 * Copyright (C) 2004 The HYADES Project (http://www.hyades-itea.org).
 * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@laposte.net>.
 *
 * RTAI/fusion is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * RTAI/fusion is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with RTAI/fusion; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#ifndef _RTAI_ASM_I386_SYSTEM_H
#define _RTAI_ASM_I386_SYSTEM_H

#include <nucleus/asm-generic/system.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/slab.h>         /* kmalloc()/kfree() */
#include <linux/vmalloc.h>      /* vmalloc()/vfree() */

#if ADEOS_RELEASE_NUMBER < 0x02060b01
#error "Adeos 2.6r11c1/x86 or above is required to run this software; please upgrade."
#error "See http://download.gna.org/adeos/patches/v2.6/i386/"
#endif

#define XNARCH_DEFAULT_TICK          1000000 /* ns, i.e. 1ms */
#ifdef CONFIG_X86_LOCAL_APIC
/* When the local APIC is enabled, we do not need to relay the host
   tick, since 8254 interrupts keep flowing normally to Linux
   (i.e. the nucleus does not intercept them, but uses a dedicated
   APIC-based timer interrupt instead, namely RTHAL_APIC_TIMER_IPI). */
#define XNARCH_HOST_TICK             0
#else /* CONFIG_X86_LOCAL_APIC */
#define XNARCH_HOST_TICK             (1000000000UL/HZ)
#endif /* CONFIG_X86_LOCAL_APIC */

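/*
 * Added note (not in the original header): XNARCH_HOST_TICK is the
 * relayed host tick period in nanoseconds. As a worked example,
 * assuming a 2.6 kernel built with HZ=1000, it evaluates to
 * 1000000000UL / 1000 = 1000000 ns, i.e. the same 1 ms period as
 * XNARCH_DEFAULT_TICK.
 */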

#define XNARCH_THREAD_STACKSZ 4096

#define xnarch_stack_size(tcb)  ((tcb)->stacksize)
#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
#define xnarch_user_task(tcb)   ((tcb)->user_task)

#define xnarch_alloc_stack xnmalloc
#define xnarch_free_stack  xnfree

struct xnthread;
struct task_struct;

typedef struct xnarchtcb {      /* Per-thread arch-dependent block */

    /* Kernel mode side */
    union i387_union fpuenv __attribute__ ((aligned (16))); /* FPU backup area */
    unsigned stacksize;         /* Aligned size of stack (bytes) */
    unsigned long *stackbase;   /* Stack space */
    unsigned long esp;          /* Saved ESP for kernel-based threads */
    unsigned long eip;          /* Saved EIP for kernel-based threads */

    /* User mode side */
    struct task_struct *user_task;      /* Shadowed user-space task */
    struct task_struct *active_task;    /* Active user-space task */

    unsigned long *espp;        /* Pointer to ESP backup area (&esp or &user->thread.esp) */
    unsigned long *eipp;        /* Pointer to EIP backup area (&eip or &user->thread.eip) */
    union i387_union *fpup;     /* Pointer to the FPU backup area (&fpuenv or &user->thread.i387.f[x]save) */

} xnarchtcb_t;

typedef struct xnarch_fltinfo {

    unsigned vector;
    long errcode;
    struct pt_regs *regs;

} xnarch_fltinfo_t;

#define xnarch_fault_trap(fi)   ((fi)->vector)
#define xnarch_fault_code(fi)   ((fi)->errcode)
#define xnarch_fault_pc(fi)     ((fi)->regs->eip)
/* Fault caused by using the FPU while it is disabled (trap 7,
   device not available). */
#define xnarch_fault_fpu_p(fi)  ((fi)->vector == 7)
/* The following predicate is guaranteed to be called over a regular
   Linux stack context. It tells whether Linux should be notified of
   the fault; debug traps (vectors 1 and 3) raised by a ptraced task
   are left to the debugger instead. */
#define xnarch_fault_notify(fi) (!(current->ptrace & PT_PTRACED) || \
                                 ((fi)->vector != 1 && (fi)->vector != 3))
/* Fault is a page fault (trap 14). */
#define xnarch_fault_pf_p(fi)   ((fi)->vector == 14)

#ifdef __cplusplus
extern "C" {
#endif

static inline void *xnarch_sysalloc (u_long bytes)

{
    if (bytes >= 128*1024)
        return vmalloc(bytes);

    return kmalloc(bytes,GFP_KERNEL);
}

static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
    if (bytes >= 128*1024)
        vfree(chunk);
    else
        kfree(chunk);
}

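/*
 * Added usage sketch (not in the original header): the allocator
 * routes requests of 128 KiB or more to vmalloc() and smaller ones
 * to kmalloc(), so a chunk must be freed with the same size it was
 * allocated with for xnarch_sysfree() to pick the matching release
 * routine. The example is disabled so it adds no code to the header.
 */
#if 0
static inline int xnarch_sysalloc_example (void)
{
    u_long bytes = 256 * 1024;  /* >= 128 KiB, thus served by vmalloc() */
    void *chunk = xnarch_sysalloc(bytes);

    if (!chunk)
        return -ENOMEM;

    /* ... use chunk ... */

    xnarch_sysfree(chunk, bytes); /* same size, thus vfree() is used */
    return 0;
}
#endif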

static inline int xnarch_shadow_p (xnarchtcb_t *tcb, struct task_struct *task)
{
    /* Having the ESP backup area point into the host task's thread
       struct is the sign of a shadow thread. */
    return tcb->espp == &task->thread.esp;
}

static inline void xnarch_relay_tick (void)

{
    rthal_irq_host_pend(RTHAL_8254_IRQ);
}

#ifdef XENO_POD_MODULE

void xnpod_welcome_thread(struct xnthread *);

void xnpod_delete_thread(struct xnthread *);

static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void)) {
    return rthal_timer_request(tickhandler,ns);
}

static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    adeos_declare_cpuid;

    adeos_load_cpuid();

    /* rthal_cpu_realtime is only tested for the current processor,
       and always inside a critical section. */
    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_current_host_task(cpuid);
    /* So that xnarch_save_fpu() will operate on the right FPU area. */
    rootcb->fpup = &rootcb->user_task->thread.i387;
}

static inline void xnarch_enter_root (xnarchtcb_t *rootcb)
{
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}

static inline void __switch_threads(xnarchtcb_t *out_tcb,
                                    xnarchtcb_t *in_tcb,
                                    struct task_struct *outproc,
                                    struct task_struct *inproc
                                    )
{
#if __GNUC__ < 3 || __GNUC__ == 3 && __GNUC_MINOR__ < 2

    __asm__ __volatile__( \
        "pushl %%ecx\n\t" \
        "pushl %%edi\n\t" \
        "pushl %%ebp\n\t" \
        "movl %0,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %1,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %2,%%ecx\n\t" \
        "movl %3,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne  __switch_to\n\t" \
        "ret\n\t" \
"1:      popl %%ebp\n\t" \
        "popl %%edi\n\t" \
        "popl %%ecx\n\t" \
      : /* no output */ \
      : "m" (out_tcb->espp), \
        "m" (out_tcb->eipp), \
        "m" (in_tcb->espp), \
        "m" (in_tcb->eipp), \
        "b" (out_tcb), \
        "S" (in_tcb), \
        "a" (outproc), \
        "d" (inproc));

#else /* GCC version >= 3.2 */

    long ebx_out, ecx_out, edi_out, esi_out;

    __asm__ __volatile__( \
        "pushl %%ebp\n\t" \
        "movl %6,%%ecx\n\t" \
        "movl %%esp,(%%ecx)\n\t" \
        "movl %7,%%ecx\n\t" \
        "movl $1f,(%%ecx)\n\t" \
        "movl %8,%%ecx\n\t" \
        "movl %9,%%edi\n\t" \
        "movl (%%ecx),%%esp\n\t" \
        "pushl (%%edi)\n\t" \
        "testl %%edx,%%edx\n\t" \
        "jne  __switch_to\n\t" \
        "ret\n\t" \
"1:      popl %%ebp\n\t" \
      : "=b" (ebx_out), \
        "=&c" (ecx_out), \
        "=S" (esi_out), \
        "=D" (edi_out), \
        "+a" (outproc), \
        "+d" (inproc) \
      : "m" (out_tcb->espp), \
        "m" (out_tcb->eipp), \
        "m" (in_tcb->espp), \
        "m" (in_tcb->eipp));

#endif /* GCC version < 3.2 */
}

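/*
 * Added commentary (not in the original source): the assembly above
 * performs the actual context switch:
 *
 *  1. save the outgoing stack pointer through out_tcb->espp;
 *  2. store the resumption address (local label "1:") through
 *     out_tcb->eipp;
 *  3. load the incoming stack pointer from in_tcb->espp;
 *  4. push the incoming resumption address taken from in_tcb->eipp;
 *  5. if inproc (%edx) is non-NULL, i.e. a user-space task is coming
 *     in, tail-jump into the kernel's __switch_to() with outproc and
 *     inproc in %eax/%edx so the register/TSS state is switched too;
 *     otherwise simply "ret" to the pushed address.
 */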

static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;

    if (inproc && outproc->thread_info->status & TS_USEDFPU)
        /* __switch_to() will try to use __unlazy_fpu(), so we need
           to clear the TS bit first. */
        clts();

    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc)
        {
        struct mm_struct *oldmm = outproc->active_mm;

        switch_mm(oldmm,inproc->active_mm,inproc);

        if (!inproc->mm)
            enter_lazy_tlb(oldmm,inproc);
        }

    __switch_threads(out_tcb,in_tcb,outproc,inproc);

    if (xnarch_shadow_p(out_tcb,outproc)) {

        /* Eagerly reinstate the I/O bitmap of any incoming shadow
           thread which has previously requested I/O permissions. We
           don't want the unexpected latencies induced by lazy update
           from the GPF handler to bite shadow threads that
           explicitly told the kernel that they would need to perform
           raw I/O ops. */

        struct thread_struct *thread = &outproc->thread;

        barrier();

        if (thread->io_bitmap_ptr) {
            struct tss_struct *tss = &per_cpu(init_tss, adeos_processor_id());

            if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY) {

                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,thread->io_bitmap_max);

                if (thread->io_bitmap_max < tss->io_bitmap_max)
                    memset((char *) tss->io_bitmap +
                           thread->io_bitmap_max, 0xff,
                           tss->io_bitmap_max - thread->io_bitmap_max);

                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
            }
        }
    }

    stts();
}

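/*
 * Added note (interpretation, not in the original source): the final
 * stts() sets CR0.TS again, so the first FPU instruction executed by
 * the incoming thread raises a device-not-available fault (trap 7).
 * The nucleus recognizes it through xnarch_fault_fpu_p() and can then
 * restore the FPU state lazily, only for threads that actually use
 * the FPU.
 */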

static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}

static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)
{
    /* Empty */
}

static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
                                         struct xnthread *thread,
                                         const char *name)
{
    tcb->user_task = current;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &tcb->esp;
    tcb->eipp = &tcb->eip;
    tcb->fpup = NULL;
}

asmlinkage static void xnarch_thread_redirect (struct xnthread *self,
                                               int imask,
                                               void(*entry)(void *),
                                               void *cookie)
{
    /* xnpod_welcome_thread() will do clts() if needed. */
    stts();
    rthal_local_irq_restore(!!imask);
    xnpod_welcome_thread(self);
    entry(cookie);
    xnpod_delete_thread(self);
}

static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long **psp = (unsigned long **)&tcb->esp;

    tcb->eip = (unsigned long)&xnarch_thread_redirect;
    tcb->esp = (unsigned long)tcb->stackbase;
    **psp = 0;  /* Commit bottom stack memory */
    *psp = (unsigned long *)(((unsigned long)*psp + tcb->stacksize - 0x10) & ~0xf);
    *--(*psp) = (unsigned long)cookie;
    *--(*psp) = (unsigned long)entry;
    *--(*psp) = (unsigned long)imask;
    *--(*psp) = (unsigned long)thread;
    *--(*psp) = 0;
}

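/*
 * Added commentary (not in the original source): the frame built by
 * xnarch_init_thread() mimics a regular call into
 * xnarch_thread_redirect(). Once __switch_threads() "ret"s to the
 * saved EIP, the new stack looks like this (addresses growing
 * upward):
 *
 *     cookie      <- esp+16
 *     entry       <- esp+12
 *     imask       <- esp+8
 *     thread      <- esp+4   ("self" argument)
 *     0           <- esp     (fake return address)
 *
 * so the redirect routine finds its four arguments exactly where the
 * i386 calling convention expects them.
 */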

#ifdef CONFIG_RTAI_HW_FPU

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
#define xnarch_fpu_init_p(task)   ((task)->used_math)
#define xnarch_set_fpu_init(task) ((task)->used_math = 1)
#else
#define xnarch_fpu_init_p(task)   tsk_used_math(task)
#define xnarch_set_fpu_init(task) set_stopped_child_used_math(task)
#endif

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;
    /* Initialize the FPU for a task. This must be run on behalf of
       the task itself. */

    __asm__ __volatile__ ("clts; fninit");

    if (cpu_has_xmm)
        {
        /* MXCSR power-on default (0x1f80, all exceptions masked)
           with the DAZ bit (0x40) cleared. */
        unsigned long __mxcsr = 0x1f80UL & 0xffbfUL;
        __asm__ __volatile__ ("ldmxcsr %0": : "m" (__mxcsr));
        }

    if (task)
        {
        /* Real-time shadow FPU initialization: tell Linux that this
           thread initialized its FPU hardware. The TS_USEDFPU bit is
           necessary for xnarch_save_fpu() to save the FPU state at
           the next switch. */
        xnarch_set_fpu_init(task);
        task->thread_info->status |= TS_USEDFPU;
        }
}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    if (task)
        {
        if (!(task->thread_info->status & TS_USEDFPU))
            return;

        /* Tell Linux that we already saved the state of the FPU
           hardware of this task. */
        task->thread_info->status &= ~TS_USEDFPU;
        }

    clts();

    if (cpu_has_fxsr)
        __asm__ __volatile__ ("fxsave %0; fnclex" : "=m" (*tcb->fpup));
    else
        __asm__ __volatile__ ("fnsave %0; fwait" : "=m" (*tcb->fpup));
}

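/*
 * Added note (not in the original source): fxsave leaves the FPU
 * state intact, hence the trailing fnclex which only clears pending
 * x87 exception flags; fnsave, by contrast, fully reinitializes the
 * FPU after saving, which is why xnarch_enable_fpu() below must
 * reload the saved context on non-fxsr CPUs.
 */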

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    if (task)
        {
        if (!xnarch_fpu_init_p(task))
            {
            stts();
            return;     /* Uninitialized FPU area -- do not restore. */
            }

        /* Tell Linux that this task has altered the state of the FPU
           hardware. */
        task->thread_info->status |= TS_USEDFPU;
        }

    /* Restore the FPU hardware with valid fp registers from a
       user-space or kernel thread. */
    clts();

    if (cpu_has_fxsr)
        __asm__ __volatile__ ("fxrstor %0": /* no output */ : "m" (*tcb->fpup));
    else
        __asm__ __volatile__ ("frstor %0": /* no output */ : "m" (*tcb->fpup));
}

static inline void xnarch_enable_fpu (xnarchtcb_t *tcb)

{
    clts();

    if (!cpu_has_fxsr && tcb->user_task)
        /* fnsave also reinitializes the FPU state, so on CPUs
           without fxsr (i.e. prior to PII) we need to restore the
           saved state. */
        xnarch_restore_fpu(tcb);
}

#else /* !CONFIG_RTAI_HW_FPU */

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{}

static inline void xnarch_enable_fpu (xnarchtcb_t *tcb)

{}

#endif /* CONFIG_RTAI_HW_FPU */

#ifdef CONFIG_SMP

static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask) {

    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}

static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     (void (*)(unsigned)) handler,
                                     NULL,
                                     IPIPE_HANDLE_MASK);
}

static inline int xnarch_release_ipi (void)

{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     NULL,
                                     NULL,
                                     IPIPE_PASS_MASK);
}

static struct linux_semaphore xnarch_finalize_sync;

static void xnarch_finalize_cpu(unsigned irq)
{
    up(&xnarch_finalize_sync);
}

static inline void xnarch_notify_halt (void)

{
    xnarch_cpumask_t other_cpus = cpu_online_map;
    unsigned cpu, nr_cpus = num_online_cpus();
    unsigned long flags;
    adeos_declare_cpuid;

    sema_init(&xnarch_finalize_sync,0);

    /* Here adp_current is in fact root, since xnarch_notify_halt()
       is called from xnpod_shutdown(), itself called from Linux
       context. */
    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2,
                              xnarch_finalize_cpu, NULL, IPIPE_HANDLE_MASK);

    adeos_lock_cpu(flags);
    cpu_clear(cpuid, other_cpus);
    adeos_send_ipi(ADEOS_SERVICE_IPI2, other_cpus);
    adeos_unlock_cpu(flags);

    /* Wait until every other online CPU has acknowledged the halt
       IPI by running xnarch_finalize_cpu(). */
    for (cpu = 0; cpu < nr_cpus - 1; ++cpu)
        down(&xnarch_finalize_sync);

    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2, NULL, NULL,
                              IPIPE_PASS_MASK);
}

#else /* !CONFIG_SMP */

static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask) {

    return 0;
}

static inline int xnarch_hook_ipi (void (*handler)(void)) {

    return 0;
}

static inline int xnarch_release_ipi (void) {

    return 0;
}

#define xnarch_notify_halt() /* Nullified */

#endif /* CONFIG_SMP */

static inline void xnarch_notify_shutdown (void)

{
#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
    /* Wait for the currently processed events to drain. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}

static inline int xnarch_escalate (void)

{
    extern int xnarch_escalation_virq;

    if (adp_current == adp_root)
        {
        spl_t s;
        splsync(s);
        adeos_trigger_irq(xnarch_escalation_virq);
        splexit(s);
        return 1;
        }

    return 0;
}

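/*
 * Added commentary (not in the original source): xnarch_init() below
 * attaches xnpod_schedule_handler() to xnarch_escalation_virq inside
 * the real-time domain, so triggering that virtual interrupt from the
 * root domain "escalates" the rescheduling request to the real-time
 * domain, where the handler then runs.
 */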

static void xnarch_notify_ready (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
}

#endif /* XENO_POD_MODULE */

#ifdef XENO_THREAD_MODULE

static inline void xnarch_init_tcb (xnarchtcb_t *tcb)
{
    tcb->user_task = NULL;
    tcb->active_task = NULL;
    tcb->espp = &tcb->esp;
    tcb->eipp = &tcb->eip;
    tcb->fpup = &tcb->fpuenv;
    /* Must be followed by xnarch_init_thread(). */
}

#endif /* XENO_THREAD_MODULE */

#ifdef XENO_SHADOW_MODULE

static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
                                           struct xnthread *thread,
                                           const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &task->thread.esp;
    tcb->eipp = &task->thread.eip;
    tcb->fpup = &task->thread.i387;
}

static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        adeos_virtualize_irq(irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);
}

static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:
            case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
            case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
            case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:

                /* Never lock out these ones. */
                continue;
#endif /* CONFIG_SMP */

            default:

                __adeos_lock_irq(adp,cpuid,irq);
            }
        }
}

static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:
            case INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR:
            case CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR:
            case RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR:

                continue;
#endif /* CONFIG_SMP */

            default:

                __adeos_unlock_irq(adp,irq);
            }
        }
}

#endif /* XENO_SHADOW_MODULE */

#ifdef XENO_TIMER_MODULE

static inline void xnarch_program_timer_shot (unsigned long delay) {
    /* Even though some architectures may use a 64-bit delay here, we
       voluntarily limit it to 32 bits; 4 billion ticks should be
       enough for now. Should a timer need more, an extra call to the
       tick handler would simply occur after 4 billion ticks. */
    rthal_timer_program_shot(rthal_imuldiv(delay,RTHAL_TIMER_FREQ,RTHAL_CPU_FREQ));
}

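/*
 * Added worked example (figures assumed, not from the original
 * source): the delay comes in CPU ticks and rthal_imuldiv() rescales
 * it to timer ticks. On a 500 MHz CPU (RTHAL_CPU_FREQ = 500000000)
 * driving the 8254 PIT (RTHAL_TIMER_FREQ = 1193180), a 1 ms delay of
 * 500000 CPU ticks yields rthal_imuldiv(500000, 1193180, 500000000)
 * ~= 1193 timer ticks.
 */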

static inline void xnarch_stop_timer (void) {
    rthal_timer_release();
}

static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)

{
#ifdef CONFIG_SMP
    return adeos_send_ipi(RTHAL_APIC_TIMER_IPI, mask);
#else /* !CONFIG_SMP */
    return 0;
#endif /* CONFIG_SMP */
}

static inline void xnarch_read_timings (unsigned long long *shot,
                                        unsigned long long *delivery,
                                        unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_synced;
#else /* !CONFIG_ADEOS_PROFILING */
    *shot = defval;
    *delivery = defval;
#endif /* CONFIG_ADEOS_PROFILING */
}

#endif /* XENO_TIMER_MODULE */

#ifdef XENO_MAIN_MODULE

#include <linux/init.h>
#include <nucleus/asm/calibration.h>

extern u_long nkschedlat;

extern u_long nktimerlat;

int xnarch_escalation_virq;

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

static rthal_trap_handler_t xnarch_old_trap_handler;

static int xnarch_trap_fault (adevinfo_t *evinfo)
{
    xnarch_fltinfo_t fltinfo;

    fltinfo.vector = evinfo->event;
    fltinfo.errcode = ((struct pt_regs *)evinfo->evdata)->orig_eax;
    fltinfo.regs = (struct pt_regs *)evinfo->evdata;

    return xnpod_trap_fault(&fltinfo);
}

static inline unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified. */
    /* Compute the time needed to program the PIT in aperiodic
       mode. The return value is expressed in CPU ticks. Depending on
       whether CONFIG_X86_LOCAL_APIC is enabled in the kernel
       configuration RTAI is compiled against,
       CONFIG_RTAI_HW_TIMER_LATENCY refers either to the local APIC
       or to the 8254 timer latency. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}

int xnarch_calibrate_sched (void)

{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
        return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}

static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    err = xnarch_calibrate_sched();

    if (err)
        return err;

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    adeos_virtualize_irq_from(&rthal_domain,
                              xnarch_escalation_virq,
                              (void (*)(unsigned))&xnpod_schedule_handler,
                              NULL,
                              IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    if (err)
        {
        rthal_trap_catch(xnarch_old_trap_handler);
        adeos_free_irq(xnarch_escalation_virq);
        }

    return err;
}

static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_trap_catch(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
}

#endif /* XENO_MAIN_MODULE */

#ifdef __cplusplus
}
#endif

#else /* !__KERNEL__ */

#include <nucleus/system.h>

#endif /* __KERNEL__ */

#endif /* !_RTAI_ASM_I386_SYSTEM_H */
