system.h

/*
 * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
 * Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
 *
 * RTAI/fusion is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * RTAI/fusion is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with RTAI/fusion; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#ifndef _RTAI_ASM_IA64_SYSTEM_H
#define _RTAI_ASM_IA64_SYSTEM_H

#include <nucleus/asm-generic/system.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/ptrace.h>

#if ADEOS_RELEASE_NUMBER < 0x0206070b
#error "Adeos 2.6r7c11/ia64 or above is required to run this software; please upgrade."
#error "See http://download.gna.org/adeos/patches/v2.6/ia64/"
#endif

#ifdef CONFIG_IA64_HP_SIM
#define XNARCH_DEFAULT_TICK    31250000 /* ns, i.e. 31.25 ms */
#else
#define XNARCH_DEFAULT_TICK    XNARCH_HOST_TICK
#endif
#define XNARCH_HOST_TICK       (1000000000UL/HZ)

#define XNARCH_THREAD_STACKSZ  (1<<KERNEL_STACK_SIZE_ORDER)

#define xnarch_stack_size(tcb)  ((tcb)->stacksize)
#define xnarch_user_task(tcb)   ((tcb)->user_task)
#define xnarch_user_pid(tcb)    ((tcb)->user_task->pid)

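/* Kernel thread stacks are served from a pre-allocated recycling pool
   managed in the XENO_MAIN_MODULE section below. */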
void *xnarch_alloc_stack(unsigned long stacksize);
void xnarch_free_stack(void *block);

struct xnthread;
struct task_struct;

typedef struct xnarchtcb {      /* Per-thread arch-dependent block */

    /* Kernel mode side */

    unsigned long *espp;        /* Pointer to the ESP backup area (&esp or
                                   &user_task->thread.ksp).
                                   DON'T MOVE THIS MEMBER:
                                   switch_to depends on it. */

    struct ia64_fpreg fpuenv[96]; /* FIXME FPU: check whether alignment
                                     constraints are needed. */

    unsigned stacksize;         /* Aligned size of stack (bytes) */
    unsigned long *stackbase;   /* Stack space */
    unsigned long esp;          /* Saved ESP for kernel-based threads */

    /* User mode side */

    struct task_struct *user_task;      /* Shadowed user-space task */
    struct task_struct *active_task;    /* Active user-space task */

    struct ia64_fpreg *fpup;
#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)

} xnarchtcb_t;

typedef struct xnarch_fltinfo {

    ia64trapinfo_t ia64;
    unsigned trap;

} xnarch_fltinfo_t;

#define xnarch_fault_trap(fi)  ((fi)->trap)
#define xnarch_fault_code(fi)  ((fi)->ia64.isr)
#define xnarch_fault_pc(fi)    ((fi)->ia64.regs->cr_iip)
/* Fault is caused by use of FPU while FPU disabled. */
#define xnarch_fault_fpu_p(fi) ((fi)->trap == ADEOS_FPDIS_TRAP)
/* The following predicates are only usable over a regular Linux stack
   context. */
#define xnarch_fault_pf_p(fi)   ((fi)->trap == ADEOS_PF_TRAP)
#define xnarch_fault_bp_p(fi)   ((current->ptrace & PT_PTRACED) && \
                                 (fi)->trap == ADEOS_DEBUG_TRAP)
#define xnarch_fault_notify(fi) (!xnarch_fault_bp_p(fi))

#ifdef __cplusplus
extern "C" {
#endif

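/* Allocate system memory for the nucleus: large requests (>= 128 KB) go
   through vmalloc(), smaller ones through kmalloc(). */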
static inline void *xnarch_sysalloc (u_long bytes)

{
    if (bytes >= 128*1024)
        return vmalloc(bytes);

    return kmalloc(bytes,GFP_KERNEL);
}

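/* Release memory obtained from xnarch_sysalloc(), matching the original
   allocator by the same size threshold. */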
static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
    if (bytes >= 128*1024)
        vfree(chunk);
    else
        kfree(chunk);
}

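/* Relay the host timer tick to Linux: per-CPU IPI on SMP, virtual
   interrupt trigger on UP. */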
static inline void xnarch_relay_tick (void)

{
#ifdef CONFIG_SMP
    rthal_send_ipi(RTHAL_HOST_TIMER_IRQ, cpu_online_map);
#else /* ! CONFIG_SMP */
    rthal_trigger_irq(RTHAL_HOST_TIMER_IRQ);
#endif
}

#ifdef XENO_POD_MODULE

void xnpod_welcome_thread(struct xnthread *);

void xnpod_delete_thread(struct xnthread *);

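/* Install the nucleus tick handler. Returns a negative error code on
   failure, otherwise the time (in ns) until the next programmed ITM shot
   on the current CPU, or 0 if that shot is already overdue. */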
static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void))
{
    int err = rthal_timer_request(tickhandler,ns);
    rthal_declare_cpuid;
    long long delta;

    if (err)
        return err;

    rthal_load_cpuid();
    delta = rthal_itm_next[cpuid] - ia64_get_itc();

    return delta < 0LL ? 0LL : xnarch_tsc_to_ns(delta);
}

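/* Hook run when the nucleus takes over from the Linux root domain on
   this CPU. */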
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    struct task_struct *fpu_owner
        = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
    rthal_declare_cpuid;

    rthal_load_cpuid();

    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_root_host_task(cpuid);
    /* So that xnarch_save_fpu() will operate on the right FPU area. */
    rootcb->fpup = fpu_owner ? fpu_owner->thread.fph : NULL;
}

static inline void xnarch_enter_root (xnarchtcb_t *rootcb)
{
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}

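/* Switch context between two nucleus threads. A switch to a different
   user-space shadow goes through the regular Linux MM/task switching
   code; any other switch uses the lightweight rthal_switch_context()
   path. */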
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;

    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc)
        {
        /* We are switching to a user task different from the last
           preempted or running user task, so we can use the regular
           Linux context switch routine. */
        struct mm_struct *oldmm = outproc->active_mm;
        struct task_struct *last;

        switch_mm(oldmm,inproc->active_mm,inproc);

        if (!inproc->mm)
            enter_lazy_tlb(oldmm,inproc);

        __switch_to(outproc, inproc, last);
        }
    else
        {
        /* Use our own lightweight switch routine. */
        unsigned long gp;

        ia64_stop();
        gp = ia64_getreg(_IA64_REG_GP);
        ia64_stop();
        rthal_switch_context(out_tcb,in_tcb);
        ia64_stop();
        ia64_setreg(_IA64_REG_GP, gp);
        ia64_stop();

        /* fph will be re-enabled by xnarch_restore_fpu() if needed; this
           also returns the root thread to its usual mode. */
        ia64_fph_disable();
        }
}

static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}

static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)

{
    /* Empty */
}

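/* Map a pointer to a task's high FP register save area (thread.fph)
   back to the owning task_struct. */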
#define fph2task(faddr)                                 \
    ((struct task_struct *)((char *) (faddr) -          \
                            (size_t) &((struct task_struct *) 0)->thread.fph[0]))

#define xnarch_fpu_init_p(task) ((task)->thread.flags & IA64_THREAD_FPH_VALID)

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;
    /* Initialize the FPU for a task. This must be run on behalf of the
       task. */
    ia64_fph_enable();
    __ia64_init_fpu();
    /* The mfh bit is automatically armed, since the init_fpu routine
       modifies the FPH registers. */

    if(task)
        /* Real-time shadow FPU initialization: set the mfh bit in the
           saved registers; xnarch_save_fpu() will finish the work. Since
           tcb belongs to a shadow, there is no need to check that
           task == fph2task(tcb->fpup). */
        ia64_psr(ia64_task_regs(task))->mfh = 1;
}

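/* Save the high FP partition to the TCB's save area if it has been
   modified (psr.mfh set) since the last save, keeping Linux's view of a
   shadow's FPU state consistent. */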
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)
{
    unsigned long lpsr = ia64_getreg(_IA64_REG_PSR);
    struct ia64_psr *current_psr = (struct ia64_psr *) &lpsr;

    if (current_psr->mfh)
        {
        if(tcb->user_task && tcb->fpup)
            {
            struct task_struct *linux_fpu_owner = fph2task(tcb->fpup);
            struct ia64_psr *psr = ia64_psr(ia64_task_regs(linux_fpu_owner));

            /* Keep the FPU save zone in sync with what Linux expects. */
            psr->mfh = 0;
            linux_fpu_owner->thread.flags |= IA64_THREAD_FPH_VALID;
            }

        ia64_fph_enable();
        __ia64_save_fpu(tcb->fpup);
        ia64_rsm(IA64_PSR_MFH);
        ia64_srlz_d();
        }
}

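/* Reload the high FP partition from the TCB's save area (if it holds
   valid state). fph is left disabled when the incoming thread is not the
   Linux owner of that FPU context. */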
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *linux_fpu_owner;
    int need_disabled_fph;

    if (tcb->user_task && tcb->fpup)
        {
        linux_fpu_owner = fph2task(tcb->fpup);

        if(!xnarch_fpu_init_p(linux_fpu_owner))
            return;     /* Uninitialized FPU area -- do not restore. */

        /* Disable fph if we are not switching back to the task that
           owns the FPU. */
        need_disabled_fph = linux_fpu_owner != tcb->user_task;
        }
    else
        need_disabled_fph = 0;

    /* Restore the FPU hardware with valid fp registers from a
       user-space or kernel thread. */
    ia64_fph_enable();
    __ia64_load_fpu(tcb->fpup);
    ia64_rsm(IA64_PSR_MFH);
    ia64_srlz_d();

    if(need_disabled_fph)
        ia64_fph_disable();
}

static inline void xnarch_enable_fpu(xnarchtcb_t *tcb)
{
    if (tcb->user_task && tcb->fpup && fph2task(tcb->fpup) != tcb->user_task)
        return;

    ia64_fph_enable();
}

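/* Set up the architecture-dependent TCB for the root (Linux) thread of
   the current CPU. */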
static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
                                         struct xnthread *thread,
                                         const char *name)
{
    tcb->user_task = current;
    tcb->active_task = NULL;
    tcb->espp = &tcb->esp;
    tcb->fpup = current->thread.fph;
}

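/* First code executed by a new kernel thread: finalize its setup, run
   the user-supplied entry routine, then delete the thread when that
   routine returns. */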
static void xnarch_thread_trampoline (struct xnthread *self,
                                      int imask,
                                      void(*entry)(void *),
                                      void *cookie)
{
    /* xnpod_welcome_thread() will do ia64_fph_enable() if needed. */
    ia64_fph_disable();
    rthal_local_irq_restore(!!imask);
    rthal_local_irq_enable_hw();
    xnpod_welcome_thread(self);
    entry(cookie);
    xnpod_delete_thread(self);
}

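/* Build the initial stack and register backing store of a new kernel
   thread; the first switch to it will resume below with a non-zero esp
   marker and branch into xnarch_thread_trampoline(). */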
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long rbs,bspstore,child_stack,child_rbs,rbs_size;
    unsigned long stackbase = (unsigned long) tcb->stackbase;
    struct switch_stack *swstack;

    tcb->esp = 0;

    /* The stack should already have been allocated. */
    rthal_prepare_stack(stackbase+KERNEL_STACK_SIZE);

    /* The value of esp is used as a marker to indicate whether we are
       initializing a new task or we are back from the context switch. */

    if (tcb->esp != 0)
        xnarch_thread_trampoline(thread, imask, entry, cookie);

    child_stack = stackbase + KERNEL_STACK_SIZE - IA64_SWITCH_STACK_SIZE;
    tcb->esp = child_stack;
    swstack = (struct switch_stack *)child_stack;
    bspstore = swstack->ar_bspstore;

    rbs = (ia64_getreg(_IA64_REG_SP) & ~(KERNEL_STACK_SIZE-1)) + IA64_RBS_OFFSET;
    child_rbs = stackbase + IA64_RBS_OFFSET;
    rbs_size = bspstore - rbs;

    memcpy((void *)child_rbs,(void *)rbs,rbs_size);
    swstack->ar_bspstore = child_rbs + rbs_size;
    tcb->esp -= 16;     /* Provide for the (bloody) scratch area... */
}

#endif /* XENO_POD_MODULE */

#ifdef XENO_THREAD_MODULE

static inline void xnarch_init_tcb (xnarchtcb_t *tcb)
{
    tcb->user_task = NULL;
    tcb->active_task = NULL;
    tcb->espp = &tcb->esp;
    tcb->fpup = tcb->fpuenv;
    /* Must be followed by xnarch_init_thread(). */
}

#endif /* XENO_THREAD_MODULE */

#ifdef XENO_SHADOW_MODULE

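/* Set up the arch-dependent TCB of a user-space task being promoted to a
   real-time shadow; espp points at the task's thread.ksp so the context
   switch code operates on it. */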
static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
                                           struct xnthread *thread,
                                           const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &task->thread.ksp;
    tcb->fpup = task->thread.fph;
}

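/* Redirect all external interrupts to the given handler in the current
   domain. */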
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        rthal_virtualize_irq(rthal_current_domain,
                             irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);
}

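/* Lock out (respectively unlock) all external interrupts for the given
   domain, except the inter-processor vectors which must always remain
   live on SMP. */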
static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        unsigned vector = __ia64_local_vector_to_irq(irq);

        switch (vector)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_VECTOR:
            case IA64_IPI_RESCHEDULE:
            case IA64_IPI_VECTOR:

                /* Never lock out these ones. */
                continue;
#endif /* CONFIG_SMP */

            default:

                rthal_lock_irq(adp,cpuid,irq);
            }
        }
}

static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        unsigned vector = local_vector_to_irq(irq);

        switch (vector)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_VECTOR:
            case IA64_IPI_RESCHEDULE:
            case IA64_IPI_VECTOR:

                continue;
#endif /* CONFIG_SMP */

            default:

                rthal_unlock_irq(adp,irq);
            }
        }
}

#endif /* XENO_SHADOW_MODULE */

#ifdef XENO_TIMER_MODULE

static inline void xnarch_program_timer_shot (unsigned long delay)
{
    rthal_timer_program_shot(delay);
}

static inline void xnarch_stop_timer (void)
{
    rthal_timer_release();
}

static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)

{
#ifdef CONFIG_SMP
    return rthal_send_ipi(RTHAL_TIMER_IRQ, mask);
#else /* ! CONFIG_SMP */
    return 0;
#endif /* CONFIG_SMP */
}

#endif /* XENO_TIMER_MODULE */

#ifdef XENO_MAIN_MODULE

#include <linux/init.h>
#include <nucleus/asm/calibration.h>

extern u_long nkschedlat;

extern u_long nktimerlat;

int xnarch_escalation_virq;

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

static rthal_trap_handler_t xnarch_old_trap_handler;

typedef struct xnarch_stack {
    struct xnarch_stack *next;
} xnarch_stack_t;

#ifdef CONFIG_SMP
static xnlock_t xnarch_stacks_lock = XNARCH_LOCK_UNLOCKED;
#endif
static atomic_counter_t xnarch_allocated_stacks;

static xnarch_stack_t xnarch_free_stacks_q;
static atomic_counter_t xnarch_free_stacks_count;

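/* Low-level trap handler: repackage the Adeos trap information and hand
   it over to the nucleus. */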
static int xnarch_trap_fault (unsigned event, unsigned domid, void *data)
{
    xnarch_fltinfo_t fltinfo;

    fltinfo.trap = event;
    fltinfo.ia64 = *(ia64trapinfo_t *)data;

    return xnpod_trap_fault(&fltinfo);
}

unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY);
#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified. */
    /* Compute the time needed to program the ITM in aperiodic
       mode. The return value is expressed in CPU ticks. */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}

int xnarch_calibrate_sched (void)

{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
        return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}

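/* Minimal LIFO used to recycle kernel thread stacks, so that real-time
   callers can obtain a stack without entering the Linux allocators. */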
static inline void stacksq_push(xnarch_stack_t *q, xnarch_stack_t *stack)
{
    stack->next = q->next;
    q->next = stack;
}

static inline xnarch_stack_t *stacksq_pop(xnarch_stack_t *q)
{
    xnarch_stack_t *stack = q->next;

    if(stack)
        q->next = stack->next;

    return stack;
}

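/* Get a kernel thread stack: when called from the Linux (root) domain
   with the recycling pool at or below its configured size, pull fresh
   pages from Linux; otherwise take a stack from the pool. */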
void *xnarch_alloc_stack(unsigned long stacksize)

{
    xnarch_stack_t *stack;
    spl_t s;

    if (stacksize > KERNEL_STACK_SIZE)
        return NULL;

    if (rthal_current_domain == rthal_root_domain &&
        atomic_read(&xnarch_free_stacks_count) <= CONFIG_RTAI_HW_IA64_STACK_POOL)
        {
        stack = (xnarch_stack_t *)
            __get_free_pages(GFP_KERNEL,KERNEL_STACK_SIZE_ORDER);

        if(stack)
            atomic_inc(&xnarch_allocated_stacks);

        return stack;
        }

    xnlock_get_irqsave(&xnarch_stacks_lock, s);
    stack = stacksq_pop(&xnarch_free_stacks_q);
    xnlock_put_irqrestore(&xnarch_stacks_lock, s);

    if (stack)
        atomic_dec(&xnarch_free_stacks_count);

    return stack;
}

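/* Return a kernel thread stack: give the pages back to Linux when called
   from the root domain with the pool above its configured size, otherwise
   push the stack back onto the recycling pool. */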
void xnarch_free_stack(void *block)

{
    xnarch_stack_t *stack = (xnarch_stack_t *) block;
    spl_t s;

    if (!stack)
        return;

    if (rthal_current_domain == rthal_root_domain
        && atomic_read(&xnarch_free_stacks_count) > CONFIG_RTAI_HW_IA64_STACK_POOL)
        {
        atomic_dec(&xnarch_allocated_stacks);

        free_pages((unsigned long) block,KERNEL_STACK_SIZE_ORDER);

        return;
        }

    xnlock_get_irqsave(&xnarch_stacks_lock, s);
    stacksq_push(&xnarch_free_stacks_q, stack);
    xnlock_put_irqrestore(&xnarch_stacks_lock, s);

    atomic_inc(&xnarch_free_stacks_count);
}

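/* Pre-fill the stack recycling pool up to its configured size at module
   init time. */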
static int xnarch_stack_pool_init(void)

{
    while (atomic_read(&xnarch_free_stacks_count) < CONFIG_RTAI_HW_IA64_STACK_POOL)
        {
        void *stack = xnarch_alloc_stack(KERNEL_STACK_SIZE);

        if(!stack)
            return -ENOMEM;

        xnarch_free_stack(stack);
        }

    return 0;
}

static void xnarch_stack_pool_destroy(void)

{
    xnarch_stack_t *stack;

    stack = stacksq_pop(&xnarch_free_stacks_q);

    while (stack)
        {
        free_pages((unsigned long) stack, KERNEL_STACK_SIZE_ORDER);
        stack = stacksq_pop(&xnarch_free_stacks_q);

        if(atomic_dec_and_test(&xnarch_allocated_stacks))
            break;
        }

    if (atomic_read(&xnarch_allocated_stacks) != 0)
        xnarch_logwarn("leaked %u kernel thread stacks.\n",
                       atomic_read(&xnarch_allocated_stacks));

    if (xnarch_free_stacks_q.next)
        xnarch_logwarn("kernel thread stack pool corrupted.\n");
}

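/* Bring up the architecture layer: calibrate the scheduler, set up the
   escalation virtual IRQ and trap handler, mount the fusion shadow
   support when configured, and pre-fill the stack pool. */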
static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    err = xnarch_calibrate_sched();

    if (err)
        return err;

    xnarch_escalation_virq = rthal_alloc_virq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    rthal_virtualize_irq(&rthal_domain,
                         xnarch_escalation_virq,
                         (void (*)(unsigned))&xnpod_schedule_handler,
                         NULL,
                         IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    if (err)
        goto release_trap;

    err = xnarch_stack_pool_init();

    if (!err)
        return 0;

#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */

 release_trap:
    rthal_trap_catch(xnarch_old_trap_handler);
    rthal_free_virq(xnarch_escalation_virq);

    return err;
}

static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_trap_catch(xnarch_old_trap_handler);
    rthal_free_virq(xnarch_escalation_virq);
    xnarch_stack_pool_destroy();
}

#endif /* XENO_MAIN_MODULE */

#ifdef __cplusplus
}
#endif

#else /* !__KERNEL__ */

#include <nucleus/system.h>
#include <bits/local_lim.h>

#endif /* __KERNEL__ */

#endif /* !_RTAI_ASM_IA64_SYSTEM_H */
