#ifndef _RTAI_ASM_IA64_SYSTEM_H
#define _RTAI_ASM_IA64_SYSTEM_H

#include <nucleus/asm-generic/system.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/ptrace.h>

#if ADEOS_RELEASE_NUMBER < 0x0206070b
#error "Adeos 2.6r7c11/ia64 or above is required to run this software; please upgrade."
#error "See http://download.gna.org/adeos/patches/v2.6/ia64/"
#endif

#ifdef CONFIG_IA64_HP_SIM
/* 31.25 ms tick (32 Hz) for the HP simulator. */
#define XNARCH_DEFAULT_TICK 31250000
#else
#define XNARCH_DEFAULT_TICK XNARCH_HOST_TICK
#endif
#define XNARCH_HOST_TICK (1000000000UL/HZ)

#define XNARCH_THREAD_STACKSZ (1<<KERNEL_STACK_SIZE_ORDER)

#define xnarch_stack_size(tcb) ((tcb)->stacksize)
#define xnarch_user_task(tcb)  ((tcb)->user_task)

void *xnarch_alloc_stack(unsigned long stacksize);
void xnarch_free_stack(void *block);

struct xnthread;
struct task_struct;
typedef struct xnarchtcb {	/* Per-thread arch-dependent block */

    /* Pointer to the saved stack pointer slot: &esp below for
       kernel-based threads, or the Linux task's thread.ksp for
       shadow threads. */
    unsigned long *espp;

    /* Backup area for the 96 high floating-point registers
       (f32-f127) of kernel-based threads. */
    struct ia64_fpreg fpuenv[96];

    unsigned stacksize;		/* Size of the stack, in bytes */
    unsigned long *stackbase;	/* Base address of the stack memory */
    unsigned long esp;		/* Saved stack pointer for kernel-based threads */

    struct task_struct *user_task;	/* Associated Linux task, or NULL */
    struct task_struct *active_task;	/* Linux task active on last switch */

    struct ia64_fpreg *fpup;	/* &fpuenv above, or the Linux task's thread.fph */
#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)

} xnarchtcb_t;

typedef struct xnarch_fltinfo {

    ia64trapinfo_t ia64;	/* Low-level trap information from Adeos */
    unsigned trap;		/* Trap (event) number */

} xnarch_fltinfo_t;

#define xnarch_fault_trap(fi)  ((fi)->trap)
#define xnarch_fault_code(fi)  ((fi)->ia64.isr)
#define xnarch_fault_pc(fi)    ((fi)->ia64.regs->cr_iip)

/* FPU-disabled fault, i.e. first access to the high FP registers
   while fph is disabled. */
#define xnarch_fault_fpu_p(fi) ((fi)->trap == ADEOS_FPDIS_TRAP)

#define xnarch_fault_pf_p(fi)  ((fi)->trap == ADEOS_PF_TRAP)

/* Notify the nucleus unless a ptraced task hit a debug trap. */
#define xnarch_fault_notify(fi) (!(current->ptrace & PT_PTRACED) || \
				 (fi)->trap != ADEOS_DEBUG_TRAP)
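
/*
 * Illustrative use of the fault accessors above (not part of the
 * original header; guarded out of compilation): a fault handler can
 * discriminate the trap kinds like this.
 */
#if 0
static int example_fault_handler(xnarch_fltinfo_t *fi)
{
    if (xnarch_fault_fpu_p(fi))
	return 1;	/* FPU-disabled fault: handle lazily. */

    if (xnarch_fault_pf_p(fi))
	return 0;	/* Page fault: let Linux process it. */

    return xnarch_fault_notify(fi);	/* Skip ptrace debug traps. */
}
#endif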

#ifdef __cplusplus
extern "C" {
#endif

static inline void *xnarch_sysalloc (u_long bytes)

{
    /* Use vmalloc() for large requests, kmalloc() otherwise. */
    if (bytes >= 128*1024)
	return vmalloc(bytes);

    return kmalloc(bytes,GFP_KERNEL);
}

static inline void xnarch_sysfree (void *chunk, u_long bytes)

{
    /* Mirror xnarch_sysalloc(): the size selects the release routine. */
    if (bytes >= 128*1024)
	vfree(chunk);
    else
	kfree(chunk);
}
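
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the allocator selects vmalloc() for requests of 128Kb and above and
 * kmalloc(GFP_KERNEL) below that, so callers must pass the same size
 * to xnarch_sysfree() for the chunk to reach the matching release
 * routine. Both calls assume Linux (root domain) context.
 */
#if 0
static void example_sysheap_usage(void)
{
    u_long bytes = 256 * 1024;	/* >= 128Kb, so vmalloc() is used. */
    void *chunk = xnarch_sysalloc(bytes);

    if (chunk)
	xnarch_sysfree(chunk, bytes);	/* Same size selects vfree(). */
}
#endif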

static inline void xnarch_relay_tick (void)

{
    /* Propagate the timer tick to the Linux host timer. */
#ifdef CONFIG_SMP
    adeos_send_ipi(RTHAL_HOST_TIMER_IRQ, cpu_online_map);
#else
    adeos_trigger_irq(RTHAL_HOST_TIMER_IRQ);
#endif
}

#ifdef XENO_POD_MODULE

void xnpod_welcome_thread(struct xnthread *);

void xnpod_delete_thread(struct xnthread *);

static inline int xnarch_start_timer (unsigned long ns,
				      void (*tickhandler)(void))
{
    int err = rthal_timer_request(tickhandler,ns);
    adeos_declare_cpuid;
    long long delta;

    if (err)
	return err;

    /* Return the time to the next shot already programmed into the
       ITM, in nanoseconds, clamped to zero if that shot is overdue. */
    adeos_load_cpuid();
    delta = __adeos_itm_next[cpuid] - ia64_get_itc();

    return delta < 0LL ? 0LL : xnarch_tsc_to_ns(delta);
}

static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    struct task_struct *fpu_owner
	= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
    adeos_declare_cpuid;

    adeos_load_cpuid();

    /* Mark this CPU as running real-time threads. */
    __set_bit(cpuid,&rthal_cpu_realtime);

    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_root_host_task(cpuid);

    /* The FPU save area to consider is the current owner's, if any. */
    rootcb->fpup = fpu_owner ? fpu_owner->thread.fph : NULL;
}

static inline void xnarch_enter_root (xnarchtcb_t *rootcb)
{
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}

static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
				     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;

    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc)
	{
	/* Switching to a thread backed by a different Linux task:
	   reinstate its memory context before switching the register
	   state. */

	struct mm_struct *oldmm = outproc->active_mm;
	struct task_struct *last;

	switch_mm(oldmm,inproc->active_mm,inproc);

	if (!inproc->mm)
	    enter_lazy_tlb(oldmm,inproc);

	__switch_to(outproc, inproc, last);
	}
    else
	{
	/* Kernel-to-kernel switch: swap the context through the HAL,
	   preserving the global pointer across the switch. The
	   ia64_stop() calls insert instruction-group breaks around
	   the gp accesses. */

	unsigned long gp;

	ia64_stop();
	gp = ia64_getreg(_IA64_REG_GP);
	ia64_stop();
	rthal_switch_context(out_tcb,in_tcb);
	ia64_stop();
	ia64_setreg(_IA64_REG_GP, gp);
	ia64_stop();

	/* Leave the high FP registers disabled, so that the next
	   access faults and the FPU state is handled lazily. */
	ia64_fph_disable();
	}
}

static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
					       xnarchtcb_t *next_tcb)
{
    xnarch_switch_to(dead_tcb,next_tcb);
}

static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)

{
    /* Nothing to do. */
}

#define fph2task(faddr)						\
    ((struct task_struct *)((char *) (faddr) -			\
			    (size_t) &((struct task_struct *) 0)->thread.fph[0]))
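
/*
 * fph2task() recovers the task_struct owning a high floating-point
 * save area from a pointer to that area (task->thread.fph), by
 * subtracting the field offset -- the usual container_of()/offsetof()
 * arithmetic. A minimal round-trip sketch (illustrative only):
 */
#if 0
static void example_fph2task(struct task_struct *task)
{
    struct ia64_fpreg *fph = task->thread.fph;
    struct task_struct *owner = fph2task(fph);

    /* owner == task holds here. */
}
#endif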

#define xnarch_fpu_init_p(task) ((task)->thread.flags & IA64_THREAD_FPH_VALID)

static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *task = tcb->user_task;

    /* Initialize the high FP registers; this must run on behalf of
       the thread being initialized. */
    ia64_fph_enable();
    __ia64_init_fpu();

    if (task)
	/* Mark the hosting Linux task's fph contents as modified, so
	   that they get saved on the next context switch. */
	ia64_psr(ia64_task_regs(task))->mfh = 1;
}

static inline void xnarch_save_fpu (xnarchtcb_t *tcb)
{
    unsigned long lpsr = ia64_getreg(_IA64_REG_PSR);
    struct ia64_psr *current_psr = (struct ia64_psr *) &lpsr;

    /* Only save if the high FP registers were actually modified. */
    if (current_psr->mfh)
	{
	if (tcb->user_task && tcb->fpup)
	    {
	    struct task_struct *linux_fpu_owner = fph2task(tcb->fpup);
	    struct ia64_psr *psr = ia64_psr(ia64_task_regs(linux_fpu_owner));

	    /* The Linux-side fph image is now in sync. */
	    psr->mfh = 0;
	    linux_fpu_owner->thread.flags |= IA64_THREAD_FPH_VALID;
	    }

	ia64_fph_enable();
	__ia64_save_fpu(tcb->fpup);
	ia64_rsm(IA64_PSR_MFH);
	ia64_srlz_d();
	}
}

static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
    struct task_struct *linux_fpu_owner;
    int need_disabled_fph;

    if (tcb->user_task && tcb->fpup)
	{
	linux_fpu_owner = fph2task(tcb->fpup);

	/* Nothing to restore if the owner never used the FPU. */
	if (!xnarch_fpu_init_p(linux_fpu_owner))
	    return;

	/* If the incoming thread is not the Linux-side owner of the
	   save area, fph must be left disabled after the reload, so
	   that a later access faults and gets handled properly. */
	need_disabled_fph = linux_fpu_owner != tcb->user_task;
	}
    else
	need_disabled_fph = 0;

    ia64_fph_enable();
    __ia64_load_fpu(tcb->fpup);
    ia64_rsm(IA64_PSR_MFH);
    ia64_srlz_d();

    if (need_disabled_fph)
	ia64_fph_disable();
}

static inline void xnarch_enable_fpu(xnarchtcb_t *tcb)
{
    ia64_fph_enable();
}

static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
					 struct xnthread *thread,
					 const char *name)
{
    tcb->user_task = current;
    tcb->active_task = NULL;
    tcb->espp = &tcb->esp;
    tcb->fpup = current->thread.fph;
}

static void xnarch_thread_trampoline (struct xnthread *self,
				      int imask,
				      void(*entry)(void *),
				      void *cookie)
{
    /* Start the new thread with fph disabled (lazy FPU handling),
       the requested interrupt mask, and hardware interrupts on. */
    ia64_fph_disable();
    rthal_local_irq_restore(!!imask);
    rthal_hw_enable();
    xnpod_welcome_thread(self);
    entry(cookie);
    xnpod_delete_thread(self);
}

static inline void xnarch_init_thread (xnarchtcb_t *tcb,
				       void (*entry)(void *),
				       void *cookie,
				       int imask,
				       struct xnthread *thread,
				       char *name)
{
    unsigned long rbs,bspstore,child_stack,child_rbs,rbs_size;
    unsigned long stackbase = (unsigned long) tcb->stackbase;
    struct switch_stack *swstack;

    tcb->esp = 0;

    rthal_prepare_stack(stackbase+KERNEL_STACK_SIZE);

    /* rthal_prepare_stack() snapshots the current context onto the
       new stack; when that context is later resumed by a switch into
       the new thread, esp is non-zero and we branch into the
       trampoline instead of returning to the caller. */
    if (tcb->esp != 0)
	xnarch_thread_trampoline(thread, imask, entry, cookie);

    child_stack = stackbase + KERNEL_STACK_SIZE - IA64_SWITCH_STACK_SIZE;
    tcb->esp = child_stack;
    swstack = (struct switch_stack *)child_stack;
    bspstore = swstack->ar_bspstore;

    /* Copy the creator's register backing store into the new stack
       and rebase ar.bspstore accordingly. */
    rbs = (ia64_getreg(_IA64_REG_SP) & ~(KERNEL_STACK_SIZE-1)) + IA64_RBS_OFFSET;
    child_rbs = stackbase + IA64_RBS_OFFSET;
    rbs_size = bspstore - rbs;

    memcpy((void *)child_rbs,(void *)rbs,rbs_size);
    swstack->ar_bspstore = child_rbs + rbs_size;
    tcb->esp -= 16;	/* Reserve the 16-byte ABI scratch area. */
}
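
/*
 * Resulting layout of the new thread's stack as built above
 * (illustrative summary derived from the code; addresses grow
 * upward):
 *
 *   stackbase + KERNEL_STACK_SIZE                  top of stack
 *   top - IA64_SWITCH_STACK_SIZE                   struct switch_stack;
 *                                                  tcb->esp points 16
 *                                                  bytes below it
 *   stackbase + IA64_RBS_OFFSET .. + rbs_size      copy of the creator's
 *                                                  register backing store
 *   stackbase                                      bottom of stack
 */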

#ifdef CONFIG_SMP

static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask)

{
    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}

static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return adeos_virtualize_irq_from(&rthal_domain,
				     ADEOS_SERVICE_IPI0,
				     (void (*)(unsigned)) handler,
				     NULL,
				     IPIPE_HANDLE_MASK);
}

static inline int xnarch_release_ipi (void)

{
    return adeos_virtualize_irq_from(&rthal_domain,
				     ADEOS_SERVICE_IPI0,
				     NULL,
				     NULL,
				     IPIPE_PASS_MASK);
}

static struct semaphore xnarch_finalize_sync;

static void xnarch_finalize_cpu(unsigned irq)
{
    up(&xnarch_finalize_sync);
}

static inline void xnarch_notify_halt(void)

{
    unsigned cpu, nr_cpus = num_online_cpus();
    cpumask_t other_cpus = cpu_online_map;
    unsigned long flags;
    adeos_declare_cpuid;

    init_MUTEX_LOCKED(&xnarch_finalize_sync);

    /* Send an IPI to all other online CPUs and wait for each of them
       to acknowledge through xnarch_finalize_cpu(). */
    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2,
			      xnarch_finalize_cpu, NULL, IPIPE_HANDLE_MASK);

    adeos_lock_cpu(flags);
    cpu_clear(cpuid, other_cpus);
    adeos_send_ipi(ADEOS_SERVICE_IPI2, other_cpus);
    adeos_unlock_cpu(flags);

    for (cpu = 0; cpu < nr_cpus-1; ++cpu)
	down(&xnarch_finalize_sync);

    adeos_virtualize_irq_from(adp_current, ADEOS_SERVICE_IPI2, NULL, NULL,
			      IPIPE_PASS_MASK);
}

#else /* !CONFIG_SMP */

static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask)

{
    return 0;
}

static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return 0;
}

static inline int xnarch_release_ipi (void)

{
    return 0;
}

#define xnarch_notify_halt()

#endif /* CONFIG_SMP */

static inline void xnarch_notify_shutdown(void)

{
#ifdef CONFIG_SMP
    /* Migrate to the boot CPU, where the shutdown chores must run. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif
    /* Give the pending events some time to drain. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}

static inline int xnarch_escalate (void)

{
    extern int xnarch_escalation_virq;

    /* When running over the root domain, trigger the escalation virq
       so that the rescheduling takes place in the real-time domain. */
    if (adp_current == adp_root)
	{
	spl_t s;
	splsync(s);
	adeos_trigger_irq(xnarch_escalation_virq);
	splexit(s);
	return 1;
	}

    return 0;
}

static void xnarch_notify_ready (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif
}

#endif /* XENO_POD_MODULE */

#ifdef XENO_THREAD_MODULE

static inline void xnarch_init_tcb (xnarchtcb_t *tcb)
{
    tcb->user_task = NULL;
    tcb->active_task = NULL;
    tcb->espp = &tcb->esp;
    tcb->fpup = tcb->fpuenv;
    /* Must be followed by xnarch_init_thread(). */
}

#endif /* XENO_THREAD_MODULE */

#ifdef XENO_SHADOW_MODULE

static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
					   struct xnthread *thread,
					   const char *name)
{
    struct task_struct *task = current;

    tcb->user_task = task;
    tcb->active_task = NULL;
    tcb->esp = 0;
    tcb->espp = &task->thread.ksp;
    tcb->fpup = task->thread.fph;
}

static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	adeos_virtualize_irq(irq,
			     handler,
			     NULL,
			     IPIPE_DYNAMIC_MASK);
}

static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	{
	unsigned vector = __ia64_local_vector_to_irq(irq);

	switch (vector)
	    {
#ifdef CONFIG_SMP
	    case ADEOS_CRITICAL_VECTOR:
	    case IA64_IPI_RESCHEDULE:
	    case IA64_IPI_VECTOR:

		/* Never lock out the inter-CPU service vectors. */
		continue;
#endif

	    default:

		__adeos_lock_irq(adp,cpuid,irq);
	    }
	}
}

static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
	{
	unsigned vector = local_vector_to_irq(irq);

	switch (vector)
	    {
#ifdef CONFIG_SMP
	    case ADEOS_CRITICAL_VECTOR:
	    case IA64_IPI_RESCHEDULE:
	    case IA64_IPI_VECTOR:

		continue;
#endif

	    default:

		__adeos_unlock_irq(adp,irq);
	    }
	}
}

#endif /* XENO_SHADOW_MODULE */

#ifdef XENO_TIMER_MODULE

static inline void xnarch_program_timer_shot (unsigned long delay)
{
    rthal_timer_program_shot(delay);
}

static inline void xnarch_stop_timer (void)
{
    rthal_timer_release();
}

static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)

{
    return adeos_send_ipi(RTHAL_TIMER_IRQ, mask);
}

static inline void xnarch_read_timings (unsigned long long *shot,
					unsigned long long *delivery,
					unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[RTHAL_TIMER_IRQ].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[RTHAL_TIMER_IRQ].t_synced;
#else /* !CONFIG_ADEOS_PROFILING */
    *shot = defval;
    *delivery = defval;
#endif /* CONFIG_ADEOS_PROFILING */
}

#endif /* XENO_TIMER_MODULE */

#ifdef XENO_MAIN_MODULE

#include <linux/init.h>
#include <nucleus/asm/calibration.h>

extern u_long nkschedlat;

extern u_long nktimerlat;

int xnarch_escalation_virq;

int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);

void xnpod_schedule_handler(void);

static rthal_trap_handler_t xnarch_old_trap_handler;

typedef struct xnarch_stack {
    struct xnarch_stack *next;
} xnarch_stack_t;

#ifdef CONFIG_SMP
static xnlock_t xnarch_stacks_lock = XNARCH_LOCK_UNLOCKED;
#endif
static atomic_counter_t xnarch_allocated_stacks;

static xnarch_stack_t xnarch_free_stacks_q;	/* LIFO of free stacks */
static atomic_counter_t xnarch_free_stacks_count;

static int xnarch_trap_fault (adevinfo_t *evinfo)

{
    xnarch_fltinfo_t fltinfo;

    fltinfo.trap = evinfo->event;
    fltinfo.ia64 = *(ia64trapinfo_t *)evinfo->evdata;

    return xnpod_trap_fault(&fltinfo);
}

unsigned long xnarch_calibrate_timer (void)

{
#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY);
#else /* CONFIG_RTAI_HW_TIMER_LATENCY == 0 */
    /* Compute the time needed to program the timer in one-shot mode;
       never return zero, since a null value means "uncalibrated". */
    return xnarch_ns_to_tsc(rthal_timer_calibrate()) ?: 1;
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}

int xnarch_calibrate_sched (void)

{
    nktimerlat = xnarch_calibrate_timer();

    if (!nktimerlat)
	return -ENODEV;

    nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());

    return 0;
}

static inline void stacksq_push(xnarch_stack_t *q, xnarch_stack_t *stack)
{
    stack->next = q->next;
    q->next = stack;
}

static inline xnarch_stack_t *stacksq_pop(xnarch_stack_t *q)
{
    xnarch_stack_t *stack = q->next;

    if (stack)
	q->next = stack->next;

    return stack;
}

void *xnarch_alloc_stack(unsigned long stacksize)

{
    xnarch_stack_t *stack;
    spl_t s;

    if (stacksize > KERNEL_STACK_SIZE)
	return NULL;

    /* From the root domain, allocate fresh pages from Linux whenever
       the preallocated pool is at or below its target level, keeping
       the pool available to real-time callers. */
    if (adp_current == adp_root &&
	atomic_read(&xnarch_free_stacks_count) <= CONFIG_RTAI_HW_IA64_STACK_POOL)
	{
	stack = (xnarch_stack_t *)
	    __get_free_pages(GFP_KERNEL,KERNEL_STACK_SIZE_ORDER);

	if (stack)
	    atomic_inc(&xnarch_allocated_stacks);

	return stack;
	}

    xnlock_get_irqsave(&xnarch_stacks_lock, s);
    stack = stacksq_pop(&xnarch_free_stacks_q);
    xnlock_put_irqrestore(&xnarch_stacks_lock, s);

    if (stack)
	atomic_dec(&xnarch_free_stacks_count);

    return stack;
}

void xnarch_free_stack(void *block)

{
    xnarch_stack_t *stack = (xnarch_stack_t *) block;
    spl_t s;

    if (!stack)
	return;

    /* From the root domain, release to Linux once the pool is back
       above its target level; otherwise refill the pool. */
    if (adp_current == adp_root
	&& atomic_read(&xnarch_free_stacks_count) > CONFIG_RTAI_HW_IA64_STACK_POOL)
	{
	atomic_dec(&xnarch_allocated_stacks);
	free_pages((unsigned long) block,KERNEL_STACK_SIZE_ORDER);
	return;
	}

    xnlock_get_irqsave(&xnarch_stacks_lock, s);
    stacksq_push(&xnarch_free_stacks_q, stack);
    xnlock_put_irqrestore(&xnarch_stacks_lock, s);

    atomic_inc(&xnarch_free_stacks_count);
}
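
/*
 * Usage sketch (illustrative only): stacks come in fixed
 * KERNEL_STACK_SIZE chunks and larger requests fail. From the root
 * domain the allocator falls back to __get_free_pages() while the
 * preallocated pool holds CONFIG_RTAI_HW_IA64_STACK_POOL entries or
 * fewer, so real-time (non-root) callers can still be served from
 * the pool without calling into Linux.
 */
#if 0
static void example_stack_usage(void)
{
    void *stack = xnarch_alloc_stack(KERNEL_STACK_SIZE);

    if (stack)
	xnarch_free_stack(stack);	/* Back to the pool, or to Linux. */
}
#endif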

static int xnarch_stack_pool_init(void)

{
    /* Preallocate the pool by cycling stacks through the allocator. */
    while (atomic_read(&xnarch_free_stacks_count) < CONFIG_RTAI_HW_IA64_STACK_POOL)
	{
	void *stack = xnarch_alloc_stack(KERNEL_STACK_SIZE);

	if (!stack)
	    return -ENOMEM;

	xnarch_free_stack(stack);
	}

    return 0;
}

static void xnarch_stack_pool_destroy(void)

{
    xnarch_stack_t *stack;

    stack = stacksq_pop(&xnarch_free_stacks_q);

    while (stack)
	{
	free_pages((unsigned long) stack, KERNEL_STACK_SIZE_ORDER);
	stack = stacksq_pop(&xnarch_free_stacks_q);

	if (atomic_dec_and_test(&xnarch_allocated_stacks))
	    break;
	}

    if (atomic_read(&xnarch_allocated_stacks) != 0)
	xnarch_logwarn("leaked %u kernel thread stacks.\n",
		       atomic_read(&xnarch_allocated_stacks));

    if (xnarch_free_stacks_q.next)
	xnarch_logwarn("kernel thread stack pool corrupted.\n");
}

static inline int xnarch_init (void)

{
    int err;

#ifdef CONFIG_SMP
    /* Run the initialization chores on the boot CPU. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif

    err = xnarch_calibrate_sched();

    if (err)
	return err;

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
	return -ENOSYS;

    adeos_virtualize_irq_from(&rthal_domain,
			      xnarch_escalation_virq,
			      (void (*)(unsigned))&xnpod_schedule_handler,
			      NULL,
			      IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif

    if (err)
	goto release_trap;

    err = xnarch_stack_pool_init();

    if (!err)
	return 0;

#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif

 release_trap:

    rthal_trap_catch(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);

    return err;
}

static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif
    rthal_trap_catch(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
    xnarch_stack_pool_destroy();
}

#endif /* XENO_MAIN_MODULE */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#else /* !__KERNEL__ */

#include <nucleus/system.h>
#include <bits/local_lim.h>

#endif /* __KERNEL__ */

#endif /* !_RTAI_ASM_IA64_SYSTEM_H */