system.h

00001 /*
00002  * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
00003  *
00004  * Xenomai is free software; you can redistribute it and/or modify it
00005  * under the terms of the GNU General Public License as published by
00006  * the Free Software Foundation; either version 2 of the License, or
00007  * (at your option) any later version.
00008  *
00009  * Xenomai is distributed in the hope that it will be useful, but
00010  * WITHOUT ANY WARRANTY; without even the implied warranty of
00011  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00012  * General Public License for more details.
00013  *
00014  * You should have received a copy of the GNU General Public License
00015  * along with Xenomai; if not, write to the Free Software Foundation,
00016  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00017  *
00018  * As a special exception, the RTAI project gives permission for
00019  * additional uses of the text contained in its release of Xenomai.
00020  *
00021  * The exception is that, if you link the Xenomai libraries with other
00022  * files to produce an executable, this does not by itself cause the
00023  * resulting executable to be covered by the GNU General Public License.
00024  * Your use of that executable is in no way restricted on account of
00025  * linking the Xenomai libraries code into it.
00026  *
00027  * This exception does not however invalidate any other reasons why
00028  * the executable file might be covered by the GNU General Public
00029  * License.
00030  *
00031  * This exception applies only to the code released by the
00032  * RTAI project under the name Xenomai.  If you copy code from other
00033  * RTAI project releases into a copy of Xenomai, as the General Public
00034  * License permits, the exception does not apply to the code that you
00035  * add in this way.  To avoid misleading anyone as to the status of
00036  * such modified files, you must delete this exception notice from
00037  * them.
00038  *
00039  * If you write modifications of your own for Xenomai, it is your
00040  * choice whether to permit this exception to apply to your
00041  * modifications. If you do not wish that, delete this exception
00042  * notice.
00043  */
00044 
00045 #ifndef _RTAI_ASM_PPC_SYSTEM_H
00046 #define _RTAI_ASM_PPC_SYSTEM_H
00047 
00048 #ifdef __KERNEL__
00049 
00050 #include <linux/kernel.h>
00051 #include <linux/version.h>
00052 #include <linux/module.h>
00053 #include <linux/slab.h>
00054 #include <linux/errno.h>
00055 #include <linux/adeos.h>
00056 #include <linux/vmalloc.h>
00057 #include <asm/uaccess.h>
00058 #include <asm/param.h>
00059 #include <asm/mmu_context.h>
00060 #include <rtai_config.h>
00061 #include <nucleus/asm/hal.h>
00062 #include <nucleus/asm/atomic.h>
00063 #include <nucleus/shadow.h>
00064 
00065 #if ADEOS_RELEASE_NUMBER < 0x02060609
00066 #error "Adeos 2.6r6c9/ppc or above is required to run this software; please upgrade."
00067 #error "See http://download.gna.org/adeos/patches/v2.6/ppc/"
00068 #endif
00069 
#define MODULE_PARM_VALUE(parm) (parm)

/* Interrupt-state ("spl") helpers, mapped onto the Adeos/RTHAL layer.
   The saved state is an opaque word; on SMP only bit 0 encodes the
   hardware interrupt state (the upper bits may carry xnlock
   bookkeeping -- see __xnlock_get_irqsave() below). */
typedef unsigned long spl_t;

#define splhigh(x)  rthal_local_irq_save(x)
#ifdef CONFIG_SMP
/* Mask out the xnlock bookkeeping bits before restoring. */
#define splexit(x)  rthal_local_irq_restore((x) & 1)
#else /* !CONFIG_SMP */
#define splexit(x)  rthal_local_irq_restore(x)
#endif /* CONFIG_SMP */
#define splnone()   rthal_sti()
#define spltest()   rthal_local_irq_test()
#define splget(x)   rthal_local_irq_flags(x)
#define splsync(x)  rthal_local_irq_sync(x)

/* Nucleus lock word; bit layout is managed by __xnlock_get_irqsave(). */
typedef unsigned long xnlock_t;

#define XNARCH_LOCK_UNLOCKED 0
00088 
00089 #ifdef CONFIG_SMP
00090 
/* x receives the combined flags returned by __xnlock_get_irqsave(). */
#define xnlock_get_irqsave(lock,x)  ((x) = __xnlock_get_irqsave(lock))
/* Release the lock leaving interrupts off (1) or on (0) respectively. */
#define xnlock_clear_irqoff(lock)   xnlock_put_irqrestore(lock,1)
#define xnlock_clear_irqon(lock)    xnlock_put_irqrestore(lock,0)
00094 
00095 static inline void xnlock_init (xnlock_t *lock) {
00096 
00097     *lock = XNARCH_LOCK_UNLOCKED;
00098 }
00099 
/* Acquire the global nucleus lock with hard interrupts disabled on
   the local CPU.  The returned flags word encodes: bit 0 = previous
   interrupt state, bit 1 = "nested acquisition" marker (this CPU
   already held the lock).  In the lock word itself, bit cpuid
   records which CPU is inside the lock and bit (BITS_PER_LONG - 1)
   is the actual global spin bit. */
static inline spl_t __xnlock_get_irqsave (xnlock_t *lock)

{
    adeos_declare_cpuid;
    spl_t flags;

    rthal_local_irq_save(flags);

    adeos_load_cpuid();

    if (!test_and_set_bit(cpuid,lock))
        {
        /* First acquisition by this CPU: spin until the global bit
           becomes ours. */
        while (test_and_set_bit(BITS_PER_LONG - 1,lock))
            rthal_cpu_relax(cpuid);
        }
    else
        /* Recursive acquisition on the same CPU: mark it so that the
           matching release leaves the lock word untouched. */
        flags |= 2;

    return flags;
}
00120 
/* Release the nucleus lock and restore the interrupt state saved by
   __xnlock_get_irqsave().  A nested acquisition (bit 1 set in flags)
   only restores the interrupt state; the outermost release clears
   both this CPU's ownership bit and the global spin bit. */
static inline void xnlock_put_irqrestore (xnlock_t *lock, spl_t flags)

{
    if (!(flags & 2))
        {
        adeos_declare_cpuid;

        rthal_cli();

        adeos_load_cpuid();

        if (test_bit(cpuid,lock))
            {
            clear_bit(cpuid,lock);
            clear_bit(BITS_PER_LONG - 1,lock);
            rthal_cpu_relax(cpuid); /* FIXME: is this really needed ? */
            }
        }

    rthal_local_irq_restore(flags & 1);
}
00142 
00143 #define XNARCH_PASSTHROUGH_IRQS /*empty*/
00144 
00145 #else /* !CONFIG_SMP */
00146 
/* Uniprocessor build: the nucleus lock degenerates to plain local
   interrupt masking. */
#define xnlock_init(lock)              do { } while(0)
#define xnlock_get_irqsave(lock,x)     rthal_local_irq_save(x)
#define xnlock_put_irqrestore(lock,x)  rthal_local_irq_restore(x)
#define xnlock_clear_irqoff(lock)      rthal_cli()
#define xnlock_clear_irqon(lock)       rthal_sti()
00152 
00153 #endif /* CONFIG_SMP */
00154 
00155 #define XNARCH_NR_CPUS               RTHAL_NR_CPUS
00156 
00157 #define XNARCH_DEFAULT_TICK          1000000 /* ns, i.e. 1ms */
00158 #define XNARCH_IRQ_MAX               IPIPE_NR_XIRQS /* Do _not_ use NR_IRQS here. */
00159 #define XNARCH_HOST_TICK             (1000000000UL/HZ)
00160 /* Using 2/3 of the average jitter is some constant obtained from
00161    experimentation that proved to fit on tested PPC platforms with
00162    respect to auto-calibration. In any case, a more accurate
00163    scheduling latency can still be fixed by setting
00164    CONFIG_RTAI_HW_SCHED_LATENCY properly. */
00165 #define xnarch_adjust_calibration(x) ((x) * 2 / 3)
00166 
00167 #define XNARCH_THREAD_STACKSZ 4096
00168 #define XNARCH_ROOT_STACKSZ   0 /* Only a placeholder -- no stack */
00169 
/* Kernel log wrappers, all tagged with the nucleus prompt. */
#define XNARCH_PROMPT "RTAI[nucleus]: "
#define xnarch_loginfo(fmt,args...)  printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
#define xnarch_logwarn(fmt,args...)  printk(KERN_WARNING XNARCH_PROMPT fmt, ##args)
#define xnarch_logerr(fmt,args...)   printk(KERN_ERR XNARCH_PROMPT fmt, ##args)
#define xnarch_printf(fmt,args...)   printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
00175 
/* 64bit arithmetic helpers, provided by the RTHAL layer. */
#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
/* NOTE(review): the expansion used to call the misspelled
   "xnarh_uldivrem" with the undeclared name "uld", which could not
   compile when the macro was instantiated. */
#define xnarch_ulmod(ull, d)         ({ u_long _rem;                    \
                                        xnarch_uldivrem(ull, d, &_rem); _rem; })

#define xnarch_ullmul                rthal_ullmul
#define xnarch_uldivrem              rthal_uldivrem
#define xnarch_ulldiv                rthal_ulldiv
#define xnarch_imuldiv               rthal_imuldiv
#define xnarch_llimd                 rthal_llimd
#define xnarch_get_cpu_tsc           rthal_rdtsc
00187 
00188 typedef cpumask_t xnarch_cpumask_t;
00189 #ifdef CONFIG_SMP
00190 #define xnarch_cpu_online_map            cpu_online_map
00191 #else
00192 #define xnarch_cpu_online_map            cpumask_of_cpu(0)
00193 #endif
00194 #define xnarch_num_online_cpus()         num_online_cpus()
00195 #define xnarch_cpu_set(cpu, mask)        cpu_set(cpu, mask)
00196 #define xnarch_cpu_clear(cpu, mask)      cpu_clear(cpu, mask)
00197 #define xnarch_cpus_clear(mask)          cpus_clear(mask)
00198 #define xnarch_cpu_isset(cpu, mask)      cpu_isset(cpu, mask)
00199 #define xnarch_cpus_and(dst, src1, src2) cpus_and(dst, src1, src2)
00200 #define xnarch_cpus_equal(mask1, mask2)  cpus_equal(mask1, mask2)
00201 #define xnarch_cpus_empty(mask)          cpus_empty(mask)
00202 #define xnarch_cpumask_of_cpu(cpu)       cpumask_of_cpu(cpu)
00203 #define xnarch_first_cpu(mask)           first_cpu(mask)
00204 #define XNARCH_CPU_MASK_ALL              CPU_MASK_ALL
00205 
00206 struct xnthread;
00207 struct xnheap;
00208 struct task_struct;
00209 
00210 #define xnarch_stack_size(tcb)  ((tcb)->stacksize)
00211 
typedef struct xnarchtcb {      /* Per-thread arch-dependent block */

    /* Kernel mode side */

#ifdef CONFIG_RTAI_HW_FPU
    /* We only care for basic FPU handling in kernel-space; Altivec
       and SPE are not available to kernel-based Xenomai threads. */
    rthal_fpenv_t fpuenv  __attribute__ ((aligned (16)));
    rthal_fpenv_t *fpup;        /* Pointer to the FPU backup area */
#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
#else /* !CONFIG_RTAI_HW_FPU */
#define xnarch_fpu_ptr(tcb)     NULL
#endif /* CONFIG_RTAI_HW_FPU */

    unsigned stacksize;         /* Aligned size of stack (bytes) */
    unsigned long *stackbase;   /* Stack space */
    unsigned long ksp;          /* Saved KSP for kernel-based threads */
    unsigned long *kspp;        /* Pointer to saved KSP (&ksp or &user->thread.ksp) */

    /* User mode side */
    struct task_struct *user_task;      /* Shadowed user-space task */
    struct task_struct *active_task;    /* Active user-space task */

    /* Init block */
    struct xnthread *self;      /* Nucleus thread this TCB belongs to */
    int imask;                  /* Initial interrupt state (see xnarch_thread_trampoline) */
    const char *name;           /* Symbolic thread name */
    void (*entry)(void *cookie); /* Thread entry routine */
    void *cookie;               /* Opaque argument passed to entry() */

} xnarchtcb_t;
00243 
/* Machine fault information handed to the nucleus fault handler. */
typedef struct xnarch_fltinfo {

    struct pt_regs *regs;       /* Register frame at fault time */

} xnarch_fltinfo_t;
00249 
00250 #define xnarch_fault_trap(fi)  ((unsigned int)(fi)->regs->trap)
00251 #define xnarch_fault_code(fi)  ((fi)->regs->dar)
00252 #define xnarch_fault_pc(fi)    ((fi)->regs->nip)
00253 
/* Arch-specific control block attached to nucleus heaps, tracking
   how the backing memory was obtained and how it is mapped to
   user-space. */
typedef struct xnarch_heapcb {

    atomic_t numaps;    /* # of active user-space mappings. */

    int kmflags;        /* Kernel memory flags (0 if vmalloc()). */

    void *heapbase;     /* Shared heap memory base (possibly unaligned). */

    void *shmbase;      /* Shared memory base (page-aligned). */

} xnarch_heapcb_t;
00265 
00266 static inline void xnarch_init_heapcb (xnarch_heapcb_t *hcb)
00267 
00268 {
00269     atomic_set(&hcb->numaps,0);
00270     hcb->kmflags = 0;
00271     hcb->heapbase = NULL;
00272     hcb->shmbase = NULL;
00273 }
00274 
00275 #ifdef __cplusplus
00276 extern "C" {
00277 #endif
00278 
00279 static inline unsigned long long xnarch_tsc_to_ns (unsigned long long ts) {
00280     return xnarch_llimd(ts,1000000000,RTHAL_CPU_FREQ);
00281 }
00282 
00283 static inline unsigned long long xnarch_ns_to_tsc (unsigned long long ns) {
00284     return xnarch_llimd(ns,RTHAL_CPU_FREQ,1000000000);
00285 }
00286 
/* Current CPU time in nanoseconds, derived from the timebase counter. */
static inline unsigned long long xnarch_get_cpu_time (void)
{
    return xnarch_tsc_to_ns(xnarch_get_cpu_tsc());
}
00290 
00291 static inline unsigned long long xnarch_get_cpu_freq (void) {
00292     return RTHAL_CPU_FREQ;
00293 }
00294 
/* Identifier of the processor we are currently running on. */
static inline unsigned xnarch_current_cpu (void)
{
    return adeos_processor_id();
}
00298 
00299 static inline void *xnarch_sysalloc (u_long bytes)
00300 
00301 {
00302     if (bytes >= 128*1024)
00303         return vmalloc(bytes);
00304 
00305     return kmalloc(bytes,GFP_KERNEL);
00306 }
00307 
00308 static inline void xnarch_sysfree (void *chunk, u_long bytes)
00309 
00310 {
00311     if (bytes >= 128*1024)
00312         vfree(chunk);
00313     else
00314         kfree(chunk);
00315 }
00316 
00317 #define xnarch_declare_cpuid  adeos_declare_cpuid
00318 #define xnarch_get_cpu(flags) adeos_get_cpu(flags)
00319 #define xnarch_put_cpu(flags) adeos_put_cpu(flags)
00320 
00321 #define xnarch_halt(emsg) \
00322 do { \
00323     adeos_set_printk_sync(adp_current); \
00324     xnarch_logerr("fatal: %s\n",emsg); \
00325     show_stack(NULL,NULL);              \
00326     for (;;) ;                          \
00327 } while(0)
00328 
00329 #define xnarch_alloc_stack xnmalloc
00330 #define xnarch_free_stack  xnfree
00331 
/* Force the local interrupt state to the boolean value !!imask
   (flag semantics follow splexit/rthal_local_irq_restore), returning
   the previous state as a boolean. */
static inline int xnarch_setimask (int imask)

{
    spl_t s;
    splhigh(s);
    splexit(!!imask);
    return !!s;
}
00340 
00341 #ifdef XENO_INTR_MODULE
00342 
/* Install @handler with @cookie on @irq, then unmask the line.
   Returns 0 on success, or the error from rthal_request_irq(). */
static inline int xnarch_hook_irq (unsigned irq,
                                   void (*handler)(unsigned irq,
                                                   void *cookie),
                                   void *cookie)
{
    int rc = rthal_request_irq(irq,handler,cookie);

    if (rc != 0)
        return rc;

    rthal_enable_irq(irq);

    return 0;
}
00355 
/* Uninstall the handler previously hooked on @irq. */
static inline int xnarch_release_irq (unsigned irq)
{
    return rthal_release_irq(irq);
}
00360 
00361 static inline int xnarch_enable_irq (unsigned irq)
00362 
00363 {
00364     if (irq >= XNARCH_IRQ_MAX)
00365         return -EINVAL;
00366 
00367     rthal_enable_irq(irq);
00368 
00369     return 0;
00370 }
00371 
00372 static inline int xnarch_disable_irq (unsigned irq)
00373 
00374 {
00375     if (irq >= XNARCH_IRQ_MAX)
00376         return -EINVAL;
00377 
00378     rthal_disable_irq(irq);
00379 
00380     return 0;
00381 }
00382 
/* Mark @irq as pending for the Linux domain so it is propagated
   down the pipeline. */
static inline void xnarch_isr_chain_irq (unsigned irq) {
    rthal_pend_linux_irq(irq);
}
00386 
/* Unmask @irq at hardware level (no range checking here). */
static inline void xnarch_isr_enable_irq (unsigned irq) {
    rthal_enable_irq(irq);
}
00390 
/* Propagate a timer tick to Linux through the timer virq. */
static inline void xnarch_relay_tick (void) {

    rthal_pend_linux_irq(ADEOS_TIMER_VIRQ);
}
00395 
/* Bind @irq to the CPU set @affinity; returns the result of
   adeos_set_irq_affinity() (presumably the previous mask -- see the
   Adeos API). */
static inline cpumask_t xnarch_set_irq_affinity (unsigned irq,
                                                 cpumask_t affinity) {
    return adeos_set_irq_affinity(irq,affinity);
}
00400 
00401 #endif /* XENO_INTR_MODULE */
00402 
00403 #ifdef XENO_POD_MODULE
00404 
00405 void xnpod_welcome_thread(struct xnthread *);
00406 
00407 void xnpod_delete_thread(struct xnthread *);
00408 
/* Return the timer programming latency to compensate for, expressed
   in timebase ticks: either the hard-wired value of
   CONFIG_RTAI_HW_TIMER_LATENCY, or a measured calibration. */
unsigned long xnarch_calibrate_timer (void)

{
#if  CONFIG_RTAI_HW_TIMER_LATENCY != 0
    return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY);
#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified. */
    /* Compute the time needed to program the decrementer in aperiodic
       mode. The return value is expressed in timebase ticks. */
    return xnarch_ns_to_tsc(rthal_calibrate_timer());
#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
}
00420 
/* Grab the hardware timer, installing @tickhandler with the @ns
   timing parameter (exact period/one-shot semantics are defined by
   rthal_request_timer()). */
static inline int xnarch_start_timer (unsigned long ns,
                                      void (*tickhandler)(void)) {
    return rthal_request_timer(tickhandler,ns);
}
00425 
/* Prepare for preemption of the root (Linux) context by the
   real-time domain on the current CPU, recording the outgoing Linux
   task into the root TCB. */
static inline void xnarch_leave_root (xnarchtcb_t *rootcb)

{
    adeos_declare_cpuid;

    adeos_load_cpuid();

    /* rthal_cpu_realtime is only tested for the current processor,
       and always inside a critical section. */
    __set_bit(cpuid,&rthal_cpu_realtime);
    /* Remember the preempted Linux task pointer. */
    rootcb->user_task = rootcb->active_task = rthal_get_current(cpuid);
    /* So that xnarch_save_fpu() will operate on the right FPU area. */
#ifdef CONFIG_RTAI_HW_FPU
    rootcb->fpup = (rthal_fpenv_t *)&rootcb->user_task->thread.fpr[0];
#endif /* CONFIG_RTAI_HW_FPU */
}
00443 
/* The root (Linux) context regains the current CPU: clear its
   real-time flag. */
static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
    __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
}
00447 
/* Switch the CPU from out_tcb's context to in_tcb's.  A switch
   involving a user-space task goes through the mm context change and
   _switch(); a kernel-to-kernel switch only swaps the saved kernel
   stack pointers via rthal_switch_context(). */
static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
                                     xnarchtcb_t *in_tcb)
{
    struct task_struct *outproc = out_tcb->active_task;
    struct task_struct *inproc = in_tcb->user_task;
    unsigned long flags;

    rthal_hw_lock(flags);

    /* A kernel thread (user_task == NULL) keeps borrowing the
       previously active task. */
    in_tcb->active_task = inproc ?: outproc;

    if (inproc && inproc != outproc) /* User-space thread switch? */
        {
        struct mm_struct *prev = outproc->active_mm;
        struct mm_struct *next = inproc->active_mm;

        /* Switch the mm context.*/

        inproc->thread.pgdir = next->pgd;

        if (prev != next)
            {
            get_mmu_context(next);
            set_context(next->context, next->pgd);
            }

        if (!inproc->mm)
            enter_lazy_tlb(prev,inproc);

        /* The following has been lifted from
           arch/ppc/kernel/process.c. */

#ifdef CONFIG_SMP
        /* On SMP, flush the outgoing task's FP/vector state to its
           thread struct so it can be resumed on another CPU. */
        if (outproc->thread.regs && (outproc->thread.regs->msr & MSR_FP))
            giveup_fpu(outproc);
#ifdef CONFIG_ALTIVEC
        if ((outproc->thread.regs && (outproc->thread.regs->msr & MSR_VEC)))
            giveup_altivec(outproc);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        if ((outproc->thread.regs && (outproc->thread.regs->msr & MSR_SPE)))
            giveup_spe(outproc);
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

#ifdef CONFIG_ALTIVEC
        /* If the incoming task still owns the vector unit, let it
           keep using it without a fault. */
        if (inproc->thread.regs && last_task_used_altivec == inproc)
            inproc->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
        if (inproc->thread.regs && last_task_used_spe == inproc)
            inproc->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

        _switch(&outproc->thread,&inproc->thread);
        }
    else
        /* Kernel-to-kernel context switch. */
        rthal_switch_context(out_tcb->kspp,in_tcb->kspp);

    rthal_hw_unlock(flags);
}
00511 
/* Dispose of a dying thread's context then switch to the next one;
   no extra finalization is needed on this arch, so this is a plain
   context switch. */
static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
                                               xnarchtcb_t *next_tcb) {
    xnarch_switch_to(dead_tcb,next_tcb);
}
00516 
/* Finalize a dying thread without switching away from it; nothing to
   do on this arch. */
static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb) {
    /* Empty */
}
00520 
00521 static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
00522                                          struct xnthread *thread,
00523                                          const char *name)
00524 {
00525     tcb->user_task = current;
00526     tcb->active_task = NULL;
00527     tcb->ksp = 0;
00528     tcb->kspp = &tcb->ksp;
00529 #ifdef CONFIG_RTAI_HW_FPU
00530     tcb->fpup = NULL;
00531 #endif /* CONFIG_RTAI_HW_FPU */
00532     tcb->entry = NULL;
00533     tcb->cookie = NULL;
00534     tcb->self = thread;
00535     tcb->imask = 0;
00536     tcb->name = name;
00537 }
00538 
00539 static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {
00540 
00541     tcb->user_task = NULL;
00542     tcb->active_task = NULL;
00543     tcb->kspp = &tcb->ksp;
00544 #ifdef CONFIG_RTAI_HW_FPU
00545     tcb->fpup = &tcb->fpuenv;
00546 #endif /* CONFIG_RTAI_HW_FPU */
00547     /* Must be followed by xnarch_init_thread(). */
00548 }
00549 
/* First code executed by an emerging kernel-based thread: establish
   the initial interrupt state recorded in the TCB, notify the
   nucleus, run the entry routine, and self-delete if that routine
   ever returns. */
asmlinkage static void xnarch_thread_trampoline (xnarchtcb_t *tcb)

{
    rthal_local_irq_restore(!!tcb->imask);
    xnpod_welcome_thread(tcb->self);
    tcb->entry(tcb->cookie);
    xnpod_delete_thread(tcb->self);
}
00558 
/* Build the initial stack frame of a kernel-based thread so that the
   first switch to it enters xnarch_thread_trampoline() with the TCB
   in r3 and external interrupts disabled. */
static inline void xnarch_init_thread (xnarchtcb_t *tcb,
                                       void (*entry)(void *),
                                       void *cookie,
                                       int imask,
                                       struct xnthread *thread,
                                       char *name)
{
    unsigned long *ksp, flags;

    adeos_hw_local_irq_flags(flags);

    *tcb->stackbase = 0;
    /* 16-byte align the top of stack, then carve out a switch frame.
       The slot indexes below (19/25/26) map into that frame --
       presumably matching the RTHAL_SWITCH_FRAME_SIZE layout; verify
       against the HAL if the frame format changes. */
    ksp = (unsigned long *)((((unsigned long)tcb->stackbase + tcb->stacksize - 0x10) & ~0xf) - RTHAL_SWITCH_FRAME_SIZE);
    tcb->ksp = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
    ksp[19] = (unsigned long)tcb; /* r3 */
    ksp[25] = (unsigned long)&xnarch_thread_trampoline; /* lr */
    ksp[26] = flags & ~MSR_EE; /* msr */

    tcb->entry = entry;
    tcb->cookie = cookie;
    tcb->self = thread;
    tcb->imask = imask;
    tcb->name = name;
}
00583 
/* Give an emerging kernel-based RT thread a pristine FPU context. */
static inline void xnarch_init_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU
    /* Initialize the FPU for an emerging kernel-based RT thread. This
       must be run on behalf of the emerging thread. */
    memset(&tcb->fpuenv,0,sizeof(tcb->fpuenv));
    rthal_init_fpu(&tcb->fpuenv);
#endif /* CONFIG_RTAI_HW_FPU */
}
00594 
/* Save the FPU state of the thread leaving the CPU.  For a shadowed
   user-space task, make the kernel give up the FPU so the state
   lands in the task struct; for a kernel thread, dump the fp regs
   into the TCB backup area pointed at by fpup. */
static inline void xnarch_save_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    struct task_struct *task = tcb->user_task;

    if (task)
        {
        if (task->thread.regs && (task->thread.regs->msr & MSR_FP))
            giveup_fpu(task);

        return;
        }

    /* Save the fp regs of the kernel thread owning the FPU. */

    rthal_save_fpu(tcb->fpup);

#endif /* CONFIG_RTAI_HW_FPU */
}
00616 
/* Restore the FPU state of the thread entering the CPU, when it is a
   kernel thread; user-space tasks are reloaded lazily through the FP
   unavailability exception. */
static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)

{
#ifdef CONFIG_RTAI_HW_FPU

    if (tcb->user_task)
        /* On PowerPC, we let the unavailability exception happen for
           the incoming user-space task if it happens to use the FPU,
           instead of eagerly reloading the fp regs upon switch. */
        return;

    /* Restore the fp regs of the incoming kernel thread. */

    rthal_restore_fpu(tcb->fpup);

#endif /* CONFIG_RTAI_HW_FPU */
}
00634 
/* Sleep in 1-jiffy steps until *flagp becomes non-zero.  In debug
   builds the wait is interruptible, returning -ERESTARTSYS if a
   signal is pending; otherwise returns 0 once the flag is raised. */
int xnarch_sleep_on (int *flagp) {

    while (!*flagp)
        {
#if !CONFIG_RTAI_OPT_DEBUG
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
#else /* CONFIG_RTAI_OPT_DEBUG. */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(1);
        if(signal_pending(current))
            return -ERESTARTSYS;
#endif /* !CONFIG_RTAI_OPT_DEBUG. */
        }
    return 0;
}
00651 
00652 #ifdef CONFIG_SMP
00653 
/* Kick the Adeos service IPI #0 on the CPUs selected by cpumask. */
static inline int xnarch_send_ipi (cpumask_t cpumask) {

    return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
}
00658 
/* Route the Adeos service IPI #0 to @handler within the real-time
   domain. */
static inline int xnarch_hook_ipi (void (*handler)(void))

{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     (void (*)(unsigned)) handler,
                                     NULL,
                                     IPIPE_HANDLE_MASK);
}
00668 
/* Detach our handler from the service IPI #0, reverting it to
   pass-through mode in the real-time domain. */
static inline int xnarch_release_ipi (void)

{
    return adeos_virtualize_irq_from(&rthal_domain,
                                     ADEOS_SERVICE_IPI0,
                                     NULL,
                                     NULL,
                                     IPIPE_PASS_MASK);
}
00678 
/* Synchronization barrier at halt time: entering then immediately
   leaving the Adeos critical section rendez-vous with all CPUs. */
static inline void xnarch_notify_halt(void)

{
    unsigned long flags = adeos_critical_enter(NULL);
    adeos_critical_exit(flags);
}
00685 
00686 #else /* !CONFIG_SMP */
00687 
/* Uniprocessor stub: no remote CPU to signal. */
static inline int xnarch_send_ipi (cpumask_t cpumask) {

    return 0;
}
00692 
/* Uniprocessor stub: no IPI to hook. */
static inline int xnarch_hook_ipi (void (*handler)(void)) {

    return 0;
}
00697 
/* Uniprocessor stub: no IPI to release. */
static inline int xnarch_release_ipi (void) {

    return 0;
}
00702 
00703 #define xnarch_notify_halt() /* Nullified */
00704 
00705 #endif /* CONFIG_SMP */
00706 
/* Prepare the nucleus for shutdown: pin to CPU 0 on SMP, detach the
   shadow event handlers, let in-flight events drain, then release
   the service IPI. */
static inline void xnarch_notify_shutdown(void)

{
#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_release_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
    /* Wait for the currently processed events to drain. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(50);
    xnarch_release_ipi();
}
00723 
/* If currently running over the root domain, trigger the escalation
   virq (hooked to xnpod_schedule_handler by xnarch_init()) so
   rescheduling occurs from the real-time domain.  Returns 1 when the
   escalation was triggered, 0 when already over the RT domain. */
static inline int xnarch_escalate (void)

{
    extern int xnarch_escalation_virq;

    if (adp_current == adp_root)
        {
        spl_t s;
        splsync(s);
        adeos_trigger_irq(xnarch_escalation_virq);
        splexit(s);
        return 1;
        }

    return 0;
}
00740 
/* Nucleus is up: let the shadow layer start grabbing its events. */
static void xnarch_notify_ready (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_grab_events();
#endif /* CONFIG_RTAI_OPT_FUSION */
}
00748 
00749 #endif /* XENO_POD_MODULE */
00750 
00751 #ifdef XENO_SHADOW_MODULE
00752 
00753 static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
00754                                            struct xnthread *thread,
00755                                            const char *name)
00756 {
00757     struct task_struct *task = current;
00758 
00759     tcb->user_task = task;
00760     tcb->active_task = NULL;
00761     tcb->ksp = 0;
00762     tcb->kspp = &task->thread.ksp;
00763 #ifdef CONFIG_RTAI_HW_FPU
00764     tcb->fpup = (rthal_fpenv_t *)&task->thread.fpr[0];
00765 #endif /* CONFIG_RTAI_HW_FPU */
00766     tcb->entry = NULL;
00767     tcb->cookie = NULL;
00768     tcb->self = thread;
00769     tcb->imask = 0;
00770     tcb->name = name;
00771 }
00772 
/* Virtualize every external IRQ -- plus the timer virq -- through
   @handler in the current domain. */
static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        adeos_virtualize_irq(irq,
                             handler,
                             NULL,
                             IPIPE_DYNAMIC_MASK);

    /* On this arch, the decrementer trap is not an external IRQ but
       it is instead mapped to a virtual IRQ, so we must grab it
       individually. */

    adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
                         handler,
                         NULL,
                         IPIPE_DYNAMIC_MASK);
}
00793 
/* Lock out all external IRQs (and the timer virq) from domain @adp
   on CPU @cpuid, sparing only the Adeos critical IPI on SMP. */
static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:

                /* Never lock out this one. */
                continue;
#endif /* CONFIG_SMP */

            default:

                __adeos_lock_irq(adp,cpuid,irq);
            }
        }

    __adeos_lock_irq(adp,cpuid,ADEOS_TIMER_VIRQ);
}
00818 
/* Converse of xnarch_lock_xirqs(): re-enable delivery of all
   external IRQs (and the timer virq) to domain @adp. */
static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)

{
    unsigned irq;

    for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
        {
        switch (irq)
            {
#ifdef CONFIG_SMP
            case ADEOS_CRITICAL_IPI:

                /* Was never locked -- nothing to undo. */
                continue;
#endif /* CONFIG_SMP */

            default:

                __adeos_unlock_irq(adp,irq);
            }
        }

    __adeos_unlock_irq(adp,ADEOS_TIMER_VIRQ);
}
00842 
00843 #endif /* XENO_SHADOW_MODULE */
00844 
00845 #ifdef XENO_TIMER_MODULE
00846 
/* Arm the next one-shot of the hardware timer, @delay expressed in
   CPU (timebase) ticks. */
static inline void xnarch_program_timer_shot (unsigned long long delay) {
    /* Delays are expressed in CPU ticks, so we need to keep a 64bit
       value here, especially for 64bit arch ports using an interval
       timer based on the internal cycle counter of the CPU. Since the
       timebase value is used to express CPU ticks on the PowerPC
       port, there is no need to rescale the delay value. */ 
    rthal_set_timer_shot(delay);
}
00855 
/* Give the hardware timer back to the HAL/Linux. */
static inline void xnarch_stop_timer (void) {
    rthal_release_timer();
}
00859 
/* Report the timestamps of the last timer shot handling and delivery
   when Adeos profiling is compiled in; otherwise both outputs fall
   back to @defval. */
static inline void xnarch_read_timings (unsigned long long *shot,
                                        unsigned long long *delivery,
                                        unsigned long long defval)
{
#ifdef CONFIG_ADEOS_PROFILING
    int cpuid = adeos_processor_id();
    *shot = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_handled;
    *delivery = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_synced;
#else /* !CONFIG_ADEOS_PROFILING */
    *shot = defval;
    *delivery = defval;
#endif /* CONFIG_ADEOS_PROFILING */
}
00873 
00874 #endif /* XENO_TIMER_MODULE */
00875 
00876 #ifdef XENO_MAIN_MODULE
00877 
00878 int xnarch_escalation_virq;
00879 
00880 int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);
00881 
00882 void xnpod_schedule_handler(void);
00883 
00884 static rthal_trap_handler_t xnarch_old_trap_handler;
00885 
00886 static int xnarch_trap_fault (adevinfo_t *evinfo)
00887 
00888 {
00889     xnarch_fltinfo_t fltinfo;
00890     fltinfo.regs = (struct pt_regs *)evinfo->evdata;
00891     return xnpod_trap_fault(&fltinfo);
00892 }
00893 
/* Bring up the arch-dependent layer: allocate and hook the
   escalation virq, install the nucleus trap handler, and mount the
   shadow layer when fusion support is enabled.  Returns 0 on
   success, -ENOSYS when no virq is available, or the shadow mount
   error (in which case the virq is freed again). */
static inline int xnarch_init (void)

{
    int err = 0;

#ifdef CONFIG_SMP
    /* The HAL layer also sets the same CPU affinity so that both
       modules keep their execution sequence on SMP boxen. */
    set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

    xnarch_escalation_virq = adeos_alloc_irq();

    if (xnarch_escalation_virq == 0)
        return -ENOSYS;

    adeos_virtualize_irq_from(&rthal_domain,
                              xnarch_escalation_virq,
                              (void (*)(unsigned))&xnpod_schedule_handler,
                              NULL,
                              IPIPE_HANDLE_MASK);

    xnarch_old_trap_handler = rthal_set_trap_handler(&xnarch_trap_fault);

#ifdef CONFIG_RTAI_OPT_FUSION
    err = xnshadow_mount();
#endif /* CONFIG_RTAI_OPT_FUSION */

    if (err)
        adeos_free_irq(xnarch_escalation_virq);

    return err;
}
00927 
/* Undo xnarch_init(): unmount the shadow layer, restore the previous
   trap handler and release the escalation virq. */
static inline void xnarch_exit (void)

{
#ifdef CONFIG_RTAI_OPT_FUSION
    xnshadow_cleanup();
#endif /* CONFIG_RTAI_OPT_FUSION */
    rthal_set_trap_handler(xnarch_old_trap_handler);
    adeos_free_irq(xnarch_escalation_virq);
}
00937 
00938 #endif /* XENO_MAIN_MODULE */
00939 
00940 #ifdef __cplusplus
00941 }
00942 #endif
00943 
00944 /* Dashboard and graph control. */
00945 #define XNARCH_DECL_DISPLAY_CONTEXT();
00946 #define xnarch_init_display_context(obj)
00947 #define xnarch_create_display(obj,name,tag)
00948 #define xnarch_delete_display(obj)
00949 #define xnarch_post_graph(obj,state)
00950 #define xnarch_post_graph_if(obj,state,cond)
00951 
00952 #else /* !__KERNEL__ */
00953 
00954 #include <nucleus/system.h>
00955 
00956 #endif /* __KERNEL__ */
00957 
00958 #define XNARCH_CALIBRATION_PERIOD    1000000 /* ns */
00959 
00960 #endif /* !_RTAI_ASM_PPC_SYSTEM_H */

Generated on Mon Dec 13 09:49:49 2004 for RTAI API by  doxygen 1.3.9.1