#ifndef _RTAI_ASM_PPC_HAL_H
#define _RTAI_ASM_PPC_HAL_H

#include <nucleus/asm-generic/hal.h>
#include <asm/div64.h>

typedef unsigned long long rthal_time_t;

/* Split a 64-bit value into its high and low 32-bit words (the union
   layout assumes the big-endian word order of 32-bit PowerPC). */
#define __rthal_u64tou32(ull, h, l) ({ \
    union { unsigned long long _ull; \
            struct { u_long _h; u_long _l; } _s; } _u; \
    _u._ull = (ull); \
    (h) = _u._s._h; \
    (l) = _u._s._l; \
})

/* Rebuild a 64-bit value from its high and low 32-bit words. */
#define __rthal_u64fromu32(h, l) ({ \
    union { unsigned long long _ull; \
            struct { u_long _h; u_long _l; } _s; } _u; \
    _u._s._h = (h); \
    _u._s._l = (l); \
    _u._ull; \
})
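
/*
 * Illustrative sketch (not part of the original header): on 32-bit
 * big-endian PowerPC the two macros above are exact inverses, e.g.
 *
 *    u_long h, l;
 *    __rthal_u64tou32(0x0000000100000002ULL, h, l);
 *    // h == 0x1, l == 0x2
 *    unsigned long long v = __rthal_u64fromu32(h, l);
 *    // v == 0x0000000100000002ULL
 */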

/* 32x32 -> 64-bit multiply. */
static inline unsigned long long rthal_ullmul(const unsigned long m0,
                                              const unsigned long m1)
{
    return (unsigned long long) m0 * m1;
}

/* 64/32-bit division: returns the quotient and stores the remainder
   through rp when rp is non-NULL. */
static inline unsigned long long rthal_ulldiv (unsigned long long ull,
                                               const unsigned long uld,
                                               unsigned long *const rp)
{
#if defined(__KERNEL__) && BITS_PER_LONG == 32
    const unsigned long r = __div64_32(&ull, uld);
#else /* !__KERNEL__ || BITS_PER_LONG != 32 */
    const unsigned long r = ull % uld;
    ull /= uld;
#endif

    if (rp)
        *rp = r;

    return ull;
}
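
/*
 * Illustrative sketch (not in the original header): splitting a nanosecond
 * count into seconds plus a nanosecond remainder.
 *
 *    unsigned long rem;
 *    unsigned long long secs = rthal_ulldiv(10000000500ULL, 1000000000, &rem);
 *    // secs == 10, rem == 500
 */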

/* Quotient of a 64/32-bit division, truncated to a u_long. */
#define rthal_uldivrem(ull,ul,rp) ((u_long) rthal_ulldiv((ull),(ul),(rp)))

/* Return i * mult / div, using a 64-bit intermediate product to avoid
   overflow. */
static inline int rthal_imuldiv (int i, int mult, int div) {

    const unsigned long long ull = rthal_ullmul(i, mult);
    return rthal_uldivrem(ull, div, NULL);
}

/* Compute op * m / d where the intermediate product may exceed 64 bits:
   the operand is split into 32-bit halves, each half is scaled by m, and
   the two partial results are divided by d with remainder propagation. */
static inline __attribute_const__
unsigned long long __rthal_ullimd (const unsigned long long op,
                                   const unsigned long m,
                                   const unsigned long d)
{
    u_long oph, opl, tlh, tll, qh, rh, ql;
    unsigned long long th, tl;

    __rthal_u64tou32(op, oph, opl);
    tl = rthal_ullmul(opl, m);          /* low half * m */
    __rthal_u64tou32(tl, tlh, tll);
    th = rthal_ullmul(oph, m);          /* high half * m */
    th += tlh;                          /* carry from the low product */

    qh = rthal_uldivrem(th, d, &rh);    /* high quotient + remainder */
    th = __rthal_u64fromu32(rh, tll);
    ql = rthal_uldivrem(th, d, NULL);   /* low quotient */
    return __rthal_u64fromu32(qh, ql);
}

/* Signed variant of __rthal_ullimd(): op * m / d. */
static inline long long rthal_llimd (long long op,
                                     unsigned long m,
                                     unsigned long d)
{
    if (op < 0LL)
        return -__rthal_ullimd(-op, m, d);
    return __rthal_ullimd(op, m, d);
}
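
/*
 * Illustrative sketch (not in the original header): rthal_llimd() is the
 * usual way to rescale tick counts without losing precision. Assuming a
 * hypothetical timebase frequency tb_freq in Hz, a tick delta converts to
 * nanoseconds as:
 *
 *    long long ns = rthal_llimd(ticks, 1000000000, tb_freq);
 *
 * and back from nanoseconds to ticks as:
 *
 *    long long ticks = rthal_llimd(ns, tb_freq, 1000000000);
 */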

/* Return the bit position (0-31) of the least significant set bit of ul;
   ul must be non-zero. */
static inline __attribute_const__ unsigned long ffnz (unsigned long ul) {

    __asm__ ("cntlzw %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
    return 31 - ul;
}
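
/*
 * Illustrative sketch (not in the original header): ul & -ul isolates the
 * lowest set bit, cntlzw counts that value's leading zeroes, and 31 minus
 * the count is the bit index, e.g.
 *
 *    ffnz(0x00000001) == 0
 *    ffnz(0x00000008) == 3
 *    ffnz(0x80000000) == 31
 */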

#if defined(__KERNEL__) && !defined(__cplusplus)
#include <asm/system.h>
#include <asm/time.h>
#include <asm/timex.h>
#include <nucleus/asm/atomic.h>
#include <asm/processor.h>

#define RTHAL_TIMER_IRQ   ADEOS_TIMER_VIRQ

#define rthal_irq_descp(irq)  (irq_desc + irq)

/* Read the time-stamp counter (the PPC timebase) via Adeos. */
static inline unsigned long long rthal_rdtsc (void) {
    unsigned long long t;
    adeos_hw_tsc(t);
    return t;
}
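
/*
 * Illustrative sketch (not in the original header): timing a code section
 * in timebase ticks; do_something() stands for the code being measured,
 * and the result can be rescaled with rthal_llimd() if nanoseconds are
 * wanted.
 *
 *    unsigned long long t0 = rthal_rdtsc();
 *    do_something();
 *    unsigned long long elapsed_ticks = rthal_rdtsc() - t0;
 */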

#if !defined(CONFIG_ADEOS_NOTHREADS)

/* With Adeos threads, the root (Linux) domain may be preempted while the
   real-time domain runs over its own stack, so "current" cannot always be
   trusted. The root domain's host task is recovered from its saved stack
   pointer instead, masked down to the 8Kb-aligned thread_info. */
static inline struct task_struct *rthal_root_host_task (int cpuid) {
    return ((struct thread_info *)(adp_root->esp[cpuid] & (~8191UL)))->task;
}

static inline struct task_struct *rthal_current_host_task (int cpuid)
{
    register unsigned long esp asm ("r1");

    /* If the stack pointer lies within the real-time domain's private
       stack, "current" does not refer to the preempted Linux task;
       return the root domain's host task instead. */
    if (esp >= rthal_domain.estackbase[cpuid] &&
        esp < rthal_domain.estackbase[cpuid] + 8192)
        return rthal_root_host_task(cpuid);

    return current;
}

#else /* CONFIG_ADEOS_NOTHREADS */

/* Without Adeos threads, all domains run over the Linux task stacks, so
   "current" is always valid. */
static inline struct task_struct *rthal_root_host_task (int cpuid) {
    return current;
}

static inline struct task_struct *rthal_current_host_task (int cpuid) {
    return current;
}

#endif /* !CONFIG_ADEOS_NOTHREADS */

/* Program the next one-shot timer tick, "delay" decrementer/PIT ticks
   from now. A null delay is clamped to 1 so that the shot still fires. */
static inline void rthal_timer_program_shot (unsigned long delay)
{
    if (!delay)
        delay = 1;
#ifdef CONFIG_40x
    mtspr(SPRN_PIT, delay);     /* PPC 40x: programmable interval timer */
#else
    set_dec(delay);             /* other PPC: decrementer */
#endif
}
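
/*
 * Illustrative sketch (not in the original header): assuming a hypothetical
 * decrementer frequency dec_freq in Hz, arming a shot "ns" nanoseconds from
 * now would look like:
 *
 *    rthal_timer_program_shot((unsigned long)
 *                             rthal_llimd(ns, dec_freq, 1000000000));
 */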

/* Size in bytes of the register save frame built by rthal_switch_context(). */
#define RTHAL_SWITCH_FRAME_SIZE  108

void rthal_switch_context(unsigned long *out_kspp,
                          unsigned long *in_kspp);

#ifdef CONFIG_RTAI_HW_FPU

typedef struct rthal_fpenv {

    /* This layout mirrors the FPU save area of the PPC thread struct:
       the 32 floating-point registers followed by the 64-bit slot whose
       low word holds the FPSCR image. */
    double fpr[32];
    unsigned long fpscr_pad;    /* high word of the FPSCR slot (unused) */
    unsigned long fpscr;        /* low word: the FPSCR value */

} rthal_fpenv_t;

void rthal_init_fpu(rthal_fpenv_t *fpuenv);

void rthal_save_fpu(rthal_fpenv_t *fpuenv);

void rthal_restore_fpu(rthal_fpenv_t *fpuenv);

/* Task currently owning the FPU, or NULL. On UP the kernel tracks the
   owner globally; on SMP, check whether the given task has MSR_FP set in
   its saved MSR. */
#ifndef CONFIG_SMP
#define rthal_get_fpu_owner(cur)  last_task_used_math
#else /* CONFIG_SMP */
#define rthal_get_fpu_owner(cur) ({ \
    struct task_struct *_cur = (cur); \
    ((_cur->thread.regs && (_cur->thread.regs->msr & MSR_FP)) \
     ? _cur : NULL); \
})
#endif

/* Clear MSR[FP] so that the next FP instruction traps. */
#define rthal_disable_fpu() ({ \
    register long _msr; \
    __asm__ __volatile__ ( "mfmsr %0" : "=r"(_msr) ); \
    __asm__ __volatile__ ( "mtmsr %0" \
                           : \
                           : "r"(_msr & ~(MSR_FP)) \
                           : "memory" ); \
})

/* Set MSR[FP] to grant FPU access to the current context. */
#define rthal_enable_fpu() ({ \
    register long _msr; \
    __asm__ __volatile__ ( "mfmsr %0" : "=r"(_msr) ); \
    __asm__ __volatile__ ( "mtmsr %0" \
                           : \
                           : "r"(_msr | MSR_FP) \
                           : "memory" ); \
})
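
/*
 * Illustrative sketch (not in the original header): a possible FPU hand-over
 * between two real-time contexts, assuming each context embeds an
 * rthal_fpenv_t (the "fpenv" field name is hypothetical).
 *
 *    rthal_enable_fpu();
 *    rthal_save_fpu(&prev->fpenv);
 *    rthal_restore_fpu(&next->fpenv);
 */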

#endif /* CONFIG_RTAI_HW_FPU */

static const char *const rthal_fault_labels[] = {
    [0] = "Data or instruction access",
    [1] = "Alignment",
    [2] = "Altivec unavailable",
    [3] = "Program check exception",
    [4] = "Machine check exception",
    [5] = "Unknown",
    [6] = "Instruction breakpoint",
    [7] = "Run mode exception",
    [8] = "Single-step exception",
    [9] = "Non-recoverable exception",
    [10] = "Software emulation",
    [11] = "Debug",
    [12] = "SPE",
    [13] = "Altivec assist",
    [14] = NULL
};

#endif /* __KERNEL__ && !__cplusplus */

#endif /* !_RTAI_ASM_PPC_HAL_H */