20 #ifndef _COBALT_KERNEL_LOCK_H
21 #define _COBALT_KERNEL_LOCK_H
23 #include <linux/ipipe.h>
24 #include <linux/percpu.h>
25 #include <cobalt/kernel/assert.h>
/*
 * Saved interrupt-state cookie returned by splhigh() and consumed by
 * splexit(). Wide enough to carry the arch flags word plus the
 * recursion marker bit used by xnlock_get_irqsave().
 */
typedef unsigned long spl_t;
/*
 * Interrupt masking helpers for the head (real-time) pipeline domain.
 *
 * splhigh() hard-disables interrupts on the local CPU and stores the
 * previous stall state (bit 0) into x; splexit() restores that state.
 *
 * NOTE(review): the two splexit() variants were emitted back to back
 * with their preprocessor guards stripped; restored per upstream. On
 * SMP the flags word may carry the lock-recursion marker in bit 1
 * (see __xnlock_get_irqsave()), so it must be masked off before
 * handing the value back to the pipeline.
 */
#define splhigh(x)  ((x) = ipipe_test_and_stall_head() & 1)
#ifdef CONFIG_SMP
#define splexit(x)  ipipe_restore_head(x & 1)
#else /* !CONFIG_SMP */
#define splexit(x)  ipipe_restore_head(x)
#endif /* !CONFIG_SMP */
#define splmax()    ipipe_stall_head()
#define splnone()   ipipe_unstall_head()
#define spltest()   ipipe_test_head()
66 #if XENO_DEBUG(LOCKING)
70 arch_spinlock_t alock;
75 unsigned long long spin_time;
76 unsigned long long lock_date;
80 unsigned long long spin_time;
81 unsigned long long lock_time;
87 #define XNARCH_LOCK_UNLOCKED (struct xnlock) { \
89 __ARCH_SPIN_LOCK_UNLOCKED, \
98 #define XNLOCK_DBG_CONTEXT , __FILE__, __LINE__, __FUNCTION__
99 #define XNLOCK_DBG_CONTEXT_ARGS \
100 , const char *file, int line, const char *function
101 #define XNLOCK_DBG_PASS_CONTEXT , file, line, function
103 void xnlock_dbg_prepare_acquire(
unsigned long long *start);
104 void xnlock_dbg_prepare_spin(
unsigned int *spin_limit);
105 void xnlock_dbg_acquired(
struct xnlock *lock,
int cpu,
106 unsigned long long *start,
107 const char *file,
int line,
108 const char *
function);
109 int xnlock_dbg_release(
struct xnlock *lock,
110 const char *file,
int line,
111 const char *
function);
113 DECLARE_PER_CPU(
struct xnlockinfo, xnlock_stats);
119 arch_spinlock_t alock;
122 #define XNARCH_LOCK_UNLOCKED \
125 __ARCH_SPIN_LOCK_UNLOCKED, \
128 #define XNLOCK_DBG_CONTEXT
129 #define XNLOCK_DBG_CONTEXT_ARGS
130 #define XNLOCK_DBG_PASS_CONTEXT
133 void xnlock_dbg_prepare_acquire(
unsigned long long *start)
138 void xnlock_dbg_prepare_spin(
unsigned int *spin_limit)
143 xnlock_dbg_acquired(
struct xnlock *lock,
int cpu,
144 unsigned long long *start)
148 static inline int xnlock_dbg_release(
struct xnlock *lock)
155 #if defined(CONFIG_SMP) || XENO_DEBUG(LOCKING)
157 #define xnlock_get(lock) __xnlock_get(lock XNLOCK_DBG_CONTEXT)
158 #define xnlock_put(lock) __xnlock_put(lock XNLOCK_DBG_CONTEXT)
159 #define xnlock_get_irqsave(lock,x) \
160 ((x) = __xnlock_get_irqsave(lock XNLOCK_DBG_CONTEXT))
161 #define xnlock_put_irqrestore(lock,x) \
162 __xnlock_put_irqrestore(lock,x XNLOCK_DBG_CONTEXT)
163 #define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock, 1)
164 #define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock, 0)
166 static inline void xnlock_init (
struct xnlock *lock)
168 *lock = XNARCH_LOCK_UNLOCKED;
171 #define DECLARE_XNLOCK(lock) struct xnlock lock
172 #define DECLARE_EXTERN_XNLOCK(lock) extern struct xnlock lock
173 #define DEFINE_XNLOCK(lock) struct xnlock lock = XNARCH_LOCK_UNLOCKED
174 #define DEFINE_PRIVATE_XNLOCK(lock) static DEFINE_XNLOCK(lock)
176 static inline int ____xnlock_get(
struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS)
178 int cpu = ipipe_processor_id();
179 unsigned long long start;
181 if (lock->owner == cpu)
184 xnlock_dbg_prepare_acquire(&start);
186 arch_spin_lock(&lock->alock);
189 xnlock_dbg_acquired(lock, cpu, &start XNLOCK_DBG_PASS_CONTEXT);
194 static inline void ____xnlock_put(
struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS)
196 if (xnlock_dbg_release(lock XNLOCK_DBG_PASS_CONTEXT))
200 arch_spin_unlock(&lock->alock);
/*
 * The middle-layer entry points either alias the inline implementation
 * directly, or are provided out of line by the arch support code when
 * CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK is set (reduces I-cache pressure).
 *
 * NOTE(review): the #else/#endif of this conditional were dropped by
 * the extraction; restored per upstream — confirm.
 */
#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
#define ___xnlock_get ____xnlock_get
#define ___xnlock_put ____xnlock_put
#else /* out of line xnlock */
int ___xnlock_get(struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS);
void ___xnlock_put(struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS);
#endif /* out of line xnlock */
212 #if XENO_DEBUG(LOCKING)
214 #define __locking_active__ 1
216 #define __locking_active__ ipipe_smp_p
220 __xnlock_get_irqsave(
struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS)
226 if (__locking_active__)
227 flags |= ___xnlock_get(lock XNLOCK_DBG_PASS_CONTEXT);
232 static inline void __xnlock_put_irqrestore(
struct xnlock *lock, spl_t flags
233 XNLOCK_DBG_CONTEXT_ARGS)
236 if (__locking_active__ && !(flags & 2))
237 ___xnlock_put(lock XNLOCK_DBG_PASS_CONTEXT);
242 static inline int xnlock_is_owner(
struct xnlock *lock)
244 if (__locking_active__)
245 return lock->owner == ipipe_processor_id();
250 static inline int __xnlock_get(
struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS)
252 if (__locking_active__)
253 return ___xnlock_get(lock XNLOCK_DBG_PASS_CONTEXT);
258 static inline void __xnlock_put(
struct xnlock *lock XNLOCK_DBG_CONTEXT_ARGS)
260 if (__locking_active__)
261 ___xnlock_put(lock XNLOCK_DBG_PASS_CONTEXT);
264 #undef __locking_active__
268 #define xnlock_init(lock) do { } while(0)
269 #define xnlock_get(lock) do { } while(0)
270 #define xnlock_put(lock) do { } while(0)
271 #define xnlock_get_irqsave(lock,x) splhigh(x)
272 #define xnlock_put_irqrestore(lock,x) splexit(x)
273 #define xnlock_clear_irqoff(lock) splmax()
274 #define xnlock_clear_irqon(lock) splnone()
275 #define xnlock_is_owner(lock) 1
277 #define DECLARE_XNLOCK(lock)
278 #define DECLARE_EXTERN_XNLOCK(lock)
279 #define DEFINE_XNLOCK(lock)
280 #define DEFINE_PRIVATE_XNLOCK(lock)
284 DECLARE_EXTERN_XNLOCK(nklock);
splexit(x) — Restore the saved hard interrupt state on the local processor. (Definition: lock.h:46)
splhigh(x) — Hard disable interrupts on the local processor, saving the previous state. (Definition: lock.h:39)