/* Xenomai 3.0-rc7 -- boilerplate/lock.h */
/*
 * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef _BOILERPLATE_LOCK_H
#define _BOILERPLATE_LOCK_H

#include <pthread.h>
#include <boilerplate/atomic.h>
#include <boilerplate/wrappers.h>
#include <boilerplate/ancillaries.h>
#include <boilerplate/debug.h>

28 /*
29  * CANCEL_DEFER/RESTORE() should enclose any emulator code prior to
30  * holding a lock, or invoking inner boilerplate/copperplate services
31  * (which usually do so), to change the system state. A proper cleanup
32  * handler should be pushed prior to acquire such lock.
33  *
34  * Those macros ensure that cancellation type is switched to deferred
35  * mode while the section is traversed, then restored to its original
36  * value upon exit.
37  *
38  * WARNING: inner services MAY ASSUME that cancellability is deferred
39  * for the caller, so you really want to define protected sections as
40  * required in the higher interface layers.
41  */
42 struct service {
43  int cancel_type;
44 };
45 
46 #ifdef CONFIG_XENO_ASYNC_CANCEL
47 
48 #define CANCEL_DEFER(__s) \
49  do { \
50  pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, \
51  &(__s).cancel_type); \
52  } while (0)
53 
54 #define CANCEL_RESTORE(__s) \
55  do { \
56  pthread_setcanceltype((__s).cancel_type, NULL); \
57  backtrace_check(); \
58  } while (0)
59 
60 #else /* !CONFIG_XENO_ASYNC_CANCEL */
61 
62 #define CANCEL_DEFER(__s) do { (void)(__s); } while (0)
63 
64 #define CANCEL_RESTORE(__s) do { } while (0)
65 
66 #endif /* !CONFIG_XENO_ASYNC_CANCEL */
67 
68 struct cleanup_block {
69  pthread_mutex_t *lock;
70  void (*handler)(void *arg);
71  void *arg;
72 };
73 
74 #define __push_cleanup_args(__cb, __lock, __fn, __arg) \
75  ((__cb)->lock = (__lock)), \
76  ((__cb)->handler = (void (*)(void *))(__fn)), \
77  ((__cb)->arg = (__arg))
78 
79 #define push_cleanup_handler(__cb, __lock, __fn, __arg) \
80  pthread_cleanup_push((void (*)(void *))__run_cleanup_block, \
81  (__push_cleanup_args(__cb, __lock, __fn, __arg), (__cb)))
82 
83 #define pop_cleanup_handler(__cb) \
84  pthread_cleanup_pop(0)
85 
86 #define push_cleanup_lock(__lock) \
87  pthread_cleanup_push((void (*)(void *))__RT(pthread_mutex_unlock), (__lock))
88 
89 #define pop_cleanup_lock(__lock) \
90  pthread_cleanup_pop(0)
91 
92 #ifdef CONFIG_XENO_DEBUG
93 int __check_cancel_type(const char *locktype);
94 #else
95 #define __check_cancel_type(__locktype) \
96  ({ (void)__locktype; 0; })
97 #endif
98 
99 #define __do_lock(__lock, __op) \
100  ({ \
101  int __ret; \
102  __ret = -__RT(pthread_mutex_##__op(__lock)); \
103  __ret; \
104  })
105 
106 #define __do_lock_nocancel(__lock, __type, __op) \
107  ({ \
108  __bt(__check_cancel_type(#__op "_nocancel")); \
109  __do_lock(__lock, __op); \
110  })
111 
112 #define __do_unlock(__lock) \
113  ({ \
114  int __ret; \
115  __ret = -__RT(pthread_mutex_unlock(__lock)); \
116  __ret; \
117  })
/*
 * Macros to enter/leave critical sections within inner
 * routines. Actually, they are mainly aimed at self-documenting the
 * code, by specifying basic assumption(s) about the code being
 * traversed. In effect, they are currently aliases to the standard
 * pthread_mutex_* API, except for the _safe form.
 *
 * The _nocancel suffix indicates that no cancellation point is
 * traversed by the protected code, therefore we don't need any
 * cleanup handler since we are guaranteed to run in deferred cancel
 * mode after CANCEL_DEFER(). A runtime check is inserted in
 * debug mode, which triggers when cancellability is not in deferred
 * mode while an attempt is made to acquire a _nocancel lock.
 *
 * read/write_lock() forms must be enclosed within the scope of a
 * cleanup handler since the protected code may reach cancellation
 * points. push_cleanup_lock() is a simple shorthand to push
 * pthread_mutex_unlock as the cleanup handler.
 */
137 #define read_lock(__lock) \
138  __do_lock(__lock, lock)
139 
140 #define read_trylock(__lock) \
141  __do_lock(__lock, trylock)
142 
143 #define read_lock_nocancel(__lock) \
144  __do_lock_nocancel(__lock, read_lock, lock)
145 
146 #define read_trylock_nocancel(__lock) \
147  __do_lock_nocancel(__lock, read_trylock, trylock)
148 
149 #define read_unlock(__lock) \
150  __do_unlock(__lock)
151 
152 #define write_lock(__lock) \
153  __do_lock(__lock, lock)
154 
155 #define write_trylock(__lock) \
156  __do_lock(__lock, trylock)
157 
158 #define write_lock_nocancel(__lock) \
159  __do_lock_nocancel(__lock, write_lock, lock)
160 
161 #define write_trylock_nocancel(__lock) \
162  __do_lock_nocancel(__lock, write_trylock, trylock)
163 
164 #define write_unlock(__lock) \
165  __do_unlock(__lock)
166 
167 #define __do_lock_safe(__lock, __state, __op) \
168  ({ \
169  int __ret, __oldstate; \
170  __bt(__check_cancel_type(#__op "_safe")); \
171  pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__oldstate); \
172  __ret = -__RT(pthread_mutex_##__op(__lock)); \
173  if (__ret) \
174  pthread_setcancelstate(__oldstate, NULL); \
175  __state = __oldstate; \
176  __ret; \
177  })
178 
179 #define __do_unlock_safe(__lock, __state) \
180  ({ \
181  int __ret, __restored_state = __state; \
182  __ret = -__RT(pthread_mutex_unlock(__lock)); \
183  pthread_setcancelstate(__restored_state, NULL); \
184  __ret; \
185  })
186 
/*
 * The _safe call form is available when undoing the changes from an
 * update section upon cancellation using a cleanup handler is not an
 * option (e.g. too complex), or in situations where the protected
 * code shall fully run; in such cases, cancellation is disabled
 * throughout the section.
 */

195 #define write_lock_safe(__lock, __state) \
196  __do_lock_safe(__lock, __state, lock)
197 
198 #define write_trylock_safe(__lock, __state) \
199  __do_lock_safe(__lock, __state, trylock)
200 
201 #define write_unlock_safe(__lock, __state) \
202  __do_unlock_safe(__lock, __state)
203 
204 #define read_lock_safe(__lock, __state) \
205  __do_lock_safe(__lock, __state, lock)
206 
207 #define read_unlock_safe(__lock, __state) \
208  __do_unlock_safe(__lock, __state)
209 
210 #ifdef CONFIG_XENO_DEBUG
211 #define mutex_type_attribute PTHREAD_MUTEX_ERRORCHECK
212 #else
213 #define mutex_type_attribute PTHREAD_MUTEX_NORMAL
214 #endif

#endif /* _BOILERPLATE_LOCK_H */