Xenomai  3.0-rc7
thread.h
1 /*
2  * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  */
19 #ifndef _COBALT_KERNEL_THREAD_H
20 #define _COBALT_KERNEL_THREAD_H
21 
22 #include <linux/sched.h>
23 #include <linux/sched/rt.h>
24 #include <cobalt/kernel/list.h>
25 #include <cobalt/kernel/stat.h>
26 #include <cobalt/kernel/timer.h>
27 #include <cobalt/kernel/registry.h>
28 #include <cobalt/kernel/schedparam.h>
29 #include <cobalt/kernel/trace.h>
30 #include <cobalt/kernel/synch.h>
31 #include <cobalt/uapi/kernel/thread.h>
32 #include <cobalt/uapi/signal.h>
33 #include <asm/xenomai/machine.h>
34 #include <asm/xenomai/thread.h>
35 
/* State bits that mark a thread as blocked (i.e. not runnable). */
#define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNMIGRATE|XNHELD)
/* Mode bits a thread may toggle at runtime (see xnthread_set_mode()). */
#define XNTHREAD_MODE_BITS (XNRRB|XNWARN|XNTRAPLB)

/* Forward declarations; only pointers to these types are used below. */
struct xnthread;
struct xnsched;
struct xnselector;
struct xnsched_class;
struct xnsched_tpslot;
struct xnthread_personality;
struct completion;
50 
/* Creation attributes passed to xnthread_init()/__xnthread_init(). */
struct xnthread_init_attr {
	struct xnthread_personality *personality; /* Initial personality */
	cpumask_t affinity;	/* Initial CPU affinity mask */
	int flags;		/* Creation flags (XN* state bits) */
	const char *name;	/* Symbolic name, copied into the TCB */
};
57 
/* Start attributes passed to xnthread_start(). */
struct xnthread_start_attr {
	int mode;			/* Initial mode bits (XN* flags) */
	void (*entry)(void *cookie);	/* Thread entry routine */
	void *cookie;			/* Opaque argument passed to @entry */
};
63 
/*
 * Base wait-context descriptor. Embedded (and usually extended) by
 * callers of xnthread_prepare_wait(); @posted flips to 1 when the
 * wait is satisfied (see xnthread_complete_wait()).
 */
struct xnthread_wait_context {
	int posted;
};
67 
/*
 * Personality descriptor: per-interface (e.g. POSIX, native) identity
 * and life-cycle hooks invoked by the core via xnthread_run_handler().
 * Handlers run in the order the stacking macros define; any hook slot
 * may be left NULL.
 */
struct xnthread_personality {
	const char *name;	/* Interface name */
	unsigned int magic;	/* Magic cookie identifying the interface */
	int xid;		/* Personality (extension) identifier */
	atomic_t refcnt;	/* Threads/processes bound to this personality */
	struct {
		void *(*attach_process)(void);		/* Process binds to interface */
		void (*detach_process)(void *arg);	/* Process unbinds; @arg from attach */
		void (*map_thread)(struct xnthread *thread);	/* Thread mapped over a task */
		/*
		 * The following hooks may return the next personality to
		 * run the same handler for, enabling stacked personalities
		 * (see xnthread_run_handler_stack()).
		 */
		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
							    int dest_cpu);
		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
	} ops;
	struct module *module;	/* Owning kernel module, for refcounting */
};
86 
/*
 * Cobalt core thread control block (TCB). Field order is ABI-sensitive;
 * do not reorder.
 */
struct xnthread {
	struct xnarchtcb tcb;		/* Architecture-dependent block */

	__u32 state;			/* Thread state flags */
	__u32 info;			/* Thread information flags */
	__u32 local_info;		/* Local thread information flags */

	struct xnsched *sched;		/* Thread scheduler */
	struct xnsched_class *sched_class; /* Current scheduling class */
	struct xnsched_class *base_class;  /* Base scheduling class */

#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
	struct list_head tp_link;	/* Link in per-sched TP thread queue */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	struct xnsched_quota_group *quota; /* Quota scheduling group. */
	struct list_head quota_expired;
	struct list_head quota_next;
#endif

	unsigned int idtag;		/* Unique ID tag */

	cpumask_t affinity;		/* Processor affinity. */

	int bprio;			/* Base priority (before PIP boost) */

	int cprio;			/* Current priority */

	/*
	 * Weighted priority; presumably cprio scaled by the scheduling
	 * class weight for cross-class comparisons — confirm in sched code.
	 */
	int wprio;

	int lock_count;			/* Scheduler lock nesting count */

	/* Holder in the scheduler run queue — see sched implementation. */
	struct list_head rlink;

	/* Holder in a wait (pend) queue when blocked on @wchan. */
	struct list_head plink;

	/* Holder in the global thread queue. */
	struct list_head glink;

	/*
	 * List of resources claimed by this thread; iterated via
	 * xnthread_for_each_claimed() — used for priority boost tracking.
	 */
	struct list_head claimq;

	struct xnsynch *wchan;		/* Resource the thread pends on */

	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */

	int res_count;			/* Held resources count */

	struct xntimer rtimer;		/* Resource timer */

	struct xntimer ptimer;		/* Periodic timer */

	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */

	struct xnthread_wait_context *wcontext; /* Active wait context. */

	struct {
		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
		xnstat_counter_t xsc;	/* Xenomai syscalls */
		xnstat_counter_t pf;	/* Number of page faults */
		xnstat_exectime_t account;    /* Execution time accounting entity */
		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
	} stat;

	struct xnselector *selector;	/* For select. */

	xnhandle_t handle;		/* Handle in registry */

	char name[XNOBJECT_NAME_LEN];	/* Symbolic name of thread */

	void (*entry)(void *cookie);	/* Thread entry routine */
	void *cookie;			/* Cookie to pass to the entry routine */

	/*
	 * User-space window mirroring state/info flags to userland;
	 * NULL for kernel-only threads (see xnthread_sync_window()).
	 */
	struct xnthread_user_window *u_window;

	struct xnthread_personality *personality; /* Current personality */

#ifdef CONFIG_XENO_OPT_DEBUG
	const char *exe_path;		/* Executable path */
	u32 proghash;			/* Hash value for exe_path */
#endif

	struct xnsynch join_synch;	/* Synch object xnthread_join() waits on */
};
194 
195 static inline int xnthread_get_state(const struct xnthread *thread)
196 {
197  return thread->state;
198 }
199 
200 static inline int xnthread_test_state(struct xnthread *thread, int bits)
201 {
202  return thread->state & bits;
203 }
204 
205 static inline void xnthread_set_state(struct xnthread *thread, int bits)
206 {
207  thread->state |= bits;
208 }
209 
210 static inline void xnthread_clear_state(struct xnthread *thread, int bits)
211 {
212  thread->state &= ~bits;
213 }
214 
215 static inline int xnthread_test_info(struct xnthread *thread, int bits)
216 {
217  return thread->info & bits;
218 }
219 
220 static inline void xnthread_set_info(struct xnthread *thread, int bits)
221 {
222  thread->info |= bits;
223 }
224 
225 static inline void xnthread_clear_info(struct xnthread *thread, int bits)
226 {
227  thread->info &= ~bits;
228 }
229 
230 static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
231 {
232  return curr->local_info & bits;
233 }
234 
235 static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
236 {
237  curr->local_info |= bits;
238 }
239 
240 static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
241 {
242  curr->local_info &= ~bits;
243 }
244 
245 static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
246 {
247  return &thread->tcb;
248 }
249 
250 static inline int xnthread_base_priority(const struct xnthread *thread)
251 {
252  return thread->bprio;
253 }
254 
255 static inline int xnthread_current_priority(const struct xnthread *thread)
256 {
257  return thread->cprio;
258 }
259 
260 static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
261 {
262  return xnthread_archtcb(thread)->core.host_task;
263 }
264 
265 static inline pid_t xnthread_host_pid(struct xnthread *thread)
266 {
267  if (xnthread_test_state(thread, XNROOT))
268  return 0;
269 
270  return xnthread_host_task(thread)->pid;
271 }
272 
/*
 * Iterate over the resources on @__thread's claim queue. __pos is the
 * cursor (presumably a struct xnsynch *, linked via its 'link' member
 * — confirm against synch.h). Use the _safe variant when entries may
 * be removed during traversal.
 */
#define xnthread_for_each_claimed(__pos, __thread)		\
	list_for_each_entry(__pos, &(__thread)->claimq, link)

#define xnthread_for_each_claimed_safe(__pos, __tmp, __thread)	\
	list_for_each_entry_safe(__pos, __tmp, &(__thread)->claimq, link)
278 
/*
 * Invoke personality hook __h on __t's current personality, if that
 * slot is non-NULL. Extra arguments are forwarded to the hook.
 */
#define xnthread_run_handler(__t, __h, __a...)				\
	do {								\
		struct xnthread_personality *__p__ = (__t)->personality; \
		if ((__p__)->ops.__h)					\
			(__p__)->ops.__h(__t, ##__a);			\
	} while (0)

/*
 * Like xnthread_run_handler(), but for stacked personalities: each
 * hook returns the next personality to run the same hook for, and
 * the chain stops at the first NULL slot or NULL return.
 */
#define xnthread_run_handler_stack(__t, __h, __a...)			\
	do {								\
		struct xnthread_personality *__p__ = (__t)->personality; \
		do {							\
			if ((__p__)->ops.__h == NULL)			\
				break;					\
			__p__ = (__p__)->ops.__h(__t, ##__a);		\
		} while (__p__);					\
	} while (0)
295 
296 static inline
297 struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
298 {
299  return thread->wcontext;
300 }
301 
302 static inline
303 int xnthread_register(struct xnthread *thread, const char *name)
304 {
305  return xnregistry_enter(name, thread, &thread->handle, NULL);
306 }
307 
308 static inline
309 struct xnthread *xnthread_lookup(xnhandle_t threadh)
310 {
311  struct xnthread *thread = xnregistry_lookup(threadh, NULL);
312  return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
313 }
314 
315 static inline void xnthread_sync_window(struct xnthread *thread)
316 {
317  if (thread->u_window) {
318  thread->u_window->state = thread->state;
319  thread->u_window->info = thread->info;
320  }
321 }
322 
323 static inline
324 void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
325 {
326  if (thread->u_window) {
327  thread->u_window->state = thread->state & ~state_bits;
328  thread->u_window->info = thread->info;
329  }
330 }
331 
332 static inline
333 void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
334 {
335  if (thread->u_window) {
336  thread->u_window->state = thread->state | state_bits;
337  thread->u_window->info = thread->info;
338  }
339 }
340 
341 static inline int normalize_priority(int prio)
342 {
343  return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
344 }
345 
/* Low-level TCB initialization on @sched with the given class/params. */
int __xnthread_init(struct xnthread *thread,
		    const struct xnthread_init_attr *attr,
		    struct xnsched *sched,
		    struct xnsched_class *sched_class,
		    const union xnsched_policy_param *sched_param);

/* Act upon a pending cancellation request for @curr. */
void __xnthread_test_cancel(struct xnthread *curr);

/* Release all core resources attached to @curr upon exit. */
void __xnthread_cleanup(struct xnthread *curr);

/* Drop @thread from the core queues without finalizing it. */
void __xnthread_discard(struct xnthread *thread);
357 
/*
 * Retrieve the current Cobalt core TCB from the I-pipe per-task thread
 * info; NULL for tasks with no Cobalt shadow.
 */
static inline struct xnthread *xnthread_current(void)
{
	return ipipe_current_threadinfo()->thread;
}
377 
/*
 * Retrieve the Cobalt core TCB attached to Linux task @p via its
 * I-pipe thread info; NULL if @p has no Cobalt shadow.
 */
static inline struct xnthread *xnthread_from_task(struct task_struct *p)
{
	return ipipe_task_threadinfo(p)->thread;
}
393 
403 static inline void xnthread_test_cancel(void)
404 {
405  struct xnthread *curr = xnthread_current();
406 
407  if (curr && xnthread_test_info(curr, XNCANCELD))
408  __xnthread_test_cancel(curr);
409 }
410 
/* Mark the wait described by @wc as satisfied. */
static inline
void xnthread_complete_wait(struct xnthread_wait_context *wc)
{
	wc->posted = 1;
}
416 
/* Non-zero if the wait described by @wc was satisfied. */
static inline
int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
{
	return wc->posted;
}
422 
#ifdef CONFIG_XENO_ARCH_FPU
/* Hand over the FPU context on @sched; arch-specific implementation. */
void xnthread_switch_fpu(struct xnsched *sched);
#else
/* No FPU support configured: nothing to switch. */
static inline void xnthread_switch_fpu(struct xnsched *sched) { }
#endif /* CONFIG_XENO_ARCH_FPU */
428 
/* Set up the arch TCB of a shadow (user-backed) thread. */
void xnthread_init_shadow_tcb(struct xnthread *thread);

/* Set up the arch TCB of a per-CPU root thread. */
void xnthread_init_root_tcb(struct xnthread *thread);

/* Remove @thread from the registry (reverse of xnthread_register()). */
void xnthread_deregister(struct xnthread *thread);

/* Render @status flags as a human-readable string into @buf. */
char *xnthread_format_status(unsigned long status, char *buf, int size);

/* Return the time left before @thread's timeout, relative to @ns. */
xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns);

/* Return @thread's current release period. */
xnticks_t xnthread_get_period(struct xnthread *thread);

/* Install @wc as the caller's active wait context. */
void xnthread_prepare_wait(struct xnthread_wait_context *wc);

/* Initialize a new thread (see thread.c). */
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param);

/* Start a newly created thread. */
int xnthread_start(struct xnthread *thread,
		   const struct xnthread_start_attr *attr);

/* Change control mode bits of the current thread. */
int xnthread_set_mode(int clrmask,
		      int setmask);

/* Suspend @thread, optionally with a timeout and wait channel. */
void xnthread_suspend(struct xnthread *thread,
		      int mask,
		      xnticks_t timeout,
		      xntmode_t timeout_mode,
		      struct xnsynch *wchan);

/* Resume @thread from the conditions in @mask. */
void xnthread_resume(struct xnthread *thread,
		     int mask);

/* Unblock @thread from a blocked state. */
int xnthread_unblock(struct xnthread *thread);

/* Make @thread periodic (initial date, timeout mode, period). */
int xnthread_set_periodic(struct xnthread *thread,
			  xnticks_t idate,
			  xntmode_t timeout_mode,
			  xnticks_t period);

/* Wait for the next periodic release point; reports overruns. */
int xnthread_wait_period(unsigned long *overruns_r);

/* Set @thread's round-robin time slice. */
int xnthread_set_slice(struct xnthread *thread,
		       xnticks_t quantum);

/* Request cancellation of @thread. */
void xnthread_cancel(struct xnthread *thread);

/* Join with a terminated thread. */
int xnthread_join(struct xnthread *thread, bool uninterruptible);

/* Migrate the current Linux task to the Xenomai (primary) domain. */
int xnthread_harden(void);

/* Switch the current shadow back to the Linux (secondary) domain. */
void xnthread_relax(int notify, int reason);

/* Kick helpers: force @thread out of the primary domain. */
void __xnthread_kick(struct xnthread *thread);

void xnthread_kick(struct xnthread *thread);

/* Demote helpers — see thread.c for exact semantics. */
void __xnthread_demote(struct xnthread *thread);

void xnthread_demote(struct xnthread *thread);

/* Send in-band signal @sig (with @arg) to @thread. */
void xnthread_signal(struct xnthread *thread,
		     int sig, int arg);

/* Pin @thread on its initial CPU — see thread.c. */
void xnthread_pin_initial(struct xnthread *thread);

/* Create a shadow thread context over the current kernel task. */
int xnthread_map(struct xnthread *thread,
		 struct completion *done);

/* Trigger the mayday mechanism for @thread, for @reason. */
void xnthread_call_mayday(struct xnthread *thread, int reason);
500 
501 static inline void xnthread_get_resource(struct xnthread *thread)
502 {
503  if (xnthread_test_state(thread, XNWEAK|XNDEBUG))
504  thread->res_count++;
505 }
506 
507 static inline int xnthread_put_resource(struct xnthread *thread)
508 {
509  if (xnthread_test_state(thread, XNWEAK) ||
510  IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
511  if (unlikely(thread->res_count == 0)) {
512  if (xnthread_test_state(thread, XNWARN))
513  xnthread_signal(thread, SIGDEBUG,
514  SIGDEBUG_RESCNT_IMBALANCE);
515  return -EPERM;
516  }
517  thread->res_count--;
518  }
519 
520  return 0;
521 }
522 
#ifdef CONFIG_SMP
/* Migrate the current thread to @cpu. */
int xnthread_migrate(int cpu);

/* Move a non-running @thread over to @sched. */
void xnthread_migrate_passive(struct xnthread *thread,
			      struct xnsched *sched);
#else

/* UP fallback: only "migrating" to CPU 0 makes sense. */
static inline int xnthread_migrate(int cpu)
{
	return cpu ? -EINVAL : 0;
}

/* UP fallback: nothing to move. */
static inline void xnthread_migrate_passive(struct xnthread *thread,
					    struct xnsched *sched)
{ }

#endif
540 
/* Low-level scheduling parameter change (caller holds the lock). */
int __xnthread_set_schedparam(struct xnthread *thread,
			      struct xnsched_class *sched_class,
			      const union xnsched_policy_param *sched_param);

/* Change the base scheduling class/parameters of @thread. */
int xnthread_set_schedparam(struct xnthread *thread,
			    struct xnsched_class *sched_class,
			    const union xnsched_policy_param *sched_param);

/* Cancel all threads matching @mask, waiting up to @grace. */
int xnthread_killall(int grace, int mask);

/* Built-in Cobalt core personality descriptor. */
extern struct xnthread_personality xenomai_personality;
552 
555 #endif /* !_COBALT_KERNEL_THREAD_H */
void xnthread_relax(int notify, int reason)
Switch a shadow thread back to the Linux domain.
Definition: thread.c:1986
void xnthread_cancel(struct xnthread *thread)
Cancel a thread.
Definition: thread.c:1479
#define XNCANCELD
Cancellation request is pending.
Definition: thread.h:72
static void xnthread_test_cancel(void)
Introduce a thread cancellation point.
Definition: thread.h:403
#define XNWARN
Issue SIGDEBUG on error detection.
Definition: thread.h:46
#define XNDEBUG
User-level debugging enabled.
Definition: thread.h:53
int xnregistry_enter(const char *key, void *objaddr, xnhandle_t *phandle, struct xnpnode *pnode)
Register a real-time object.
Definition: registry.c:627
void xnthread_suspend(struct xnthread *thread, int mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan)
Suspend a thread.
Definition: thread.c:844
#define XNWEAK
Non real-time shadow (from the WEAK class)
Definition: thread.h:49
int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
Set thread time-slicing information.
Definition: thread.c:1420
#define XNROOT
Root thread (that is, Linux/IDLE)
Definition: thread.h:48
static struct xnthread * xnthread_current(void)
Retrieve the current Cobalt core TCB.
Definition: thread.h:373
int xnthread_wait_period(unsigned long *overruns_r)
Wait for the next periodic release point.
Definition: thread.c:1346
Scheduling information structure.
Definition: sched.h:57
int xnthread_map(struct xnthread *thread, struct completion *done)
Create a shadow thread context over a kernel task.
Definition: thread.c:2401
int xnthread_set_mode(int clrmask, int setmask)
Change control mode of the current thread.
Definition: thread.c:750
static void * xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
Find a real-time object into the registry.
Definition: registry.h:175
int xnthread_init(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Initialize a new thread.
Definition: thread.c:586
int xnthread_harden(void)
Migrate a Linux task to the Xenomai domain.
Definition: thread.c:1877
int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
Make a thread periodic.
Definition: thread.c:1265
int xnthread_start(struct xnthread *thread, const struct xnthread_start_attr *attr)
Start a newly created thread.
Definition: thread.c:667
Copyright © 2011 Gilles Chanteperdrix gilles.chanteperdrix@xenomai.org.
Definition: atomic.h:24
int xnthread_unblock(struct xnthread *thread)
Unblock a thread.
Definition: thread.c:1178
static struct xnthread * xnthread_from_task(struct task_struct *p)
Retrieve the Cobalt core TCB attached to a Linux task.
Definition: thread.h:389
int xnthread_join(struct xnthread *thread, bool uninterruptible)
Join with a terminated thread.
Definition: thread.c:1565
int xnthread_set_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Change the base scheduling parameters of a thread.
Definition: thread.c:1775
void xnthread_resume(struct xnthread *thread, int mask)
Resume a thread.
Definition: thread.c:1060
int xnthread_migrate(int cpu)
Migrate the current thread.
Definition: thread.c:1650