#ifndef _COBALT_KERNEL_THREAD_H
#define _COBALT_KERNEL_THREAD_H

#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <cobalt/kernel/list.h>
#include <cobalt/kernel/stat.h>
#include <cobalt/kernel/timer.h>
#include <cobalt/kernel/registry.h>
#include <cobalt/kernel/schedparam.h>
#include <cobalt/kernel/trace.h>
#include <cobalt/kernel/synch.h>
#include <cobalt/uapi/kernel/thread.h>
#include <cobalt/uapi/signal.h>
#include <asm/xenomai/machine.h>
#include <asm/xenomai/thread.h>
#define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNMIGRATE|XNHELD)
#define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
struct xnsched_tpslot;
struct xnthread_personality;
struct xnthread_init_attr {
        struct xnthread_personality *personality;
        /* ... */
};

struct xnthread_start_attr {
        /* ... */
        void (*entry)(void *cookie);
        /* ... */
};

struct xnthread_wait_context {
        int posted;
};
struct xnthread_personality {
        /* ... */
        struct {
                void *(*attach_process)(void);
                void (*detach_process)(void *arg);
                void (*map_thread)(struct xnthread *thread);
                struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
                struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
                struct xnthread_personality *(*move_thread)(struct xnthread *thread,
                                                            int dest_cpu);
                struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
                struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
        } ops;
        struct module *module;
};
struct xnthread {
        /* ... */
        struct xnsched_class *sched_class;
        struct xnsched_class *base_class;
#ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tpslot *tps;
        struct list_head tp_link;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic_data *pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        struct xnsched_quota_group *quota;
        struct list_head quota_expired;
        struct list_head quota_next;
#endif
        /* ... */
        struct list_head rlink;         /* Holder in the run queue */
        struct list_head plink;         /* Holder in a synch object's pend queue */
        struct list_head glink;         /* Holder in the global thread queue */
        struct list_head claimq;        /* Owned synchs claimed by waiters (PI) */
        struct xnsynch *wchan;          /* Resource the thread pends on */
        struct xnsynch *wwake;          /* Wait channel the thread was resumed from */
        struct xntimer rtimer;          /* Resource timer (timed waits) */
        struct xntimer ptimer;          /* Periodic timer */
        struct xnthread_wait_context *wcontext; /* Active wait context */
        xnstat_counter_t ssw;           /* Primary -> secondary mode switch count */
        xnstat_counter_t csw;           /* Context switch count */
        xnstat_counter_t xsc;           /* Cobalt syscall count */
        /* ... */
        xnstat_exectime_t account;      /* Execution time accounting entity */
        xnstat_exectime_t lastperiod;   /* Interval marker for execution time reports */
        struct xnselector *selector;    /* Selector block for select() support */
        char name[XNOBJECT_NAME_LEN];   /* Symbolic name */
        void (*entry)(void *cookie);    /* Thread entry routine */
        /* ... */
        struct xnthread_user_window *u_window; /* State window shared with user space */
        struct xnthread_personality *personality;
#ifdef CONFIG_XENO_OPT_DEBUG
        const char *exe_path;           /* Executable path */
        /* ... */
#endif
        struct xnsynch join_synch;      /* Joiners pend on this */
};
static inline int xnthread_get_state(const struct xnthread *thread)
{
        return thread->state;
}

static inline int xnthread_test_state(struct xnthread *thread, int bits)
{
        return thread->state & bits;
}

static inline void xnthread_set_state(struct xnthread *thread, int bits)
{
        thread->state |= bits;
}

static inline void xnthread_clear_state(struct xnthread *thread, int bits)
{
        thread->state &= ~bits;
}

static inline int xnthread_test_info(struct xnthread *thread, int bits)
{
        return thread->info & bits;
}

static inline void xnthread_set_info(struct xnthread *thread, int bits)
{
        thread->info |= bits;
}

static inline void xnthread_clear_info(struct xnthread *thread, int bits)
{
        thread->info &= ~bits;
}

static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
{
        return curr->local_info & bits;
}

static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
{
        curr->local_info |= bits;
}

static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
{
        curr->local_info &= ~bits;
}
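/*
 * Illustrative sketch (not part of this header): the helpers above are
 * plain bitmask accessors over the state/info/local_info words, so
 * callers usually combine them with the masks defined earlier, e.g.
 * XNTHREAD_BLOCK_BITS.  The function below is made up for the example.
 */
static inline int demo_thread_is_blocked(struct xnthread *thread)
{
        /* Non-zero if any blocking condition bit is currently set. */
        return xnthread_test_state(thread, XNTHREAD_BLOCK_BITS);
}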
static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
{
        return &thread->tcb;
}
static inline int xnthread_base_priority(const struct xnthread *thread)
{
        return thread->bprio;
}

static inline int xnthread_current_priority(const struct xnthread *thread)
{
        return thread->cprio;
}

static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
{
        return xnthread_archtcb(thread)->core.host_task;
}

static inline pid_t xnthread_host_pid(struct xnthread *thread)
{
        if (xnthread_test_state(thread, XNROOT))
                return 0;

        return xnthread_host_task(thread)->pid;
}
#define xnthread_for_each_claimed(__pos, __thread)              \
        list_for_each_entry(__pos, &(__thread)->claimq, link)

#define xnthread_for_each_claimed_safe(__pos, __tmp, __thread)  \
        list_for_each_entry_safe(__pos, __tmp, &(__thread)->claimq, link)
#define xnthread_run_handler(__t, __h, __a...)                          \
        do {                                                            \
                struct xnthread_personality *__p__ = (__t)->personality; \
                if ((__p__)->ops.__h)                                   \
                        (__p__)->ops.__h(__t, ##__a);                   \
        } while (0)

#define xnthread_run_handler_stack(__t, __h, __a...)                    \
        do {                                                            \
                struct xnthread_personality *__p__ = (__t)->personality; \
                do {                                                    \
                        if ((__p__)->ops.__h == NULL)                   \
                                break;                                  \
                        __p__ = (__p__)->ops.__h(__t, ##__a);           \
                } while (__p__);                                        \
        } while (0)
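/*
 * Illustrative sketch (not part of this header): a personality plugs
 * handlers into the ops table, and the dispatch macros above start from
 * thread->personality.  In xnthread_run_handler_stack(), a handler that
 * returns another personality pointer hands control to that one next,
 * while returning NULL ends the walk.  All names below are made up.
 */
static struct xnthread_personality *demo_relax_thread(struct xnthread *thread)
{
        /* React to @thread dropping back to the Linux domain. */
        return NULL;
}

static struct xnthread_personality demo_personality = {
        .module = THIS_MODULE,
        .ops = {
                .relax_thread = demo_relax_thread,
        },
};

/* Core-side dispatch would then look like:
 * xnthread_run_handler_stack(curr, relax_thread);
 */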
static inline
struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
{
        return thread->wcontext;
}
static inline
int xnthread_register(struct xnthread *thread, const char *name)
{
        return xnregistry_enter(name, thread, &thread->handle, NULL);
}
static inline
struct xnthread *xnthread_lookup(xnhandle_t threadh)
{
        struct xnthread *thread = xnregistry_lookup(threadh, NULL);

        return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
}
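/*
 * Illustrative sketch (not part of this header): once registered, a
 * thread's handle can be handed out and later resolved back with
 * xnthread_lookup(); a stale or foreign handle resolves to NULL.  The
 * helper below is made up for the example.
 */
static inline int demo_export_thread(struct xnthread *thread, xnhandle_t *handle_r)
{
        int ret = xnthread_register(thread, thread->name);

        if (ret)
                return ret;

        *handle_r = thread->handle;     /* later: xnthread_lookup(*handle_r) */
        return 0;
}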
static inline void xnthread_sync_window(struct xnthread *thread)
{
        if (thread->u_window) {
                thread->u_window->state = thread->state;
                thread->u_window->info = thread->info;
        }
}

static inline
void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
{
        if (thread->u_window) {
                thread->u_window->state = thread->state & ~state_bits;
                thread->u_window->info = thread->info;
        }
}

static inline
void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
{
        if (thread->u_window) {
                thread->u_window->state = thread->state | state_bits;
                thread->u_window->info = thread->info;
        }
}

static inline int normalize_priority(int prio)
{
        return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
}
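/*
 * Illustrative sketch (not part of this header): normalize_priority()
 * clamps a Cobalt priority into the range regular Linux accepts for
 * SCHED_FIFO/SCHED_RR, i.e. strictly below MAX_RT_PRIO (with the usual
 * MAX_RT_PRIO of 100, 257 maps to 99 while 42 passes through unchanged).
 */
static inline int demo_linux_rt_priority(struct xnthread *thread)
{
        return normalize_priority(xnthread_base_priority(thread));
}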
int __xnthread_init(struct xnthread *thread,
                    const struct xnthread_init_attr *attr,
                    struct xnsched *sched,
                    struct xnsched_class *sched_class,
                    const union xnsched_policy_param *sched_param);

void __xnthread_test_cancel(struct xnthread *curr);

void __xnthread_cleanup(struct xnthread *curr);

void __xnthread_discard(struct xnthread *thread);
/* Retrieve the current Cobalt core TCB. */
static inline struct xnthread *xnthread_current(void)
{
        return ipipe_current_threadinfo()->thread;
}

/* Retrieve the Cobalt core TCB attached to a Linux task. */
static inline struct xnthread *xnthread_from_task(struct task_struct *p)
{
        return ipipe_task_threadinfo(p)->thread;
}

/* Introduce a thread cancellation point. */
static inline void xnthread_test_cancel(void)
{
        struct xnthread *curr = xnthread_current();

        if (curr && xnthread_test_info(curr, XNCANCELD))
                __xnthread_test_cancel(curr);
}
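/*
 * Illustrative sketch (not part of this header): long-running kernel
 * services call xnthread_test_cancel() at safe points so that a pending
 * xnthread_cancel() request (XNCANCELD) is honored promptly.  The loop
 * below is made up for the example.
 */
static void demo_service_loop(void)
{
        for (;;) {
                xnthread_test_cancel(); /* cancellation point */
                /* ... perform one unit of work, or block interruptibly ... */
        }
}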
static inline void xnthread_complete_wait(struct xnthread_wait_context *wc)
{
        wc->posted = 1;
}

static inline int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
{
        return wc->posted;
}

#ifdef CONFIG_XENO_ARCH_FPU
void xnthread_switch_fpu(struct xnsched *sched);
#else
static inline void xnthread_switch_fpu(struct xnsched *sched) { }
#endif /* CONFIG_XENO_ARCH_FPU */

void xnthread_init_shadow_tcb(struct xnthread *thread);

void xnthread_init_root_tcb(struct xnthread *thread);

void xnthread_deregister(struct xnthread *thread);

char *xnthread_format_status(unsigned long status, char *buf, int size);

xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns);

xnticks_t xnthread_get_period(struct xnthread *thread);

void xnthread_prepare_wait(struct xnthread_wait_context *wc);
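/*
 * Illustrative sketch (not part of this header): a blocking service
 * typically embeds struct xnthread_wait_context into a larger per-wait
 * descriptor, calls xnthread_prepare_wait() before suspending, and the
 * waker posts the context with xnthread_complete_wait().  The waiter
 * then distinguishes a genuine wakeup from a timeout or forced unblock
 * via xnthread_wait_complete_p().  The descriptor below is made up.
 */
struct demo_wait_context {
        struct xnthread_wait_context wc;        /* base wait context */
        int payload;                            /* data posted by the waker */
};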
int xnthread_init(struct xnthread *thread,
                  const struct xnthread_init_attr *attr,
                  struct xnsched_class *sched_class,
                  const union xnsched_policy_param *sched_param);

int xnthread_start(struct xnthread *thread,
                   const struct xnthread_start_attr *attr);

void xnthread_suspend(struct xnthread *thread, int mask,
                      xnticks_t timeout, xntmode_t timeout_mode,
                      struct xnsynch *wchan);

int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate,
                          xntmode_t timeout_mode, xnticks_t period);
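/*
 * Illustrative sketch (not part of this header): bringing a Cobalt
 * thread to life is a two-step xnthread_init()/xnthread_start()
 * sequence.  Only the two signatures come from this header; the
 * attribute fields set below, the .rt.prio parameter layout and the
 * xnsched_class_rt class reference are assumptions for the example.
 */
static int demo_spawn(struct xnthread *thread,
                      void (*fn)(void *cookie), void *arg)
{
        struct xnthread_init_attr iattr = {
                .personality = &xenomai_personality,    /* assumed usage */
        };
        struct xnthread_start_attr sattr = {
                .entry = fn,
                .cookie = arg,          /* field assumed, elided from this listing */
        };
        union xnsched_policy_param param = {
                .rt = { .prio = 10 },   /* assumed member layout */
        };
        int ret;

        ret = xnthread_init(thread, &iattr, &xnsched_class_rt, &param);
        if (ret)
                return ret;

        return xnthread_start(thread, &sattr);
}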
int xnthread_join(struct xnthread *thread, bool uninterruptible);

void __xnthread_kick(struct xnthread *thread);

void xnthread_kick(struct xnthread *thread);

void __xnthread_demote(struct xnthread *thread);

void xnthread_demote(struct xnthread *thread);

void xnthread_signal(struct xnthread *thread, int sig, int arg);

void xnthread_pin_initial(struct xnthread *thread);

int xnthread_map(struct xnthread *thread, struct completion *done);

void xnthread_call_mayday(struct xnthread *thread, int reason);
static inline void xnthread_get_resource(struct xnthread *thread)
{
        if (xnthread_test_state(thread, XNWEAK) ||
            IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
                thread->res_count++;
}
static inline int xnthread_put_resource(struct xnthread *thread)
{
        if (xnthread_test_state(thread, XNWEAK) ||
            IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
                if (unlikely(thread->res_count == 0)) {
                        if (xnthread_test_state(thread, XNWARN))
                                xnthread_signal(thread, SIGDEBUG,
                                                SIGDEBUG_RESCNT_IMBALANCE);
                        return -EPERM;
                }
                thread->res_count--;
        }

        return 0;
}
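/*
 * Illustrative sketch (not part of this header): the resource counter
 * helpers bracket ownership of a core resource so that an unbalanced
 * release is detected and reported as SIGDEBUG/SIGDEBUG_RESCNT_IMBALANCE
 * to XNWARN threads.  The placeholder helpers below are made up.
 */
static inline void demo_resource_acquire(struct xnthread *curr)
{
        /* ... actually take the resource here ... */
        xnthread_get_resource(curr);
}

static inline int demo_resource_release(struct xnthread *curr)
{
        int ret = xnthread_put_resource(curr);

        if (ret)        /* -EPERM: count was already zero */
                return ret;

        /* ... actually drop the resource here ... */
        return 0;
}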
#ifdef CONFIG_SMP
int xnthread_migrate(int cpu);

void xnthread_migrate_passive(struct xnthread *thread,
                              struct xnsched *sched);
#else /* !CONFIG_SMP */
static inline int xnthread_migrate(int cpu)
{
        return cpu ? -EINVAL : 0;
}

static inline void xnthread_migrate_passive(struct xnthread *thread,
                                            struct xnsched *sched)
{ }
#endif /* CONFIG_SMP */
int __xnthread_set_schedparam(struct xnthread *thread,
                              struct xnsched_class *sched_class,
                              const union xnsched_policy_param *sched_param);

int xnthread_set_schedparam(struct xnthread *thread,
                            struct xnsched_class *sched_class,
                            const union xnsched_policy_param *sched_param);
int xnthread_killall(int grace, int mask);

extern struct xnthread_personality xenomai_personality;

#endif /* !_COBALT_KERNEL_THREAD_H */
Symbols referenced above, with their brief descriptions and definition sites:

  void xnthread_relax(int notify, int reason)
        Switch a shadow thread back to the Linux domain.  [thread.c:1986]
  void xnthread_cancel(struct xnthread *thread)
        Cancel a thread.  [thread.c:1479]
  #define XNCANCELD
        Cancellation request is pending.  [thread.h:72]
  static void xnthread_test_cancel(void)
        Introduce a thread cancellation point.  [thread.h:403]
  #define XNWARN
        Issue SIGDEBUG on error detection.  [thread.h:46]
  #define XNDEBUG
        User-level debugging enabled.  [thread.h:53]
  int xnregistry_enter(const char *key, void *objaddr, xnhandle_t *phandle, struct xnpnode *pnode)
        Register a real-time object.  [registry.c:627]
  void xnthread_suspend(struct xnthread *thread, int mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan)
        Suspend a thread.  [thread.c:844]
  #define XNWEAK
        Non-real-time shadow (from the WEAK class).  [thread.h:49]
  int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
        Set thread time-slicing information.  [thread.c:1420]
  #define XNROOT
        Root thread (that is, Linux/IDLE).  [thread.h:48]
  static struct xnthread *xnthread_current(void)
        Retrieve the current Cobalt core TCB.  [thread.h:373]
  int xnthread_wait_period(unsigned long *overruns_r)
        Wait for the next periodic release point.  [thread.c:1346]
  struct xnsched
        Scheduling information structure.  [sched.h:57]
  int xnthread_map(struct xnthread *thread, struct completion *done)
        Create a shadow thread context over a kernel task.  [thread.c:2401]
  int xnthread_set_mode(int clrmask, int setmask)
        Change control mode of the current thread.  [thread.c:750]
  static void *xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
        Find a real-time object in the registry.  [registry.h:175]
  int xnthread_init(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
        Initialize a new thread.  [thread.c:586]
  int xnthread_harden(void)
        Migrate a Linux task to the Xenomai domain.  [thread.c:1877]
  int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
        Make a thread periodic.  [thread.c:1265]
  int xnthread_start(struct xnthread *thread, const struct xnthread_start_attr *attr)
        Start a newly created thread.  [thread.c:667]
  int xnthread_unblock(struct xnthread *thread)
        Unblock a thread.  [thread.c:1178]
  static struct xnthread *xnthread_from_task(struct task_struct *p)
        Retrieve the Cobalt core TCB attached to a Linux task.  [thread.h:389]
  int xnthread_join(struct xnthread *thread, bool uninterruptible)
        Join with a terminated thread.  [thread.c:1565]
  int xnthread_set_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
        Change the base scheduling parameters of a thread.  [thread.c:1775]
  void xnthread_resume(struct xnthread *thread, int mask)
        Resume a thread.  [thread.c:1060]
  int xnthread_migrate(int cpu)
        Migrate the current thread.  [thread.c:1650]