Xenomai 3.0-rc7
rtskb.h
/***
 *
 *  include/rtskb.h
 *
 *  RTnet - real-time networking subsystem
 *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>,
 *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef __RTSKB_H_
#define __RTSKB_H_

#ifdef __KERNEL__

#include <linux/skbuff.h>

#include <rtnet.h>
#include <rtnet_internal.h>


/***

rtskb Management - A Short Introduction
---------------------------------------

1. rtskbs (Real-Time Socket Buffers)

A rtskb consists of a management structure (struct rtskb) and a fixed-size
(RTSKB_SIZE) data buffer. It is used to store network packets on their way from
the API routines through the stack to the NICs or vice versa. rtskbs are
allocated as one chunk of memory which contains both the management structure
and the buffer memory itself.
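
Illustration: the single-chunk layout means the data buffer starts right
behind the (aligned) management structure. A minimal allocation sketch, with
my_rtskb_cache as a hypothetical slab cache name:

    struct rtskb *skb = kmem_cache_alloc(my_rtskb_cache, GFP_KERNEL);

    skb->buf_start = (unsigned char *)skb + ALIGN_RTSKB_STRUCT_LEN;
    skb->data = skb->tail = skb->buf_start;
    skb->len = 0;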


2. rtskb Queues

A rtskb queue is described by struct rtskb_queue. A queue can contain an
unlimited number of rtskbs in an ordered way. A rtskb can either be added to
the head (rtskb_queue_head()) or the tail of a queue (rtskb_queue_tail()). When
a rtskb is removed from a queue (rtskb_dequeue()), it is always taken from the
head. Queues are normally spin-lock protected unless the __-prefixed variants
of the queuing functions are used.
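
A short usage sketch (assuming skb was obtained elsewhere, e.g. via
alloc_rtskb(), and ignoring error handling):

    struct rtskb_queue queue;

    rtskb_queue_init(&queue);
    rtskb_queue_tail(&queue, skb);
    skb = rtskb_dequeue(&queue);

Since dequeuing always takes from the head, tail-insertion yields FIFO and
head-insertion LIFO behavior.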


3. Prioritized rtskb Queues

A prioritized queue contains a number of normal rtskb queues within an array.
The array index of a sub-queue corresponds to the priority of the rtskbs within
this queue. For enqueuing a rtskb (rtskb_prio_queue_head()), its priority field
is evaluated and the rtskb is then placed into the appropriate sub-queue. When
dequeuing a rtskb, the first rtskb of the highest-priority non-empty sub-queue
is returned. The current implementation supports 32 different priority levels;
the lowest is defined by QUEUE_MIN_PRIO, the highest by QUEUE_MAX_PRIO.
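
Note that a smaller index means a higher priority: QUEUE_MAX_PRIO is 0,
QUEUE_MIN_PRIO is 31. A minimal sketch:

    struct rtskb_prio_queue prioqueue;

    rtskb_prio_queue_init(&prioqueue);
    skb->priority = QUEUE_MAX_PRIO;
    rtskb_prio_queue_tail(&prioqueue, skb);
    skb = rtskb_prio_dequeue(&prioqueue);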


4. rtskb Pools

As rtskbs must not be allocated by a normal memory manager during runtime,
preallocated rtskbs are kept ready in several pools. Most packet producers
(NICs, sockets, etc.) have their own pools in order to be independent of the
load situation of other parts of the stack.

When a pool is created (rtskb_pool_init()), the required rtskbs are allocated
from a Linux slab cache. Pools can be extended (rtskb_pool_extend()) or
shrunk (rtskb_pool_shrink()) during runtime. When shutting down the
program/module, every pool has to be released (rtskb_pool_release()). All these
operations must be performed in a non-real-time context.

Pools are organized as normal rtskb queues (struct rtskb_queue). When a rtskb
is allocated (alloc_rtskb()), it is actually dequeued from the pool's queue.
When freeing a rtskb (kfree_rtskb()), the rtskb is enqueued to its owning pool.
rtskbs can be exchanged between pools (rtskb_acquire()). In this case, the
passed rtskb switches from its owning pool to a given pool, but only if that
pool can pass an empty rtskb from its own queue back.
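
A typical pool lifecycle, sketched for a module that needs 16 buffers (error
handling shortened):

    static struct rtskb_pool pool;

    if (rtskb_module_pool_init(&pool, 16) < 16)
        goto cleanup;

    skb = alloc_rtskb(RTSKB_SIZE, &pool);
    ...
    kfree_rtskb(skb);

    rtskb_pool_release(&pool);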


5. rtskb Chains

To ease the defragmentation of larger IP packets, several rtskbs can form a
chain. For these purposes, the first rtskb (and only the first!) provides a
pointer to the last rtskb in the chain. When enqueuing the first rtskb of a
chain, the whole chain is automatically placed into the destined queue.
Dequeuing a complete chain, however, requires specialized calls (postfix:
_chain). While chains also get freed en bloc (kfree_rtskb()) when passing the
first rtskb, it is not possible to allocate a chain from a pool
(alloc_rtskb()); a newly allocated rtskb is always reset to a "single rtskb
chain". Furthermore, the acquisition of complete chains is NOT supported
(rtskb_acquire()).
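
Chains are linked through the regular next pointers; only chain_end of the
first rtskb is kept up to date. A sketch of appending a single rtskb frag to
a chain headed by first, then moving the whole chain through a queue:

    first->chain_end->next = frag;
    first->chain_end = frag;

    rtskb_queue_tail(&queue, first);
    first = rtskb_dequeue_chain(&queue);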


6. Capturing Support (Optional)

When incoming or outgoing packets are captured, the assigned rtskb needs to be
shared between the stack, the driver, and the capturing service. In contrast to
many other network stacks, RTnet does not create a new rtskb head and
re-reference the payload. Instead, additional fields at the end of the rtskb
structure are used for sharing a rtskb with a capturing service. If the sharing
bit (RTSKB_CAP_SHARED) in cap_flags is set, the rtskb will not be returned to
the owning pool when kfree_rtskb() is called. Instead, this bit will be reset,
and a compensation rtskb stored in cap_comp_skb will be returned to the owning
pool. cap_start and cap_len can be used to mirror the dimension of the full
packet. This is required because the data and len fields will be modified while
walking through the stack. cap_next allows adding a rtskb to a separate queue
which is independent of any queue described in 2.

Certain setup tasks for capturing packets cannot become part of a capturing
module; they have to be embedded into the stack. For this purpose, several
inline functions are provided. rtcap_mark_incoming() is used to save the packet
dimension right before it is modified by the stack. rtcap_report_incoming()
calls the capturing handler, if present, in order to let it process the
received rtskb (e.g. allocate a compensation rtskb, mark the original rtskb as
shared, and enqueue it).

Outgoing rtskbs have to be captured by adding a hook function to the chain of
hard_start_xmit functions of a device. To measure the delay caused by RTmac
between the request and the actual transmission, a time stamp can be taken using
rtcap_mark_rtmac_enqueue(). This function is typically called by RTmac
disciplines when they add a rtskb to their internal transmission queue. In such
a case, the RTSKB_CAP_RTMAC_STAMP bit is set in cap_flags to indicate that the
cap_rtmac_stamp field now contains valid data.
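
Pulling these pieces together, a capturing handler could share a rtskb roughly
as follows (my_cap_pool and my_cap_enqueue() are hypothetical, and locking is
omitted):

    static void my_cap_handler(struct rtskb *skb)
    {
        struct rtskb *comp = alloc_rtskb(RTSKB_SIZE, &my_cap_pool);

        if (comp) {
            skb->cap_flags |= RTSKB_CAP_SHARED;
            skb->cap_comp_skb = comp;
            my_cap_enqueue(skb);
        }
    }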

 ***/


#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif

#define RTSKB_CAP_SHARED        1   /* rtskb shared between stack and RTcap */
#define RTSKB_CAP_RTMAC_STAMP   2   /* cap_rtmac_stamp is valid */

#define RTSKB_UNMAPPED          0

struct rtskb_queue;
struct rtsocket;
struct rtnet_device;

/***
 *  rtskb - realtime socket buffer
 */
struct rtskb {
    struct rtskb *next;         /* used for queuing rtskbs */
    struct rtskb *chain_end;    /* marks the end of a rtskb chain starting
                                   with this very rtskb */

    struct rtskb_pool *pool;    /* owning pool */

    unsigned int priority;      /* bit 0..15: prio, 16..31: user-defined */

    struct rtsocket *sk;        /* assigned socket */
    struct rtnet_device *rtdev; /* source or destination device */

    nanosecs_abs_t time_stamp;  /* arrival or transmission (RTcap) time */

    /* patch address of the transmission time stamp, can be NULL
     * calculation: *xmit_stamp = cpu_to_be64(time_in_ns + *xmit_stamp)
     */
    nanosecs_abs_t *xmit_stamp;

    /* transport layer */
    union
    {
        struct tcphdr *th;
        struct udphdr *uh;
        struct icmphdr *icmph;
        struct iphdr *ipihdr;
        unsigned char *raw;
    } h;

    /* network layer */
    union
    {
        struct iphdr *iph;
        struct arphdr *arph;
        unsigned char *raw;
    } nh;

    /* link layer */
    union
    {
        struct ethhdr *ethernet;
        unsigned char *raw;
    } mac;

    unsigned short protocol;
    unsigned char pkt_type;

    unsigned char ip_summed;
    unsigned int csum;

    unsigned char *data;
    unsigned char *tail;
    unsigned char *end;
    unsigned int len;

    dma_addr_t buf_dma_addr;

    unsigned char *buf_start;

#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
    unsigned char *buf_end;
#endif

#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
    int cap_flags;                  /* see RTSKB_CAP_xxx */
    struct rtskb *cap_comp_skb;     /* compensation rtskb */
    struct rtskb *cap_next;         /* used for capture queue */
    unsigned char *cap_start;       /* start offset for capturing */
    unsigned int cap_len;           /* capture length of this rtskb */
    nanosecs_abs_t cap_rtmac_stamp; /* RTmac enqueuing time */
#endif

    struct list_head entry;         /* for global rtskb list */
};

struct rtskb_queue {
    struct rtskb *first;
    struct rtskb *last;
    rtdm_lock_t lock;
};

struct rtskb_pool_lock_ops {
    int (*trylock)(void *cookie);
    void (*unlock)(void *cookie);
};

struct rtskb_pool {
    struct rtskb_queue queue;
    const struct rtskb_pool_lock_ops *lock_ops;
    unsigned lock_count;
    void *lock_cookie;
};
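
/*
 * The lock_ops allow a pool to pin its owner for as long as rtskbs are in
 * use. A plausible setup for a module-owned pool - an illustrative sketch
 * only, the stock module pools are created via rtskb_module_pool_init():
 *
 *  static int pool_trylock(void *cookie)
 *  {
 *      return try_module_get((struct module *)cookie);
 *  }
 *
 *  static void pool_unlock(void *cookie)
 *  {
 *      module_put((struct module *)cookie);
 *  }
 *
 *  static const struct rtskb_pool_lock_ops pool_lock_ops = {
 *      .trylock = pool_trylock,
 *      .unlock  = pool_unlock,
 *  };
 */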

#define QUEUE_MAX_PRIO          0
#define QUEUE_MIN_PRIO          31

struct rtskb_prio_queue {
    rtdm_lock_t lock;
    unsigned long usage;    /* bit array encoding non-empty sub-queues */
    struct rtskb_queue queue[QUEUE_MIN_PRIO+1];
};

#define RTSKB_PRIO_MASK         0x0000FFFF  /* bits  0..15: xmit prio    */
#define RTSKB_CHANNEL_MASK      0xFFFF0000  /* bits 16..31: xmit channel */
#define RTSKB_CHANNEL_SHIFT     16
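
/*
 * Decoding the priority field, derived from the masks above (sketch):
 *
 *  unsigned int prio    = skb->priority & RTSKB_PRIO_MASK;
 *  unsigned int channel = (skb->priority & RTSKB_CHANNEL_MASK) >>
 *                              RTSKB_CHANNEL_SHIFT;
 */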

#define RTSKB_DEF_RT_CHANNEL    SOCK_DEF_RT_CHANNEL
#define RTSKB_DEF_NRT_CHANNEL   SOCK_DEF_NRT_CHANNEL
#define RTSKB_USER_CHANNEL      SOCK_USER_CHANNEL

/* Note: always keep SOCK_XMIT_PARAMS consistent with definitions above! */
#define RTSKB_PRIO_VALUE        SOCK_XMIT_PARAMS


/* default values for the module parameter */
#define DEFAULT_GLOBAL_RTSKBS   0   /* default number of rtskb's in global pool */
#define DEFAULT_DEVICE_RTSKBS   16  /* default additional rtskbs per network adapter */
#define DEFAULT_SOCKET_RTSKBS   16  /* default number of rtskb's in socket pools */

#define ALIGN_RTSKB_STRUCT_LEN  SKB_DATA_ALIGN(sizeof(struct rtskb))
#define RTSKB_SIZE              1544    /* maximum needed by pcnet32-rt */

extern unsigned int rtskb_pools;        /* current number of rtskb pools      */
extern unsigned int rtskb_pools_max;    /* maximum number of rtskb pools      */
extern unsigned int rtskb_amount;       /* current number of allocated rtskbs */
extern unsigned int rtskb_amount_max;   /* maximum number of allocated rtskbs */

#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
extern void rtskb_over_panic(struct rtskb *skb, int len, void *here);
extern void rtskb_under_panic(struct rtskb *skb, int len, void *here);
#endif

extern struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool);

extern void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb);

extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool);

extern void kfree_rtskb(struct rtskb *skb);
#define dev_kfree_rtskb(a)  kfree_rtskb(a)


#define rtskb_checksum_none_assert(skb)  (skb->ip_summed = CHECKSUM_NONE)

static inline void rtskb_tx_timestamp(struct rtskb *skb)
{
    nanosecs_abs_t *ts = skb->xmit_stamp;

    if (!ts)
        return;

    *ts = cpu_to_be64(rtdm_clock_read() + *ts);
}
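
/*
 * As described at the xmit_stamp field, a sender that wants to learn the
 * actual transmission time pre-loads the patch location with an offset
 * (often 0) and lets the driver call rtskb_tx_timestamp() right before the
 * hardware hand-over. A sketch, with stamp_slot as a hypothetical target:
 *
 *  skb->xmit_stamp = &stamp_slot;
 *  *skb->xmit_stamp = 0;
 *      ...
 *  rtskb_tx_timestamp(skb);    <- in the driver's xmit routine
 */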

/***
 *  rtskb_queue_init - initialize the queue
 *  @queue
 */
static inline void rtskb_queue_init(struct rtskb_queue *queue)
{
    rtdm_lock_init(&queue->lock);
    queue->first = NULL;
    queue->last = NULL;
}

/***
 *  rtskb_prio_queue_init - initialize the prioritized queue
 *  @prioqueue
 */
static inline void rtskb_prio_queue_init(struct rtskb_prio_queue *prioqueue)
{
    memset(prioqueue, 0, sizeof(struct rtskb_prio_queue));
    rtdm_lock_init(&prioqueue->lock);
}

/***
 *  rtskb_queue_empty
 *  @queue
 */
static inline int rtskb_queue_empty(struct rtskb_queue *queue)
{
    return (queue->first == NULL);
}

/***
 *  rtskb_prio_queue_empty
 *  @prioqueue
 */
static inline int rtskb_prio_queue_empty(struct rtskb_prio_queue *prioqueue)
{
    return (prioqueue->usage == 0);
}

/***
 *  __rtskb_queue_head - insert a buffer at the queue head (w/o locks)
 *  @queue: queue to use
 *  @skb: buffer to queue
 */
static inline void __rtskb_queue_head(struct rtskb_queue *queue,
                                      struct rtskb *skb)
{
    struct rtskb *chain_end = skb->chain_end;

    chain_end->next = queue->first;

    if (queue->first == NULL)
        queue->last = chain_end;
    queue->first = skb;
}

/***
 *  rtskb_queue_head - insert a buffer at the queue head (lock protected)
 *  @queue: queue to use
 *  @skb: buffer to queue
 */
static inline void rtskb_queue_head(struct rtskb_queue *queue, struct rtskb *skb)
{
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&queue->lock, context);
    __rtskb_queue_head(queue, skb);
    rtdm_lock_put_irqrestore(&queue->lock, context);
}

/***
 *  __rtskb_prio_queue_head - insert a buffer at the prioritized queue head
 *                            (w/o locks)
 *  @prioqueue: queue to use
 *  @skb: buffer to queue
 */
static inline void __rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
                                           struct rtskb *skb)
{
    unsigned int prio = skb->priority & RTSKB_PRIO_MASK;

    RTNET_ASSERT(prio <= 31, prio = 31;);

    __rtskb_queue_head(&prioqueue->queue[prio], skb);
    __set_bit(prio, &prioqueue->usage);
}

/***
 *  rtskb_prio_queue_head - insert a buffer at the prioritized queue head
 *                          (lock protected)
 *  @prioqueue: queue to use
 *  @skb: buffer to queue
 */
static inline void rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
                                         struct rtskb *skb)
{
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&prioqueue->lock, context);
    __rtskb_prio_queue_head(prioqueue, skb);
    rtdm_lock_put_irqrestore(&prioqueue->lock, context);
}

/***
 *  __rtskb_queue_tail - insert a buffer at the queue tail (w/o locks)
 *  @queue: queue to use
 *  @skb: buffer to queue
 */
static inline void __rtskb_queue_tail(struct rtskb_queue *queue,
                                      struct rtskb *skb)
{
    struct rtskb *chain_end = skb->chain_end;

    chain_end->next = NULL;

    if (queue->first == NULL)
        queue->first = skb;
    else
        queue->last->next = skb;
    queue->last = chain_end;
}

/***
 *  rtskb_queue_tail - insert a buffer at the queue tail (lock protected)
 *  @queue: queue to use
 *  @skb: buffer to queue
 */
static inline void rtskb_queue_tail(struct rtskb_queue *queue,
                                    struct rtskb *skb)
{
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&queue->lock, context);
    __rtskb_queue_tail(queue, skb);
    rtdm_lock_put_irqrestore(&queue->lock, context);
}

/***
 *  __rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
 *                            (w/o locks)
 *  @prioqueue: queue to use
 *  @skb: buffer to queue
 */
static inline void __rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
                                           struct rtskb *skb)
{
    unsigned int prio = skb->priority & RTSKB_PRIO_MASK;

    RTNET_ASSERT(prio <= 31, prio = 31;);

    __rtskb_queue_tail(&prioqueue->queue[prio], skb);
    __set_bit(prio, &prioqueue->usage);
}

/***
 *  rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
 *                          (lock protected)
 *  @prioqueue: queue to use
 *  @skb: buffer to queue
 */
static inline void rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
                                         struct rtskb *skb)
{
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&prioqueue->lock, context);
    __rtskb_prio_queue_tail(prioqueue, skb);
    rtdm_lock_put_irqrestore(&prioqueue->lock, context);
}

/***
 *  __rtskb_dequeue - remove from the head of the queue (w/o locks)
 *  @queue: queue to remove from
 */
static inline struct rtskb *__rtskb_dequeue(struct rtskb_queue *queue)
{
    struct rtskb *result;

    if ((result = queue->first) != NULL) {
        queue->first = result->next;
        result->next = NULL;
    }

    return result;
}

/***
 *  rtskb_dequeue - remove from the head of the queue (lock protected)
 *  @queue: queue to remove from
 */
static inline struct rtskb *rtskb_dequeue(struct rtskb_queue *queue)
{
    rtdm_lockctx_t context;
    struct rtskb *result;

    rtdm_lock_get_irqsave(&queue->lock, context);
    result = __rtskb_dequeue(queue);
    rtdm_lock_put_irqrestore(&queue->lock, context);

    return result;
}

/***
 *  __rtskb_prio_dequeue - remove from the head of the prioritized queue
 *                         (w/o locks)
 *  @prioqueue: queue to remove from
 */
static inline struct rtskb *
    __rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
{
    int prio;
    struct rtskb *result = NULL;
    struct rtskb_queue *sub_queue;

    if (prioqueue->usage) {
        /* ffz(~usage) yields the lowest set bit, i.e. the non-empty
           sub-queue with the highest priority (0 = QUEUE_MAX_PRIO) */
        prio = ffz(~prioqueue->usage);
        sub_queue = &prioqueue->queue[prio];
        result = __rtskb_dequeue(sub_queue);
        if (rtskb_queue_empty(sub_queue))
            __change_bit(prio, &prioqueue->usage);
    }

    return result;
}

/***
 *  rtskb_prio_dequeue - remove from the head of the prioritized queue
 *                       (lock protected)
 *  @prioqueue: queue to remove from
 */
static inline struct rtskb *
    rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
{
    rtdm_lockctx_t context;
    struct rtskb *result;

    rtdm_lock_get_irqsave(&prioqueue->lock, context);
    result = __rtskb_prio_dequeue(prioqueue);
    rtdm_lock_put_irqrestore(&prioqueue->lock, context);

    return result;
}

/***
 *  __rtskb_dequeue_chain - remove a chain from the head of the queue
 *                          (w/o locks)
 *  @queue: queue to remove from
 */
static inline struct rtskb *__rtskb_dequeue_chain(struct rtskb_queue *queue)
{
    struct rtskb *result;
    struct rtskb *chain_end;

    if ((result = queue->first) != NULL) {
        chain_end = result->chain_end;
        queue->first = chain_end->next;
        chain_end->next = NULL;
    }

    return result;
}

/***
 *  rtskb_dequeue_chain - remove a chain from the head of the queue
 *                        (lock protected)
 *  @queue: queue to remove from
 */
static inline struct rtskb *rtskb_dequeue_chain(struct rtskb_queue *queue)
{
    rtdm_lockctx_t context;
    struct rtskb *result;

    rtdm_lock_get_irqsave(&queue->lock, context);
    result = __rtskb_dequeue_chain(queue);
    rtdm_lock_put_irqrestore(&queue->lock, context);

    return result;
}

/***
 *  rtskb_prio_dequeue_chain - remove a chain from the head of the
 *                             prioritized queue
 *  @prioqueue: queue to remove from
 */
static inline
    struct rtskb *rtskb_prio_dequeue_chain(struct rtskb_prio_queue *prioqueue)
{
    rtdm_lockctx_t context;
    int prio;
    struct rtskb *result = NULL;
    struct rtskb_queue *sub_queue;

    rtdm_lock_get_irqsave(&prioqueue->lock, context);
    if (prioqueue->usage) {
        prio = ffz(~prioqueue->usage);
        sub_queue = &prioqueue->queue[prio];
        result = __rtskb_dequeue_chain(sub_queue);
        if (rtskb_queue_empty(sub_queue))
            __change_bit(prio, &prioqueue->usage);
    }
    rtdm_lock_put_irqrestore(&prioqueue->lock, context);

    return result;
}

/***
 *  rtskb_queue_purge - clean the queue
 *  @queue
 */
static inline void rtskb_queue_purge(struct rtskb_queue *queue)
{
    struct rtskb *skb;

    while ((skb = rtskb_dequeue(queue)) != NULL)
        kfree_rtskb(skb);
}

static inline int rtskb_headlen(const struct rtskb *skb)
{
    return skb->len;
}

static inline void rtskb_reserve(struct rtskb *skb, unsigned int len)
{
    skb->data += len;
    skb->tail += len;
}

static inline unsigned char *__rtskb_put(struct rtskb *skb, unsigned int len)
{
    unsigned char *tmp = skb->tail;

    skb->tail += len;
    skb->len += len;
    return tmp;
}

#define rtskb_put(skb, length)                                          \
({                                                                      \
    struct rtskb *__rtskb = (skb);                                      \
    unsigned int __len = (length);                                      \
    unsigned char *tmp = __rtskb->tail;                                 \
                                                                        \
    __rtskb->tail += __len;                                             \
    __rtskb->len += __len;                                              \
                                                                        \
    RTNET_ASSERT(__rtskb->tail <= __rtskb->buf_end,                     \
                 rtskb_over_panic(__rtskb, __len, current_text_addr());); \
                                                                        \
    tmp;                                                                \
})

static inline unsigned char *__rtskb_push(struct rtskb *skb, unsigned int len)
{
    skb->data -= len;
    skb->len += len;
    return skb->data;
}

#define rtskb_push(skb, length)                                         \
({                                                                      \
    struct rtskb *__rtskb = (skb);                                      \
    unsigned int __len = (length);                                      \
                                                                        \
    __rtskb->data -= __len;                                             \
    __rtskb->len += __len;                                              \
                                                                        \
    RTNET_ASSERT(__rtskb->data >= __rtskb->buf_start,                   \
                 rtskb_under_panic(__rtskb, __len, current_text_addr());); \
                                                                        \
    __rtskb->data;                                                      \
})

static inline unsigned char *__rtskb_pull(struct rtskb *skb, unsigned int len)
{
    RTNET_ASSERT(len <= skb->len, return NULL;);

    skb->len -= len;

    return skb->data += len;
}

static inline unsigned char *rtskb_pull(struct rtskb *skb, unsigned int len)
{
    if (len > skb->len)
        return NULL;

    skb->len -= len;

    return skb->data += len;
}

static inline void rtskb_trim(struct rtskb *skb, unsigned int len)
{
    if (skb->len > len) {
        skb->len = len;
        skb->tail = skb->data + len;
    }
}
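
/*
 * Together, these helpers implement the headroom/tailroom scheme known from
 * Linux sk_buffs. A typical transmit-path sketch (names illustrative):
 *
 *  struct rtskb *skb = alloc_rtskb(ETH_HLEN + data_len, &pool);
 *
 *  rtskb_reserve(skb, ETH_HLEN);                  keep link-layer headroom
 *  memcpy(rtskb_put(skb, data_len), payload,
 *         data_len);                              append the payload
 *  rtskb_push(skb, ETH_HLEN);                     prepend the Ethernet header
 */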

static inline struct rtskb *rtskb_padto(struct rtskb *rtskb, unsigned int len)
{
    RTNET_ASSERT(len <= (unsigned int)(rtskb->buf_end + 1 - rtskb->data),
                 return NULL;);

    memset(rtskb->data + rtskb->len, 0, len - rtskb->len);

    return rtskb;
}

static inline dma_addr_t rtskb_data_dma_addr(struct rtskb *rtskb,
                                             unsigned int offset)
{
    return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
}

extern struct rtskb_pool global_pool;

extern unsigned int rtskb_pool_init(struct rtskb_pool *pool,
                                    unsigned int initial_size,
                                    const struct rtskb_pool_lock_ops *lock_ops,
                                    void *lock_cookie);

extern unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
                                             unsigned int initial_size,
                                             struct module *module);

#define rtskb_module_pool_init(pool, size) \
    __rtskb_module_pool_init(pool, size, THIS_MODULE)

extern int rtskb_pool_release(struct rtskb_pool *pool);

extern unsigned int rtskb_pool_extend(struct rtskb_pool *pool,
                                      unsigned int add_rtskbs);
extern unsigned int rtskb_pool_shrink(struct rtskb_pool *pool,
                                      unsigned int rem_rtskbs);
extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool);
extern struct rtskb *rtskb_clone(struct rtskb *rtskb,
                                 struct rtskb_pool *pool);

extern int rtskb_pools_init(void);
extern void rtskb_pools_release(void);

extern unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb,
                                             int offset, u8 *to, int len,
                                             unsigned int csum);
extern void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to);


#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)

extern rtdm_lock_t rtcap_lock;
extern void (*rtcap_handler)(struct rtskb *skb);

static inline void rtcap_mark_incoming(struct rtskb *skb)
{
    skb->cap_start = skb->data;
    skb->cap_len = skb->len;
}

static inline void rtcap_report_incoming(struct rtskb *skb)
{
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&rtcap_lock, context);
    if (rtcap_handler != NULL)
        rtcap_handler(skb);

    rtdm_lock_put_irqrestore(&rtcap_lock, context);
}

static inline void rtcap_mark_rtmac_enqueue(struct rtskb *skb)
{
    /* rtskb start and length are probably not valid yet */
    skb->cap_flags |= RTSKB_CAP_RTMAC_STAMP;
    skb->cap_rtmac_stamp = rtdm_clock_read();
}

#else /* !CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */

#define rtcap_mark_incoming(skb)
#define rtcap_report_incoming(skb)
#define rtcap_mark_rtmac_enqueue(skb)

#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */


#endif /* __KERNEL__ */

#endif /* __RTSKB_H_ */