30 #include <linux/skbuff.h>
33 #include <rtnet_internal.h>
140 #ifndef CHECKSUM_PARTIAL
141 #define CHECKSUM_PARTIAL CHECKSUM_HW
/* Bit flags for the RTcap capture state of an rtskb (cap_flags field,
 * see rtcap_mark_rtmac_enqueue() below). */
#define RTSKB_CAP_SHARED 1
#define RTSKB_CAP_RTMAC_STAMP 2

/* NOTE(review): presumably marks buf_dma_addr as "not DMA-mapped" —
 * confirm against the buffer mapping code. */
#define RTSKB_UNMAPPED 0
158 struct rtskb *chain_end;
161 struct rtskb_pool *pool;
163 unsigned int priority;
166 struct rtnet_device *rtdev;
180 struct icmphdr *icmph;
181 struct iphdr *ipihdr;
196 struct ethhdr *ethernet;
200 unsigned short protocol;
201 unsigned char pkt_type;
203 unsigned char ip_summed;
211 dma_addr_t buf_dma_addr;
213 unsigned char *buf_start;
215 #ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
216 unsigned char *buf_end;
219 #if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
221 struct rtskb *cap_comp_skb;
222 struct rtskb *cap_next;
223 unsigned char *cap_start;
224 unsigned int cap_len;
228 struct list_head entry;
237 struct rtskb_pool_lock_ops {
238 int (*trylock)(
void *cookie);
239 void (*unlock)(
void *cookie);
243 struct rtskb_queue queue;
244 const struct rtskb_pool_lock_ops *lock_ops;
/* Priority index bounds for struct rtskb_prio_queue: index 0 is the
 * highest priority (served first by ffz-based dequeue), 31 the lowest. */
#define QUEUE_MAX_PRIO 0
#define QUEUE_MIN_PRIO 31
252 struct rtskb_prio_queue {
255 struct rtskb_queue queue[QUEUE_MIN_PRIO+1];
/* Layout of the 32-bit rtskb priority word: low 16 bits carry the
 * priority value, high 16 bits the transmission channel. */
#define RTSKB_PRIO_MASK 0x0000FFFF
#define RTSKB_CHANNEL_MASK 0xFFFF0000
#define RTSKB_CHANNEL_SHIFT 16

/* Channel aliases mirroring the socket-level SOCK_* definitions. */
#define RTSKB_DEF_RT_CHANNEL SOCK_DEF_RT_CHANNEL
#define RTSKB_DEF_NRT_CHANNEL SOCK_DEF_NRT_CHANNEL
#define RTSKB_USER_CHANNEL SOCK_USER_CHANNEL

/* Compose a priority word from value and channel (socket-level helper). */
#define RTSKB_PRIO_VALUE SOCK_XMIT_PARAMS
/* Default number of preallocated rtskbs per pool type. */
#define DEFAULT_GLOBAL_RTSKBS 0
#define DEFAULT_DEVICE_RTSKBS 16
#define DEFAULT_SOCKET_RTSKBS 16

/* Cache-aligned size of the rtskb header structure. */
#define ALIGN_RTSKB_STRUCT_LEN SKB_DATA_ALIGN(sizeof(struct rtskb))
/* Fixed per-buffer data size in bytes.
 * NOTE(review): presumably max Ethernet frame plus headroom — confirm. */
#define RTSKB_SIZE 1544
278 extern unsigned int rtskb_pools;
279 extern unsigned int rtskb_pools_max;
280 extern unsigned int rtskb_amount;
281 extern unsigned int rtskb_amount_max;
283 #ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
284 extern void rtskb_over_panic(
struct rtskb *skb,
int len,
void *here);
285 extern void rtskb_under_panic(
struct rtskb *skb,
int len,
void *here);
288 extern struct rtskb *rtskb_pool_dequeue(
struct rtskb_pool *pool);
290 extern void rtskb_pool_queue_tail(
struct rtskb_pool *pool,
struct rtskb *skb);
292 extern struct rtskb *alloc_rtskb(
unsigned int size,
struct rtskb_pool *pool);
294 extern void kfree_rtskb(
struct rtskb *skb);
295 #define dev_kfree_rtskb(a) kfree_rtskb(a)
298 #define rtskb_checksum_none_assert(skb) (skb->ip_summed = CHECKSUM_NONE)
300 static inline void rtskb_tx_timestamp(
struct rtskb *skb)
314 static inline void rtskb_queue_init(
struct rtskb_queue *queue)
325 static inline void rtskb_prio_queue_init(
struct rtskb_prio_queue *prioqueue)
327 memset(prioqueue, 0,
sizeof(
struct rtskb_prio_queue));
335 static inline int rtskb_queue_empty(
struct rtskb_queue *queue)
337 return (queue->first == NULL);
344 static inline int rtskb_prio_queue_empty(
struct rtskb_prio_queue *prioqueue)
346 return (prioqueue->usage == 0);
354 static inline void __rtskb_queue_head(
struct rtskb_queue *queue,
357 struct rtskb *chain_end = skb->chain_end;
359 chain_end->next = queue->first;
361 if (queue->first == NULL)
362 queue->last = chain_end;
371 static inline void rtskb_queue_head(
struct rtskb_queue *queue,
struct rtskb *skb)
375 rtdm_lock_get_irqsave(&queue->lock, context);
376 __rtskb_queue_head(queue, skb);
386 static inline void __rtskb_prio_queue_head(
struct rtskb_prio_queue *prioqueue,
389 unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
391 RTNET_ASSERT(prio <= 31, prio = 31;);
393 __rtskb_queue_head(&prioqueue->queue[prio], skb);
394 __set_bit(prio, &prioqueue->usage);
403 static inline void rtskb_prio_queue_head(
struct rtskb_prio_queue *prioqueue,
408 rtdm_lock_get_irqsave(&prioqueue->lock, context);
409 __rtskb_prio_queue_head(prioqueue, skb);
418 static inline void __rtskb_queue_tail(
struct rtskb_queue *queue,
421 struct rtskb *chain_end = skb->chain_end;
423 chain_end->next = NULL;
425 if (queue->first == NULL)
428 queue->last->next = skb;
429 queue->last = chain_end;
437 static inline void rtskb_queue_tail(
struct rtskb_queue *queue,
442 rtdm_lock_get_irqsave(&queue->lock, context);
443 __rtskb_queue_tail(queue, skb);
453 static inline void __rtskb_prio_queue_tail(
struct rtskb_prio_queue *prioqueue,
456 unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
458 RTNET_ASSERT(prio <= 31, prio = 31;);
460 __rtskb_queue_tail(&prioqueue->queue[prio], skb);
461 __set_bit(prio, &prioqueue->usage);
470 static inline void rtskb_prio_queue_tail(
struct rtskb_prio_queue *prioqueue,
475 rtdm_lock_get_irqsave(&prioqueue->lock, context);
476 __rtskb_prio_queue_tail(prioqueue, skb);
484 static inline struct rtskb *__rtskb_dequeue(
struct rtskb_queue *queue)
486 struct rtskb *result;
488 if ((result = queue->first) != NULL) {
489 queue->first = result->next;
500 static inline struct rtskb *rtskb_dequeue(
struct rtskb_queue *queue)
503 struct rtskb *result;
505 rtdm_lock_get_irqsave(&queue->lock, context);
506 result = __rtskb_dequeue(queue);
517 static inline struct rtskb *
518 __rtskb_prio_dequeue(
struct rtskb_prio_queue *prioqueue)
521 struct rtskb *result = NULL;
522 struct rtskb_queue *sub_queue;
524 if (prioqueue->usage) {
525 prio = ffz(~prioqueue->usage);
526 sub_queue = &prioqueue->queue[prio];
527 result = __rtskb_dequeue(sub_queue);
528 if (rtskb_queue_empty(sub_queue))
529 __change_bit(prio, &prioqueue->usage);
540 static inline struct rtskb *
541 rtskb_prio_dequeue(
struct rtskb_prio_queue *prioqueue)
544 struct rtskb *result;
546 rtdm_lock_get_irqsave(&prioqueue->lock, context);
547 result = __rtskb_prio_dequeue(prioqueue);
558 static inline struct rtskb *__rtskb_dequeue_chain(
struct rtskb_queue *queue)
560 struct rtskb *result;
561 struct rtskb *chain_end;
563 if ((result = queue->first) != NULL) {
564 chain_end = result->chain_end;
565 queue->first = chain_end->next;
566 chain_end->next = NULL;
577 static inline struct rtskb *rtskb_dequeue_chain(
struct rtskb_queue *queue)
580 struct rtskb *result;
582 rtdm_lock_get_irqsave(&queue->lock, context);
583 result = __rtskb_dequeue_chain(queue);
595 struct rtskb *rtskb_prio_dequeue_chain(
struct rtskb_prio_queue *prioqueue)
599 struct rtskb *result = NULL;
600 struct rtskb_queue *sub_queue;
602 rtdm_lock_get_irqsave(&prioqueue->lock, context);
603 if (prioqueue->usage) {
604 prio = ffz(~prioqueue->usage);
605 sub_queue = &prioqueue->queue[prio];
606 result = __rtskb_dequeue_chain(sub_queue);
607 if (rtskb_queue_empty(sub_queue))
608 __change_bit(prio, &prioqueue->usage);
/*
 * Drain @queue, releasing every rtskb back to its pool.
 * Defect fixed: the extracted text shows the while-loop with no body,
 * which would discard (leak) or never release the dequeued buffers.
 * NOTE(review): the per-skb release via kfree_rtskb() is the canonical
 * upstream implementation — confirm against the original header.
 */
static inline void rtskb_queue_purge(struct rtskb_queue *queue)
{
	struct rtskb *skb;

	while ((skb = rtskb_dequeue(queue)) != NULL)
		kfree_rtskb(skb);
}
626 static inline int rtskb_headlen(
const struct rtskb *skb)
631 static inline void rtskb_reserve(
struct rtskb *skb,
unsigned int len)
637 static inline unsigned char *__rtskb_put(
struct rtskb *skb,
unsigned int len)
639 unsigned char *tmp=skb->tail;
646 #define rtskb_put(skb, length) \
648 struct rtskb *__rtskb = (skb); \
649 unsigned int __len = (length); \
650 unsigned char *tmp=__rtskb->tail; \
652 __rtskb->tail += __len; \
653 __rtskb->len += __len; \
655 RTNET_ASSERT(__rtskb->tail <= __rtskb->buf_end, \
656 rtskb_over_panic(__rtskb, __len, current_text_addr());); \
661 static inline unsigned char *__rtskb_push(
struct rtskb *skb,
unsigned int len)
668 #define rtskb_push(skb, length) \
670 struct rtskb *__rtskb = (skb); \
671 unsigned int __len = (length); \
673 __rtskb->data -= __len; \
674 __rtskb->len += __len; \
676 RTNET_ASSERT(__rtskb->data >= __rtskb->buf_start, \
677 rtskb_under_panic(__rtskb, __len, current_text_addr());); \
682 static inline unsigned char *__rtskb_pull(
struct rtskb *skb,
unsigned int len)
684 RTNET_ASSERT(len <= skb->len,
return NULL;);
688 return skb->data += len;
691 static inline unsigned char *rtskb_pull(
struct rtskb *skb,
unsigned int len)
698 return skb->data += len;
701 static inline void rtskb_trim(
struct rtskb *skb,
unsigned int len)
705 skb->tail = skb->data+len;
709 static inline struct rtskb *rtskb_padto(
struct rtskb *rtskb,
unsigned int len)
711 RTNET_ASSERT(len <= (
unsigned int)(rtskb->buf_end + 1 - rtskb->data),
714 memset(rtskb->data + rtskb->len, 0, len - rtskb->len);
719 static inline dma_addr_t rtskb_data_dma_addr(
struct rtskb *rtskb,
722 return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
725 extern struct rtskb_pool global_pool;
727 extern unsigned int rtskb_pool_init(
struct rtskb_pool *pool,
728 unsigned int initial_size,
729 const struct rtskb_pool_lock_ops *lock_ops,
732 extern unsigned int __rtskb_module_pool_init(
struct rtskb_pool *pool,
733 unsigned int initial_size,
734 struct module *module);
/* Initialize @pool with @size buffers on behalf of the calling module
 * (ownership accounted via THIS_MODULE). */
#define rtskb_module_pool_init(pool, size) \
	__rtskb_module_pool_init(pool, size, THIS_MODULE)
739 extern int rtskb_pool_release(
struct rtskb_pool *pool);
741 extern unsigned int rtskb_pool_extend(
struct rtskb_pool *pool,
742 unsigned int add_rtskbs);
743 extern unsigned int rtskb_pool_shrink(
struct rtskb_pool *pool,
744 unsigned int rem_rtskbs);
745 extern int rtskb_acquire(
struct rtskb *rtskb,
struct rtskb_pool *comp_pool);
746 extern struct rtskb* rtskb_clone(
struct rtskb *rtskb,
747 struct rtskb_pool *pool);
749 extern int rtskb_pools_init(
void);
750 extern void rtskb_pools_release(
void);
752 extern unsigned int rtskb_copy_and_csum_bits(
const struct rtskb *skb,
753 int offset, u8 *to,
int len,
755 extern void rtskb_copy_and_csum_dev(
const struct rtskb *skb, u8 *to);
758 #if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
761 extern void (*rtcap_handler)(
struct rtskb *skb);
763 static inline void rtcap_mark_incoming(
struct rtskb *skb)
765 skb->cap_start = skb->data;
766 skb->cap_len = skb->len;
769 static inline void rtcap_report_incoming(
struct rtskb *skb)
774 rtdm_lock_get_irqsave(&rtcap_lock, context);
775 if (rtcap_handler != NULL)
781 static inline void rtcap_mark_rtmac_enqueue(
struct rtskb *skb)
784 skb->cap_flags |= RTSKB_CAP_RTMAC_STAMP;
/* RTcap add-on disabled: the capture hooks compile away to no-ops. */
#define rtcap_mark_incoming(skb)
#define rtcap_report_incoming(skb)
#define rtcap_mark_rtmac_enqueue(skb)
/*
 * Extraction residue: Doxygen cross-reference snippets for the RTDM
 * services used by this header (see Xenomai driver.h / rtdm.h).
 * Preserved verbatim below as a comment; not part of this header's code.
 *
 * static void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context)
 *     Release lock and restore preemption state.  Definition: driver.h:603
 * ipipe_spinlock_t rtdm_lock_t
 *     Lock variable.  Definition: driver.h:528
 * uint64_t nanosecs_abs_t
 *     RTDM type for representing absolute dates.  Definition: rtdm.h:43
 * static void rtdm_lock_init(rtdm_lock_t *lock)
 *     Dynamic lock initialisation.  Definition: driver.h:540
 * nanosecs_abs_t rtdm_clock_read(void)
 *     Get system time.
 * unsigned long rtdm_lockctx_t
 *     Variable to save the context while holding a lock.  Definition: driver.h:531
 */