/*-
 * Copyright (c) 2020-2023 The FreeBSD Foundation
 * Copyright (c) 2021-2023 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we improve the integration with FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

/* Wrapped in do { } while (0) to be safe in if/else without braces. */
#define	SKB_TODO()							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TODO)			\
			printf("SKB_TODO %s:%d\n", __func__, __LINE__);	\
	} while (0)
#define	SKB_IMPROVE(...)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_IMPROVE)			\
			printf("SKB_IMPROVE %s:%d\n", __func__,		\
			    __LINE__);					\
	} while (0)
#define	SKB_TRACE(_s)							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p\n", __func__,	\
			    __LINE__, _s);				\
	} while (0)
#define	SKB_TRACE2(_s, _p)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p, %p\n", __func__,	\
			    __LINE__, _s, _p);				\
	} while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p " _fmt "\n",		\
			    __func__, __LINE__, _s, __VA_ARGS__);	\
	} while (0)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif
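
/*
 * Example (illustrative): with SKB_DEBUG defined, tracing is controlled by
 * OR-ing the DSKB_* bits into linuxkpi_debug_skb, e.g.
 *
 *	linuxkpi_debug_skb = DSKB_TODO | DSKB_TRACE;	// 0x01 | 0x10 == 0x11
 *
 * logs both unimplemented-function hits and per-skb traces.  How the value
 * is set at runtime (debugger, tunable, or a sysctl provided by the .c side)
 * is not specified by this header.
 */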

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

struct skb_shared_hwtstamps {
	ktime_t			hwtstamp;
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
		/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;
	spinlock_t		lock;
};
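
/*
 * Note on the queue representation (mirroring the Linux convention): the
 * head's next/prev overlay the start of struct sk_buff, so the head itself
 * serves as the list terminator when cast to struct sk_buff *.  An empty
 * queue points back at itself, e.g.:
 *
 *	struct sk_buff_head q;
 *
 *	__skb_queue_head_init(&q);
 *	// now q.next == q.prev == (struct sk_buff *)&q and q.qlen == 0
 */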

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;
	uint16_t		mac_header;	/* offset of mac_header */

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
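
/*
 * Buffer layout reminder (a sketch of the Linux model these four pointers
 * implement):
 *
 *	head .. data : headroom (grown by skb_reserve(), consumed by skb_push())
 *	data .. tail : len, the linear data (grown by skb_put())
 *	tail .. end  : tailroom still available to skb_put()
 */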

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next; (skb) != NULL &&			\
	    ((tmp) = (skb)->next, true); (skb) = (tmp))

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
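
/*
 * Typical receive-buffer setup (illustrative sketch; the sizes are made up):
 *
 *	struct sk_buff *skb;
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + 1500);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);	// headroom for a later skb_push()
 */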

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}

/*
 * Length of the linear data on the skb (i.e., excluding data held in frags).
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
		    __func__, skb, skb->len, skb->head, skb->data, skb->tail,
		    skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}
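
/*
 * Example of composing a frame in the linear buffer (illustrative sketch;
 * "hdr" and "payload" are hypothetical):
 *
 *	skb_reserve(skb, sizeof(hdr));		 // leave room for the header
 *	skb_put_data(skb, payload, payload_len); // append payload at tail
 *	memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); // prepend hdr
 *	// skb->len is now sizeof(hdr) + payload_len
 */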

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %zu, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
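
/*
 * Parsing sketch (illustrative): strip an assumed fixed-size header and
 * fail gracefully on short packets, relying on skb_pull() returning NULL:
 *
 *	struct myhdr *hdr;			// hypothetical header type
 *
 *	hdr = (struct myhdr *)skb->data;
 *	if (skb_pull(skb, sizeof(*hdr)) == NULL)
 *		return (-EINVAL);		// packet too short
 *	// skb->data now points at the payload
 */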

/* Reduce skb data to given length or do nothing if already smaller. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
		    "page %#jx offset %ju size %zu truesize %u\n", __func__,
		    skb, skb->head, skb->data, skb->tail, skb->end, skb->len,
		    fragno, (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}
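
/*
 * Receive-fragment sketch (illustrative; "pg" is a page the driver owns):
 *
 *	skb_add_rx_frag(skb, 0, pg, 0, PAGE_SIZE, PAGE_SIZE);
 *	// skb->len and skb->data_len grew by PAGE_SIZE; the skb is now
 *	// nonlinear (skb_is_nonlinear(skb) == true).
 */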

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
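
/*
 * Walk sketch (illustrative): the _safe variant allows unlinking the
 * current skb while iterating, e.g. to drain selected entries:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&q, skb, tmp) {
 *		if (matches(skb)) {		// hypothetical predicate
 *			__skb_unlink(skb, &q);
 *			kfree_skb(skb);
 *		}
 *	}
 */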

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	((struct sk_buff_head_l *)next)->prev = new;
	((struct sk_buff_head_l *)prev)->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	/* Tail insert goes before the head sentinel, not after it. */
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	SKB_TRACE2(q, new);
	return (__skb_queue_tail(q, new));
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
___skb_queue_splice_init(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	b->prev = p;
	((struct sk_buff_head_l *)p)->next = b;
	e->next = n;
	((struct sk_buff_head_l *)n)->prev = e;
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice_init(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
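
/*
 * Splice sketch (illustrative): move everything queued so far onto a local
 * list under the lock, then process without holding it:
 *
 *	struct sk_buff_head local;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&local);
 *	spin_lock(&q->lock);
 *	skb_queue_splice_init(q, &local);	// q is now empty again
 *	spin_unlock(&q->lock);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		handle(skb);			// hypothetical consumer
 */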

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{

	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	/* Make sure the mac_header was set as otherwise we return garbage. */
	WARN_ON(skb->mac_header == 0);
	return (skb->head + skb->mac_header);
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}

static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}

static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}

static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}

static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
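
/*
 * Example (illustrative): SKB_WITH_OVERHEAD() turns a raw buffer size into
 * the portion usable as skb data.  If, hypothetically,
 * ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE) came to 1600,
 * then SKB_WITH_OVERHEAD(4096) == 4096 - 1600 == 2496 bytes of usable
 * space; the real value depends on the target's CACHE_LINE_SIZE.
 */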

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */