/*-
 * Copyright (c) 2020-2023 The FreeBSD Foundation
 * Copyright (c) 2021-2023 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
    if (linuxkpi_debug_skb & DSKB_TODO)					\
	printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define	SKB_IMPROVE(...)						\
    if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
	printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define	SKB_TRACE(_s)							\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define	SKB_TRACE2(_s, _p)						\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s,	\
	    __VA_ARGS__)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif
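
/*
 * Example (illustrative only): with SKB_DEBUG defined and DSKB_TRACE set in
 * linuxkpi_debug_skb, callers can trace skbs at points of interest:
 *
 *	SKB_TRACE(skb);
 *	SKB_TRACE_FMT(skb, "len %u data_len %u", skb->len, skb->data_len);
 */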

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

struct skb_shared_hwtstamps {
	ktime_t			hwtstamp;
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
	/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* Total data length (linear + frag data). */
	uint32_t		data_len;	/* Length of the frag data only. */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;
	uint16_t		mac_header;	/* offset of mac_header */

	/* "Scratch" area for layers to store metadata. */
	/* Consumers use sizeof() on this, so it has to stay an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
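
/*
 * Linear buffer layout (mirrors Linux; see skb_reserve()/__skb_put() and
 * friends below):
 *
 *	head <= data <= tail <= end
 *
 *	headroom      = data - head
 *	linear length = tail - data  (== skb_headlen() == len - data_len)
 *	tailroom      = end - tail
 *
 * skb->len covers the linear data plus any frag data; skb->data_len covers
 * the frag data only (see skb_add_rx_frag()).
 */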

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next; (skb) != NULL && ((tmp) = (skb)->next); (skb) = (tmp))
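
/*
 * Example (sketch): walk the skbs reachable from "first" via ->next, freeing
 * each one; "first" is a hypothetical chain head.  Mind the XXX above: the
 * terminal condition may not match Linux exactly.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_list_walk_safe(first, skb, tmp)
 *		kfree_skb(skb);
 */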

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
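
/*
 * Example (sketch): reserve headroom right after allocation, before any data
 * is placed in the buffer; "payload_len" is hypothetical.
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + payload_len);
 *	if (skb != NULL)
 *		skb_reserve(skb, NET_SKB_PAD);
 */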

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}
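
/*
 * Example (sketch): prepend a header into previously reserved headroom;
 * "struct foohdr" and "hdrtmpl" are hypothetical.
 *
 *	struct foohdr *fh;
 *
 *	fh = skb_push(skb, sizeof(*fh));
 *	memcpy(fh, &hdrtmpl, sizeof(*fh));
 */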

/*
 * Length of the linear data on the skb (excluding any frag data).
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, "
		    "s %p len %zu\n", __func__, skb, skb->len, skb->head,
		    skb->data, skb->tail, skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}
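
/*
 * Example (sketch): append payload to the linear data area; "buf" and
 * "buflen" are hypothetical and must fit within skb_tailroom(skb).
 *
 *	if (skb_tailroom(skb) >= buflen)
 *		(void)skb_put_data(skb, buf, buflen);
 */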

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
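
/*
 * Example (sketch): consume a fixed-size header from the front of the data;
 * "struct foohdr" is hypothetical.  A NULL return signals a short packet.
 *
 *	struct foohdr *fh;
 *
 *	fh = (struct foohdr *)skb->data;
 *	if (skb_pull(skb, sizeof(*fh)) == NULL)
 *		return (-EINVAL);
 */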

/* Reduce skb data to the given length; do nothing if it is already shorter. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u "
		    "fragno %d page %#jx offset %ju size %zu truesize %u\n",
		    __func__, skb, skb->head, skb->data, skb->tail, skb->end,
		    skb->len, fragno,
		    (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}
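
/*
 * Example (sketch): attach a received page as the next fragment; "page" and
 * "rx_len" are hypothetical, and PAGE_SIZE stands in for the true buffer
 * size accounted to the skb.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, rx_len,
 *	    PAGE_SIZE);
 */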

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
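
/*
 * Example (sketch): remove matching skbs from a queue; the safe variant is
 * needed because the loop unlinks elements.  "match()" is hypothetical.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 */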

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	((struct sk_buff_head_l *)next)->prev = new;
	((struct sk_buff_head_l *)prev)->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	SKB_TRACE2(q, new);
	return (__skb_queue_tail(q, new));
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}
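
/*
 * Example (sketch): FIFO queue usage; "skb1" and "skb2" are hypothetical.
 * Tail inserts (before the head sentinel) plus head dequeues yield arrival
 * order.
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb1);
 *	skb_queue_tail(&q, skb2);
 *	skb = skb_dequeue(&q);		returns skb1
 */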

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
___skb_queue_splice_init(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	b->prev = p;
	((struct sk_buff_head_l *)p)->next = b;
	e->next = n;
	((struct sk_buff_head_l *)n)->prev = e;
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice_init(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
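
/*
 * Example (sketch): drain a queue shared with another context into a local
 * queue, then process without holding the lock; "q" and the lock handling
 * are illustrative.
 *
 *	struct sk_buff_head local;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&local);
 *	spin_lock(&q->lock);
 *	skb_queue_splice_init(q, &local);
 *	spin_unlock(&q->lock);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		... process skb ...
 */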

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{

	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen in use, but needed as the symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	/* Make sure the mac_header was set as otherwise we return garbage. */
	WARN_ON(skb->mac_header == 0);
	return (skb->head + skb->mac_header);
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}

static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}
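
/*
 * Example (sketch): record the link-layer header position right after
 * prepending it, so that skb_mac_header() works later; the 802.3 header
 * size is illustrative.
 *
 *	(void)skb_push(skb, ETHER_HDR_LEN);
 *	skb_reset_mac_header(skb);
 */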

static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}

static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}

static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline int
skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
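
/*
 * Example (sketch): usable data size of a single-page backing allocation
 * once the shared info overhead is subtracted; the PAGE_SIZE use here is
 * illustrative.
 *
 *	size_t datasz = SKB_WITH_OVERHEAD(PAGE_SIZE);
 */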

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */