/*-
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 * Copyright (c) 2021-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

/* Wrapped in do { } while (0) so the macros are safe in if/else bodies. */
#define	SKB_TODO()							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TODO)				\
		printf("SKB_TODO %s:%d\n", __func__, __LINE__);		\
    } while (0)
#define	SKB_IMPROVE(...)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__);	\
    } while (0)
#define	SKB_TRACE(_s)							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s);	\
    } while (0)
#define	SKB_TRACE2(_s, _p)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__,	\
		    _s, _p);						\
    } while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__,	\
		    __LINE__, _s, __VA_ARGS__);				\
    } while (0)
#else
#define	SKB_TODO()		do { } while(0)
#define	SKB_IMPROVE(...)	do { } while(0)
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#endif
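
/*
 * Debug usage sketch (assuming a kernel built with SKB_DEBUG and the
 * wanted DSKB_* bits set in linuxkpi_debug_skb, e.g. from a debugger or
 * a tunable wired up in the implementation file):
 *
 *	static void
 *	my_rx_handler(struct sk_buff *skb)	// hypothetical driver code
 *	{
 *		SKB_TRACE(skb);
 *		SKB_TRACE_FMT(skb, "len %u", skb->len);
 *		SKB_TODO();			// flag unimplemented paths
 *	}
 */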

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
	/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
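
/*
 * Buffer layout invariant (mirroring Linux skb semantics):
 *
 *	head <= data <= tail <= end
 *
 *	head..data	headroom, grown by skb_reserve(), consumed by
 *			skb_push()
 *	data..tail	linear data, skb_headlen() bytes
 *	tail..end	tailroom, consumed by skb_put()
 *
 * len counts the linear data plus all fragment bytes; data_len counts
 * only the fragment bytes, so (len - data_len) is the linear part.
 */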

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}
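
/*
 * Allocation sketch (hypothetical driver code): dev_alloc_skb() returns
 * a fresh buffer, build_skb() wraps one the caller already owns; both
 * may fail and pair with kfree_skb().
 *
 *	skb = dev_alloc_skb(buflen);		// "buflen" is illustrative
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	...
 *	kfree_skb(skb);
 */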

/* -------------------------------------------------------------------------- */

/*
 * sk_buff lists (unlike the sk_buff_head "queues" below) are plain
 * NULL-terminated chains; make sure the last element is visited before
 * the walk terminates.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next,					\
	    (tmp) = ((skb) != NULL) ? (skb)->next : NULL;		\
	    (skb) != NULL;						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
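
/*
 * Typical receive-path fill sketch (hypothetical; "wire" and "wirelen"
 * are placeholders for illustration):
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + wirelen);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);		// headroom while still empty
 *	skb_put_data(skb, wire, wirelen);	// append the frame bytes
 */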

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}
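
/*
 * skb_push() is the counterpart of skb_reserve(): it claims headroom back
 * to prepend a header in front of the existing data, e.g. (sketch;
 * "hdrlen" and "hdrtmpl" are placeholders):
 *
 *	hdr = skb_push(skb, hdrlen);
 *	memcpy(hdr, hdrtmpl, hdrlen);
 */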

/*
 * Length of the linear data area only, i.e. the bytes between data and
 * tail; fragment pages are excluded.
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, "
		    "s %p len %zu\n", __func__, skb, skb->len, skb->head,
		    skb->data, skb->tail, skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %zu, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
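
/*
 * Decapsulation sketch (hypothetical; "struct myhdr" is a placeholder):
 * read the header in place, then advance past it, checking the result as
 * ath10k does.
 *
 *	struct myhdr *hdr = (struct myhdr *)skb->data;
 *
 *	if (skb_pull(skb, sizeof(*hdr)) == NULL)
 *		return (-EINVAL);	// frame shorter than the header
 */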

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}
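
/*
 * Trim sketch (hypothetical): drop trailing bytes such as a
 * hardware-appended 4-byte FCS on a linear skb.
 *
 *	if (skb->len > 4)
 *		skb_trim(skb, skb->len - 4);
 */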

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u "
		    "fragno %d page %#jx offset %ju size %zu truesize %u\n",
		    __func__, skb, skb->head, skb->data, skb->tail, skb->end,
		    skb->len, fragno,
		    (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}
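
/*
 * Fragment attach sketch (hypothetical receive path; "pg" and "fraglen"
 * are placeholders): append a page of payload without copying, charging
 * the whole page to truesize.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, pg,
 *	    0, fraglen, PAGE_SIZE);		// offset 0 into the page
 */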

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
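
/*
 * A sk_buff_head is a circular doubly-linked list whose head acts as a
 * sentinel: an empty queue has next == prev == (struct sk_buff *)q, so
 * the walks above stop once they come back around to the head.  Usage
 * sketch (hypothetical; the _safe variant tolerates unlinking the
 * current element):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (is_stale(skb)) {		// "is_stale" is illustrative
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 */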

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *s;

	SKB_TRACE2(q, skb);
	q->qlen++;
	s = (struct sk_buff *)q;
	s->prev->next = skb;
	skb->prev = s->prev;
	skb->next = s;
	s->prev = skb;
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_free_frag(void *frag)
{

	SKB_TODO();
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;
	e = from->prev;
	n = to->next;

	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
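
/*
 * Splice sketch: move everything from a producer queue onto a local one
 * in O(1), then process without the producer's lock held (hypothetical;
 * locking here is the caller's choice):
 *
 *	struct sk_buff_head local;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&local);
 *	spin_lock(&from->lock);
 *	skb_queue_splice_init(from, &local);
 *	spin_unlock(&from->lock);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		handle(skb);			// "handle" is illustrative
 */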

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);	/* Negative errno, matching Linux convention. */
}

static inline int
skb_linearize(struct sk_buff *skb)
{

	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
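
/*
 * Sizing sketch: SKB_WITH_OVERHEAD(PAGE_SIZE) is the payload left in a
 * page-sized allocation after the cache-line-aligned skb_shared_info
 * footprint is subtracted (hypothetical; "pagebuf" is a placeholder):
 *
 *	maxpayload = SKB_WITH_OVERHEAD(PAGE_SIZE);
 *	skb = build_skb(pagebuf, PAGE_SIZE);
 */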

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */