/*-
 * Copyright (c) 2020-2025 The FreeBSD Foundation
 * Copyright (c) 2021-2023 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we improve the integration with FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

#include "opt_wlan.h"

/* Currently this is only used for wlan so we can depend on that. */
#if defined(IEEE80211_DEBUG) && !defined(SKB_DEBUG)
#define	SKB_DEBUG
#endif

/* #define	SKB_DEBUG */

#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TODO)				\
		printf("SKB_TODO %s:%d\n", __func__, __LINE__);		\
    } while (0)
#define	SKB_IMPROVE(...)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__);	\
    } while (0)
#define	SKB_TRACE(_s)							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s);	\
    } while (0)
#define	SKB_TRACE2(_s, _p)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__,	\
		    _s, _p);						\
    } while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__,	\
		    __LINE__, _s, __VA_ARGS__);				\
    } while (0)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

struct skb_shared_hwtstamps {
	ktime_t			hwtstamp;
};

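/*
 * NET_SKB_PAD is the default amount of headroom reserved in front of packet
 * data and SKB_DATA_ALIGN() rounds buffer sizes up to a cache line,
 * mirroring the Linux helpers of the same names.
 */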
#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)
#define	SKB_DATA_ALIGN(_x)	roundup2(_x, CACHE_LINE_SIZE)

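/*
 * The anonymous union below overlays next/prev with the leading members of
 * struct sk_buff, so the queue head itself can act as the list sentinel;
 * the (struct sk_buff *) and (struct sk_buff_head_l *) casts in the queue
 * functions below rely on this layout.
 */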
struct sk_buff_head {
	/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* Total data length (linear + frags). */
	uint32_t		data_len;	/* Data length held in frags only. */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;
	uint16_t		mac_header;	/* offset of mac_header */
	refcount_t		refcnt;

	/* "Scratch" area for layers to store private metadata. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
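
/*
 * For a linear skb the buffer pointers relate as follows (a sketch of how
 * skb_reserve()/skb_put()/skb_pull() below maintain them):
 *
 *	head     data               tail         end
 *	 |        |<------ len ------>|           |
 *	 v        v                   v           v
 *	 +--------+-------------------+-----------+
 *	 |headroom|       data        | tailroom  |
 *	 +--------+-------------------+-----------+
 */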

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(const struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}
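
/*
 * Typical allocation/free usage of the functions above (an illustrative
 * sketch only; hdrroom, paylen, and payload are hypothetical variables):
 *
 *	struct sk_buff *skb;
 *
 *	skb = dev_alloc_skb(hdrroom + paylen);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, hdrroom);		(add headroom; see below)
 *	memcpy(skb_put(skb, paylen), payload, paylen);
 *	...
 *	kfree_skb(skb);
 */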

/* -------------------------------------------------------------------------- */

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

/* Add headroom; this cannot be done once there is data in the buffer. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}

/*
 * Remove headroom; return the new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}

/*
 * Length of the linear data on the skb (i.e. excluding any frags).
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	if (unlikely(skb_is_nonlinear(skb)))
		return (0);
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, "
		    "s %p len %zu\n", __func__, skb, skb->len, skb->head,
		    skb->data, skb->tail, skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}

/* Reduce skb data to the given length or do nothing if it is already shorter. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	__skb_trim(skb, len);
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u "
		    "fragno %d page %#jx offset %ju size %zu truesize %u\n",
		    __func__, skb, skb->head, skb->data, skb->tail, skb->end,
		    skb->len, fragno,
		    (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
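
/*
 * Note: unlike Linux, frags[] is a fixed-size array here, so fragno above
 * indexes it directly (bounds-checked by the KASSERT) and nr_frags is simply
 * advanced to fragno + 1; the caller supplies the truesize accounting.
 */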

/* -------------------------------------------------------------------------- */

#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)

#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
	    ((skb) != NULL);						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
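
/*
 * Example (sketch): counting and draining a queue with the walkers above.
 * Only the *_safe variant tolerates unlinking the current skb:
 *
 *	skb_queue_walk(q, skb)
 *		n++;
 *	skb_queue_walk_safe(q, skb, tmp)
 *		__skb_unlink(skb, q);
 */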

static inline bool
skb_queue_empty(const struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (q->next == (const struct sk_buff *)q);
}

static inline bool
skb_queue_empty_lockless(const struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (READ_ONCE(q->next) == (const struct sk_buff *)q);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	__skb_queue_head_init(q);
	spin_lock_init(&q->lock);
}
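
/*
 * A queue lifecycle sketch using the functions below (illustrative only):
 *
 *	struct sk_buff_head q;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);
 *	...
 *	while ((skb = skb_dequeue(&q)) != NULL)
 *		kfree_skb(skb);
 */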

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	WRITE_ONCE(new->prev, prev);
	WRITE_ONCE(new->next, next);
	WRITE_ONCE(((struct sk_buff_head_l *)next)->prev, new);
	WRITE_ONCE(((struct sk_buff_head_l *)prev)->next, new);
	WRITE_ONCE(q->qlen, q->qlen + 1);
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	unsigned long flags;

	SKB_TRACE2(q, new);
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_tail(q, new);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct sk_buff *
skb_peek(const struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (const struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(const struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = READ_ONCE(q->prev);
	SKB_TRACE2(q, skb);
	if (skb == (const struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, q);

	WRITE_ONCE(q->qlen, q->qlen - 1);
	p = skb->prev;
	n = skb->next;
	WRITE_ONCE(n->prev, p);
	WRITE_ONCE(p->next, n);
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
{
	unsigned long flags;

	SKB_TRACE2(skb, q);
	spin_lock_irqsave(&q->lock, flags);
	__skb_unlink(skb, q);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek(q);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);
	spin_unlock_irqrestore(&q->lock, flags);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
__skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue_tail(q);
	spin_unlock_irqrestore(&q->lock, flags);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{
	unsigned long flags;

	SKB_TRACE2(q, skb);
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_head(q, skb);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline uint32_t
skb_queue_len(const struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (READ_ONCE(q->qlen));
}

static inline void
___skb_queue_splice(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	WRITE_ONCE(b->prev, p);
	WRITE_ONCE(((struct sk_buff_head_l *)p)->next, b);
	WRITE_ONCE(e->next, n);
	WRITE_ONCE(((struct sk_buff_head_l *)n)->prev, e);
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}

static inline void
skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
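
/*
 * The splice variants above move all skbs from "from" onto the head or tail
 * of "to" and reinitialize "from"; neither takes the queue locks itself, so
 * callers must hold whatever locking is required.
 */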

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
	WARN_ONCE(skb_queue_len(q) != 0, "%s: queue %p not empty: %u",
	    __func__, q, skb_queue_len(q));
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff_head _q;
	unsigned long flags;

	SKB_TRACE(q);

	if (skb_queue_empty_lockless(q))
		return;

	__skb_queue_head_init(&_q);
	spin_lock_irqsave(&q->lock, flags);
	skb_queue_splice_init(q, &_q);
	spin_unlock_irqrestore(&q->lock, flags);
	__skb_queue_purge(&_q);
}
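
/*
 * skb_queue_purge() above deliberately splices everything onto a local
 * queue under the lock and frees outside of it, so kfree_skb() is never
 * called with the queue lock held.
 */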

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(const struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	return (frag->size);
}

#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	return (page_address(frag->page) + frag->offset);
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->next = NULL;
}

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}
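
/*
 * The transport/network/mac headers are stored as offsets from *head (see
 * l4hdroff/l3hdroff/mac_header in struct sk_buff above) rather than as
 * pointers, so they remain valid should the underlying buffer ever move.
 */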

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{
	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen used, but we need it as the symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (true);
}

static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->head + skb->mac_header);
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}

static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}

static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	return (sum);
}

static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	refcount_inc(&skb->refcnt);
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}

static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}

static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	/* page_pool */
	SKB_TODO();
}

static inline int
skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

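/*
 * Usable payload size of a buffer of size _s once the cache-line aligned
 * struct skb_shared_info overhead has been subtracted (cf. the Linux macro
 * of the same name).
 */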
#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */