/*-
 * Copyright (c) 2020-2025 The FreeBSD Foundation
 * Copyright (c) 2021-2025 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are
 *       highly likely to change as we improve the integration with
 *       FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

/*
 * At least the net/intel-irdma-kmod port pulls this header in; likely through
 * if_ether.h (see PR289268).  This means we can no longer rely on
 * IEEE80211_DEBUG (opt_wlan.h) to automatically set SKB_DEBUG.
 */
/* #define	SKB_DEBUG */

#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
    if (linuxkpi_debug_skb & DSKB_TODO)					\
	printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define	SKB_IMPROVE(...)						\
    if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
	printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define	SKB_TRACE(_s)							\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define	SKB_TRACE2(_s, _p)						\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s,	\
	    __VA_ARGS__)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif
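
/*
 * Example (illustrative sketch, not part of this header): with SKB_DEBUG
 * defined above and the DSKB_TRACE bit set in linuxkpi_debug_skb, every
 * traced helper logs its caller, line, and skb pointer:
 *
 *	linuxkpi_debug_skb |= DSKB_TRACE;
 *	skb = alloc_skb(size, GFP_KERNEL);
 *	// console: "SKB_TRACE alloc_skb:<line> <skb pointer>"
 *
 * `size' and the console output shown are placeholders.
 */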

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

struct skb_shared_hwtstamps {
	ktime_t			hwtstamp;
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)
#define	SKB_DATA_ALIGN(_x)	roundup2(_x, CACHE_LINE_SIZE)

struct sk_buff_head {
	/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;
	spinlock_t		lock;
};
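
/*
 * Example (illustrative sketch, not part of this header): a driver-local
 * queue is initialized once and then used via the locked helpers defined
 * further down in this file:
 *
 *	struct sk_buff_head txq;
 *
 *	skb_queue_head_init(&txq);		// init list head and lock
 *	skb_queue_tail(&txq, skb);		// append under txq.lock
 *	skb = skb_dequeue(&txq);		// NULL once empty
 *	skb_queue_purge(&txq);			// kfree_skb() anything left
 */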

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x03,
};

struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	uint32_t		len;		/* Total data length (linear + frag data). */
	uint32_t		data_len;	/* Length of the frag data only. */
	union {
		__wsum			csum;
		struct {
			uint16_t	csum_offset;
			uint16_t	csum_start;
		};
	};
	uint16_t		protocol;
	uint8_t			ip_summed;		/* 2 bit only. */
	/* uint8_t */

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct skb_shared_info	*shinfo	__aligned(CACHE_LINE_SIZE);

	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint16_t		mac_header;	/* offset of mac_header */
	uint16_t		mac_len;	/* Link-layer header length. */
	enum sk_buff_pkt_type	pkt_type;
	refcount_t		refcnt;

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
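
/*
 * Buffer layout (derived from the accessors below): head <= data <= tail <=
 * end always holds for the linear part.  skb_headroom() is (data - head),
 * skb_tailroom() is (end - tail), and for a linear skb len is (tail - data):
 *
 *	head          data            tail           end
 *	 |<-headroom->|<---- len ---->|<-tailroom--->|
 */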

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(const struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}

/* -------------------------------------------------------------------------- */

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}

/*
 * Length of the data in the linear (non-frag) part of the skb.
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}


/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	if (unlikely(skb_is_nonlinear(skb)))
		return (0);
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}


/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
		    __func__, skb, skb->len, skb->head, skb->data, skb->tail,
		    skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}
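
/*
 * Example (illustrative sketch, not part of this header): a typical send
 * path over-allocates, reserves headroom for headers pushed later, and
 * copies the payload in; `buf', `buflen', and `hdr' are hypothetical
 * driver-local variables:
 *
 *	struct sk_buff *skb;
 *	void *hdr;
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + buflen);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);		// headroom for skb_push()
 *	skb_put_data(skb, buf, buflen);		// append the payload
 *	hdr = skb_push(skb, 16);		// prepend a 16-byte header
 */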

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
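
/*
 * Example (illustrative sketch, not part of this header): receive-side
 * parsing consumes a leading header with skb_pull(); the NULL return
 * covers runt frames (`struct myhdr' is a hypothetical header type):
 *
 *	struct myhdr *hdr;
 *
 *	hdr = (struct myhdr *)skb->data;
 *	if (skb_pull(skb, sizeof(*hdr)) == NULL)
 *		return (-EINVAL);	// frame shorter than the header
 *	// skb->data now points at the payload; skb->len was reduced.
 */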

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	__skb_trim(skb, len);
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
		    "page %#jx offset %ju size %zu truesize %u\n", __func__,
		    skb, skb->head, skb->data, skb->tail, skb->end, skb->len,
		    fragno, (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
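
/*
 * Example (illustrative sketch, not part of this header): attaching a page
 * as fragment 0 grows both len and data_len by size, so the skb becomes
 * non-linear while skb_headlen() stays unchanged (`pg' is a hypothetical
 * page obtained elsewhere):
 *
 *	skb_add_rx_frag(skb, 0, pg, 0, size, PAGE_SIZE);
 *	// skb_is_nonlinear(skb) is now true.
 */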

/* -------------------------------------------------------------------------- */

#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)

#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
	    ((skb) != NULL);						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
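
/*
 * Example (illustrative sketch, not part of this header):
 * skb_queue_walk_safe() tolerates unlinking the current element, which the
 * plain skb_queue_walk() does not; the caller holds q.lock around the
 * __-prefixed unlink (`skb_matches' is a hypothetical predicate):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&q, skb, tmp) {
 *		if (skb_matches(skb))
 *			__skb_unlink(skb, &q);
 *	}
 */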

static inline bool
skb_queue_empty(const struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (q->next == (const struct sk_buff *)q);
}

static inline bool
skb_queue_empty_lockless(const struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (READ_ONCE(q->next) == (const struct sk_buff *)q);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	__skb_queue_head_init(q);
	spin_lock_init(&q->lock);
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	WRITE_ONCE(new->prev, prev);
	WRITE_ONCE(new->next, next);
	WRITE_ONCE(((struct sk_buff_head_l *)next)->prev, new);
	WRITE_ONCE(((struct sk_buff_head_l *)prev)->next, new);
	WRITE_ONCE(q->qlen, q->qlen + 1);
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	unsigned long flags;

	SKB_TRACE2(q, new);
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_tail(q, new);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct sk_buff *
skb_peek(const struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (const struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(const struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = READ_ONCE(q->prev);
	SKB_TRACE2(q, skb);
	if (skb == (const struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, q);

	WRITE_ONCE(q->qlen, q->qlen - 1);
	p = skb->prev;
	n = skb->next;
	WRITE_ONCE(n->prev, p);
	WRITE_ONCE(p->next, n);
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
{
	unsigned long flags;

	SKB_TRACE2(skb, q);
	spin_lock_irqsave(&q->lock, flags);
	__skb_unlink(skb, q);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek(q);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);
	spin_unlock_irqrestore(&q->lock, flags);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
__skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue_tail(q);
	spin_unlock_irqrestore(&q->lock, flags);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{
	unsigned long flags;

	SKB_TRACE2(q, skb);
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_head(q, skb);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline uint32_t
skb_queue_len(const struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (READ_ONCE(q->qlen));
}

static inline void
___skb_queue_splice(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	WRITE_ONCE(b->prev, p);
	WRITE_ONCE(((struct sk_buff_head_l *)p)->next, b);
	WRITE_ONCE(e->next, n);
	WRITE_ONCE(((struct sk_buff_head_l *)n)->prev, e);
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}

static inline void
skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}


static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
	WARN_ONCE(skb_queue_len(q) != 0, "%s: queue %p not empty: %u",
	    __func__, q, skb_queue_len(q));
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff_head _q;
	unsigned long flags;

	SKB_TRACE(q);

	if (skb_queue_empty_lockless(q))
		return;

	__skb_queue_head_init(&_q);
	spin_lock_irqsave(&q->lock, flags);
	skb_queue_splice_init(q, &_q);
	spin_unlock_irqrestore(&q->lock, flags);
	__skb_queue_purge(&_q);
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(const struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	return (frag->size);
}

#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	return ((uint8_t *)page_address(frag->page) + frag->offset);
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->next = NULL;
}

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{
	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline void
skb_copy_header(struct sk_buff *to, const struct sk_buff *from)
{
	SKB_TRACE2(to, from);
	SKB_TODO();
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (true);
}

static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->head + skb->mac_header);
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}

static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}

static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __wsum
csum_unfold(__sum16 sum)
{
	return (sum);
}

static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	refcount_inc(&skb->refcnt);
	return (skb);
}
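
/*
 * Example (illustrative sketch, not part of this header): skb_get() takes
 * an extra reference so the skb survives one kfree_skb(); this assumes
 * linuxkpi_kfree_skb() releases a single reference per call, matching
 * Linux semantics:
 *
 *	skb2 = skb_get(skb);	// refcnt 2
 *	kfree_skb(skb);		// refcnt 1; still valid via skb2
 *	kfree_skb(skb2);	// last reference; skb is freed
 */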

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}

static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}

static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	/* page_pool */
	SKB_TODO();
}

static inline int
skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
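
/*
 * Example (illustrative sketch): SKB_WITH_OVERHEAD() yields the usable data
 * size of a pre-allocated buffer once the trailing skb_shared_info is
 * accounted for, e.g. when sizing a buffer to hand to build_skb():
 *
 *	size_t usable = SKB_WITH_OVERHEAD(PAGE_SIZE);
 */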

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */