/*-
 * Copyright (c) 2020-2023 The FreeBSD Foundation
 * Copyright (c) 2021-2023 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are
 *       highly likely to change as we improve the integration with
 *       FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

#include "opt_wlan.h"

/* Currently this is only used for wlan so we can depend on that. */
#if defined(IEEE80211_DEBUG) && !defined(SKB_DEBUG)
#define	SKB_DEBUG
#endif

/* #define	SKB_DEBUG */

#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

/* Wrapped in do { } while (0) so the macros are safe in if/else bodies. */
#define	SKB_TODO()							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TODO)			\
			printf("SKB_TODO %s:%d\n", __func__, __LINE__);	\
	} while (0)
#define	SKB_IMPROVE(...)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_IMPROVE)			\
			printf("SKB_IMPROVE %s:%d\n", __func__,		\
			    __LINE__);					\
	} while (0)
#define	SKB_TRACE(_s)							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p\n", __func__,	\
			    __LINE__, _s);				\
	} while (0)
#define	SKB_TRACE2(_s, _p)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p, %p\n", __func__,	\
			    __LINE__, _s, _p);				\
	} while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p " _fmt "\n",		\
			    __func__, __LINE__, _s, __VA_ARGS__);	\
	} while (0)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif
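
/*
 * Usage sketch (illustrative only): the DSKB_* values are bits in the
 * linuxkpi_debug_skb bitmask declared above; it can be set from a debugger
 * or via whatever knob the implementation file exports, e.g.:
 *
 *	linuxkpi_debug_skb = DSKB_TODO | DSKB_TRACE;
 */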

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

struct skb_shared_hwtstamps {
	ktime_t			hwtstamp;
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)
#define	SKB_DATA_ALIGN(_x)	roundup2(_x, CACHE_LINE_SIZE)

struct sk_buff_head {
	/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* Total data length (linear + frags). */
	uint32_t		data_len;	/* Data length in frags (non-linear part only). */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;
	uint16_t		mac_header;	/* offset of mac_header */

	/*
	 * "Scratch" area for layers to store metadata.
	 * Must be an array: consumers apply sizeof() to it.
	 */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
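
/*
 * Buffer layout sketch (mirroring the Linux semantics the accessors below
 * assume); the four pointers always satisfy head <= data <= tail <= end:
 *
 *	head              data               tail              end
 *	 |<-- headroom -->|<-- data (len) -->|<-- tailroom -->|
 *
 * skb_reserve() grows headroom before any data is added; skb_put() appends
 * at tail; skb_push()/skb_pull() prepend/strip at the data pointer.
 */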

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}

/* -------------------------------------------------------------------------- */

/*
 * Walk a NULL-terminated skb chain (Linux semantics: _q is the first skb,
 * not a queue head).  Safe against unlinking/freeing the current skb, as
 * the successor is latched before the loop body runs.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
	    (skb) != NULL;						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)

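/*
 * Usage sketch (illustrative): dispose of a NULL-terminated segment chain,
 * e.g. one a GSO implementation would hand back; `first', `skb' and `tmp'
 * are hypothetical locals:
 *
 *	struct sk_buff *first, *skb, *tmp;
 *	...
 *	skb_list_walk_safe(first, skb, tmp)
 *		dev_kfree_skb(skb);
 */
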
/* Add headroom; this cannot be done once there is data in the skb. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
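
/*
 * Example sketch (hypothetical RX allocation path): reserve alignment
 * headroom immediately after allocation, while the skb is still empty:
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + buflen);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);
 */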

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}
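
/*
 * Example sketch: prepend a header in front of existing payload; `hdr' and
 * `hdrlen' are hypothetical and the caller must have reserved enough
 * headroom beforehand:
 *
 *	void *p;
 *
 *	p = skb_push(skb, hdrlen);
 *	memcpy(p, hdr, hdrlen);
 */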

/*
 * Length of the linear data in the skb (i.e., excluding any frags).
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}


/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, "
		    "s %p len %zu\n", __func__, skb, skb->len, skb->head,
		    skb->data, skb->tail, skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}
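
/*
 * Example sketch: the skb_put() family appends at the tail; skb_put()
 * returns the old tail for the caller to fill, while skb_put_data() and
 * skb_put_zero() also do the copy/zeroing.  `buf' and `buflen' are
 * hypothetical:
 *
 *	memcpy(skb_put(skb, buflen), buf, buflen);	// manual fill
 *	skb_put_data(skb, buf, buflen);			// same, in one call
 *	skb_put_zero(skb, 8);				// append 8 zero bytes
 */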

/*
 * Remove len bytes from the beginning of the data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %zu, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
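
/*
 * Example sketch: strip a just-parsed front header; this version returns
 * NULL when the skb is shorter than len, so the result can be
 * error-checked (`hdrlen' is hypothetical):
 *
 *	if (skb_pull(skb, hdrlen) == NULL)
 *		goto drop;
 */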

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u "
		    "fragno %d page %#jx offset %ju size %zu truesize %u\n",
		    __func__, skb, skb->head, skb->data, skb->tail, skb->end,
		    skb->len, fragno,
		    (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}

/* -------------------------------------------------------------------------- */

/*
 * The queues below are circular: the sk_buff_head, cast to an skb, acts
 * as the sentinel terminating a walk.
 */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}
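
/*
 * Usage sketch (illustrative): FIFO enqueue/drain with the helpers defined
 * below.  Note that in this compat layer the non-underscore wrappers do not
 * currently take q->lock either; callers serialize access themselves:
 *
 *	struct sk_buff_head q;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);
 *	while ((skb = skb_dequeue(&q)) != NULL)
 *		dev_kfree_skb(skb);
 */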

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	((struct sk_buff_head_l *)next)->prev = new;
	((struct sk_buff_head_l *)prev)->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	SKB_TRACE2(q, new);
	return (__skb_queue_tail(q, new));
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

/* The loop condition is constant false: frag walking is stubbed out. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
___skb_queue_splice(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	b->prev = p;
	((struct sk_buff_head_l *)p)->next = b;
	e->next = n;
	((struct sk_buff_head_l *)n)->prev = e;
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}

static inline void
skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
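
/*
 * Usage sketch (illustrative): hand everything staged on a local queue to
 * a shared one in O(1), leaving `pending' empty and re-initialized
 * (`pending' and `txq' are hypothetical):
 *
 *	skb_queue_splice_tail_init(&pending, &txq);
 */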

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->data_len > 0);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{

	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen in use, but needed as the symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->head + skb->mac_header);
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}

static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}

static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}

static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}

static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline int
skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

/* Usable size once the trailing, aligned shared info is accounted for. */
#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
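
/*
 * Example sketch: for a page-backed buffer, the data region left over for
 * actual packet bytes would be:
 *
 *	size_t avail = SKB_WITH_OVERHEAD(PAGE_SIZE);
 */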

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */