xref: /freebsd/sys/compat/linuxkpi/common/include/linux/skbuff.h (revision ce9f36610ea9ff29d42a2bcfed44b020c2e56dcb)
1 /*-
2  * Copyright (c) 2020-2022 The FreeBSD Foundation
3  * Copyright (c) 2021-2022 Bjoern A. Zeeb
4  *
5  * This software was developed by Björn Zeeb under sponsorship from
6  * the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 /*
33  * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
34  *       Do not rely on the internals of this implementation.  They are highly
35  *       likely to change as we will improve the integration to FreeBSD mbufs.
36  */
37 
38 #ifndef	_LINUXKPI_LINUX_SKBUFF_H
39 #define	_LINUXKPI_LINUX_SKBUFF_H
40 
41 #include <linux/kernel.h>
42 #include <linux/page.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/netdev_features.h>
45 #include <linux/list.h>
46 #include <linux/gfp.h>
47 #include <linux/compiler.h>
48 #include <linux/spinlock.h>
49 
/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

/*
 * Debug helpers, only active with SKB_DEBUG and the matching bit set in
 * linuxkpi_debug_skb.  Each expansion is wrapped in do { } while (0) so
 * the macro is a single statement; the previous bare-if form silently
 * mis-paired with an else in "if (x) SKB_TRACE(s); else ..." call sites.
 */
#define	SKB_TODO()							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TODO)			\
			printf("SKB_TODO %s:%d\n", __func__, __LINE__);	\
	} while (0)
#define	SKB_IMPROVE(...)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_IMPROVE)			\
			printf("SKB_IMPROVE %s:%d\n", __func__,		\
			    __LINE__);					\
	} while (0)
#define	SKB_TRACE(_s)							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p\n", __func__,	\
			    __LINE__, _s);				\
	} while (0)
#define	SKB_TRACE2(_s, _p)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p, %p\n", __func__,	\
			    __LINE__, _s, _p);				\
	} while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p " _fmt "\n",		\
			    __func__, __LINE__, _s, __VA_ARGS__);	\
	} while (0)
#else
#define	SKB_TODO()		do { } while(0)
#define	SKB_IMPROVE(...)	do { } while(0)
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#endif
81 
/* Linux packet classification; only the values the wireless drivers use. */
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

/* Default headroom reserved in front of packet data. */
#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)
89 
/*
 * Queue head for a doubly-linked list of sk_buffs.  The first two
 * members deliberately mirror struct sk_buff so the head can be cast to
 * (struct sk_buff *) and act as a sentinel in the queue macros below.
 */
struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;		/* Number of queued skbs. */
	spinlock_t		lock;		/* NOTE(review): never initialized/taken in this header. */
};
97 
/* Checksum offload state of an skb (mirrors Linux CHECKSUM_* values). */
enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

/* One page fragment carrying part of an skb's data. */
struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;		/* Start of the data within the page. */
	size_t			size;		/* Bytes used in this fragment. */
};
typedef	struct skb_frag	skb_frag_t;
112 
/* GSO (generic segmentation offload) types; only TCP is modelled. */
enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

/* Out-of-line state attached to an skb; accessed via skb_shinfo(). */
struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;	/* Used entries in frags[]. */
	struct sk_buff			*frag_list;	/* Chained continuation skbs. */
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};
125 
/*
 * Compat sk_buff.  Unlike Linux, head/data/tail/end are always real
 * pointers here (Linux may compile them as offsets), and the FreeBSD
 * specific (m, m_free_func) pair releases the backing storage.
 */
struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
172 
173 /* -------------------------------------------------------------------------- */
174 
175 struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
176 struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
177 void linuxkpi_kfree_skb(struct sk_buff *);
178 
179 struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);
180 
181 /* -------------------------------------------------------------------------- */
182 
183 static inline struct sk_buff *
184 alloc_skb(size_t size, gfp_t gfp)
185 {
186 	struct sk_buff *skb;
187 
188 	skb = linuxkpi_alloc_skb(size, gfp);
189 	SKB_TRACE(skb);
190 	return (skb);
191 }
192 
/* Allocate an RX skb; implemented out of line in skbuff.c. */
static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

/* As __dev_alloc_skb() but with a non-sleeping allocation. */
static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}
214 
/* Free an skb and its backing storage (see linuxkpi_kfree_skb). */
static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

/* Device variant; same as kfree_skb() in this implementation. */
static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

/* Any-context variant; simply forwards to dev_kfree_skb() here. */
static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

/* Interrupt-context variant; the SKB_IMPROVE note questions whether
 * freeing should be deferred — currently it is done inline. */
static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}
243 
244 /* -------------------------------------------------------------------------- */
245 
/*
 * Walk a NULL-terminated skb list starting at (_q)->next, tolerating
 * unlinking of the current element.  The previous version used the
 * plain assignment "(tmp) = (skb)->next" as part of the loop condition,
 * so for the LAST element tmp became NULL, the condition evaluated
 * false, and the body never ran for it.  Sequence the assignment with a
 * comma so only the NULL check of skb terminates the loop.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next;					\
	    ((skb) != NULL) && ((tmp) = (skb)->next, 1);		\
	    (skb) = (tmp))
249 
/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	/* Advance data and tail together; len stays 0, only headroom grows. */
	skb->data += len;
	skb->tail += len;
}
267 
268 /*
269  * Remove headroom; return new data pointer; basically make space at the
270  * front to copy data in (manually).
271  */
272 static inline void *
273 __skb_push(struct sk_buff *skb, size_t len)
274 {
275 	SKB_TRACE(skb);
276 	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
277 	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data));
278 	skb->len  += len;
279 	skb->data -= len;
280 	return (skb->data);
281 }
282 
/* Prepend len bytes of headroom to the data; see __skb_push(). */
static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}

/*
 * Length of the data on the skb (without any frags)???
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}
301 
302 
/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}
332 
333 
/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	/* Remember the old tail; that is what the caller writes to. */
	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
	printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
	    __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
	    s, len);
#endif
	return (s);
}

/* See __skb_put(). */
static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}
369 
370 /* skb_put() + copying data in. */
371 static inline void *
372 skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
373 {
374 	void *s;
375 
376 	SKB_TRACE2(skb, buf);
377 	s = skb_put(skb, len);
378 	if (len == 0)
379 		return (s);
380 	memcpy(s, buf, len);
381 	return (s);
382 }
383 
384 /* skb_put() + filling with zeros. */
385 static inline void *
386 skb_put_zero(struct sk_buff *skb, size_t len)
387 {
388 	void *s;
389 
390 	SKB_TRACE(skb);
391 	s = skb_put(skb, len);
392 	memset(s, '\0', len);
393 	return (s);
394 }
395 
/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	/* Underrun returns NULL instead of asserting; see XXX above. */
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
417 
418 /* Reduce skb data to given length or do nothing if smaller already. */
419 static inline void
420 __skb_trim(struct sk_buff *skb, unsigned int len)
421 {
422 
423 	SKB_TRACE(skb);
424 	if (skb->len < len)
425 		return;
426 
427 	skb->len = len;
428 	skb->tail = skb->data + skb->len;
429 }
430 
/* Public wrapper around __skb_trim(); no locking in either. */
static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}
437 
/* Access the shared info area attached to this skb. */
static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}
445 
446 static inline void
447 skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
448     off_t offset, size_t size, unsigned int truesize)
449 {
450 	struct skb_shared_info *shinfo;
451 
452 	SKB_TRACE(skb);
453 #ifdef SKB_DEBUG
454 	if (linuxkpi_debug_skb & DSKB_TRACEX)
455 	printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
456 	    "page %#jx offset %ju size %zu truesize %u\n", __func__,
457 	    skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
458 	    (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
459 	    size, truesize);
460 #endif
461 
462 	shinfo = skb_shinfo(skb);
463 	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
464 	    "fragno %d too big\n", __func__, skb, fragno));
465 	shinfo->frags[fragno].page = page;
466 	shinfo->frags[fragno].offset = offset;
467 	shinfo->frags[fragno].size = size;
468 	shinfo->nr_frags = fragno + 1;
469         skb->len += size;
470         skb->truesize += truesize;
471 
472 	/* XXX TODO EXTEND truesize? */
473 }
474 
475 /* -------------------------------------------------------------------------- */
476 
/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
/*
 * Iterate over a queue; the head acts as its own sentinel, so an empty
 * queue runs zero iterations.  The _safe variant reads next up front so
 * the current skb may be unlinked inside the loop body.
 */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
485 
/* True if the queue holds no skbs; qlen is the authoritative count. */
static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

/* Point the head at itself (empty sentinel) and zero the count. */
static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

/* NOTE(review): q->lock is not initialized here — confirm no caller
 * relies on it being usable after this. */
static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}
508 
509 static inline void
510 __skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
511     struct sk_buff_head *q)
512 {
513 
514 	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
515 	new->prev = prev;
516 	new->next = next;
517 	next->prev = new;
518 	prev->next = new;
519 	q->qlen++;
520 }
521 
/* Insert "new" immediately after "skb" in queue q. */
static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

/* Insert "new" immediately before "skb" in queue q. */
static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}
539 
540 static inline void
541 __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
542 {
543 	struct sk_buff *s;
544 
545 	SKB_TRACE2(q, skb);
546 	q->qlen++;
547 	s = (struct sk_buff *)q;
548 	s->prev->next = skb;
549 	skb->prev = s->prev;
550 	skb->next = s;
551 	s->prev = skb;
552 }
553 
/* Append to tail; despite the Linux name, no lock is taken here. */
static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}
560 
561 static inline struct sk_buff *
562 skb_peek(struct sk_buff_head *q)
563 {
564 	struct sk_buff *skb;
565 
566 	skb = q->next;
567 	SKB_TRACE2(q, skb);
568 	if (skb == (struct sk_buff *)q)
569 		return (NULL);
570 	return (skb);
571 }
572 
573 static inline struct sk_buff *
574 skb_peek_tail(struct sk_buff_head *q)
575 {
576 	struct sk_buff *skb;
577 
578 	skb = q->prev;
579 	SKB_TRACE2(q, skb);
580 	if (skb == (struct sk_buff *)q)
581 		return (NULL);
582 	return (skb);
583 }
584 
585 static inline void
586 __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
587 {
588 	SKB_TRACE2(skb, head);
589 	struct sk_buff *p, *n;;
590 
591 	head->qlen--;
592 	p = skb->prev;
593 	n = skb->next;
594 	p->next = n;
595 	n->prev = p;
596 	skb->prev = skb->next = NULL;
597 }
598 
/* Public wrapper around __skb_unlink(); no locking in either. */
static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}
605 
/* Unlink and return the first skb, or NULL if the queue is empty. */
static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	/* The head points back at itself when the queue is empty. */
	if (skb == (struct sk_buff *)q)
		return (NULL);
	/* Defensive NULL check in case the queue was never initialized. */
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

/* Same as __skb_dequeue(); no locking here either. */
static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}
627 
628 static inline struct sk_buff *
629 skb_dequeue_tail(struct sk_buff_head *q)
630 {
631 	struct sk_buff *skb;
632 
633 	skb = skb_peek_tail(q);
634 	if (skb != NULL)
635 		__skb_unlink(skb, q);
636 
637 	SKB_TRACE2(q, skb);
638 	return (skb);
639 }
640 
/* Prepend skb at the head of the queue (right after the sentinel). */
static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

/* Identical to __skb_queue_head(); neither takes the queue lock. */
static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}
656 
/* Number of skbs currently on the queue. */
static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

/* As skb_queue_len() but with a READ_ONCE for lock-free readers. */
static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}
672 
673 static inline void
674 __skb_queue_purge(struct sk_buff_head *q)
675 {
676 	struct sk_buff *skb;
677 
678 	SKB_TRACE(q);
679         while ((skb = __skb_dequeue(q)) != NULL)
680 		kfree_skb(skb);
681 }
682 
/* Public wrapper around __skb_queue_purge(); no locking in either. */
static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

/* Return the element before skb; q is unused (kept for API parity). */
static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}
698 
699 /* -------------------------------------------------------------------------- */
700 
701 static inline struct sk_buff *
702 skb_copy(struct sk_buff *skb, gfp_t gfp)
703 {
704 	struct sk_buff *new;
705 
706 	new = linuxkpi_skb_copy(skb, gfp);
707 	SKB_TRACE2(skb, new);
708 	return (new);
709 }
710 
/*
 * Unimplemented stub.  NOTE(review): Linux frees the skb here; while
 * this remains a TODO every consume_skb() call site leaks the skb.
 */
static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* Unimplemented stub; returns an all-ones checksum. */
static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

/* Unimplemented stub; returns -1 as an invalid offset. */
static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

/* Unimplemented stub; returns an invalid DMA address. */
static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

/* Unimplemented stub; NOTE(review): returns (size_t)-1, not frag->size. */
static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}
750 
751 static inline bool
752 skb_is_nonlinear(struct sk_buff *skb)
753 {
754 	SKB_TRACE(skb);
755 	return ((skb->data_len > 0) ? true : false);
756 }
757 
/* Deliberately iterates zero times: frag walking is not implemented. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

/* Unimplemented stub. */
static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* Unimplemented stub; NOTE(review): Linux returns int (0 on success),
 * not bool — revisit when implementing. */
static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* Unimplemented stub; should map the frag's page + offset to a KVA. */
static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

/* Unimplemented stub. */
static inline void
skb_free_frag(void *frag)
{

	SKB_TODO();
}

/* Unimplemented stub; GSO segmentation is not supported. */
static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* Always false; keeps callers on the non-GSO path without log noise. */
static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

/* Unimplemented stub; should clear skb->next. */
static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
813 
/*
 * Move every skb from "from" to the head of "to" and re-initialize
 * "from".  b..e is the chain being moved; n is the old first element of
 * "to", which ends up after the spliced-in chain.
 */
static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;		/* First element of "from". */
	e = from->prev;		/* Last element of "from". */
	n = to->next;		/* Old first element of "to". */

	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
837 
/* Record the current data pointer as the transport (L4) header offset. */
static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}
845 
846 static inline uint8_t *
847 skb_transport_header(struct sk_buff *skb)
848 {
849 
850 	SKB_TRACE(skb);
851         return (skb->head + skb->l4hdroff);
852 }
853 
854 static inline uint8_t *
855 skb_network_header(struct sk_buff *skb)
856 {
857 
858 	SKB_TRACE(skb);
859         return (skb->head + skb->l3hdroff);
860 }
861 
/* Unimplemented stub; NOTE(review): returns positive ENXIO while
 * pskb_expand_head() below returns -ENXIO — inconsistent signs. */
static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

/* Unimplemented stub; always fails with -ENXIO. */
static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

/* Return the queue (e.g. TX ring) this skb was mapped to. */
static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

/* Unimplemented stub; pretends the header is never cloned. */
static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* Unimplemented stub; no mac header pointer is tracked yet. */
static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* Unimplemented stub. */
static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* Unimplemented stub. */
static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* Unimplemented stub; returns the checksum unchanged. */
static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}
931 
932 static __inline void
933 skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
934 {
935 	SKB_TODO();
936 }
937 
938 static inline void
939 skb_reset_tail_pointer(struct sk_buff *skb)
940 {
941 
942 	SKB_TRACE(skb);
943 	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
944 	SKB_TRACE(skb);
945 }
946 
/* Unimplemented stub; should take a reference and return the skb. */
static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

/* Unimplemented stub; should return a copy with at least "headroom". */
static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

/* Copy len bytes from the start of the skb's linear data into dst. */
static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

/* Unimplemented stub; should wrap an existing buffer in an skb. */
static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{

	SKB_TODO();
	return (NULL);
}

/* Unimplemented stub; should pad the skb data to "pad" bytes. */
static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

/* Unimplemented stub. */
static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

/* Unimplemented stub; NAPI-context free. */
static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

/* Unimplemented stub; NOTE(review): Linux returns int (0 on success),
 * not bool — revisit when implementing. */
static inline bool
skb_linearize(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}
1013 
/*
 * Usable payload size once the shared info kept at the end of the
 * buffer is accounted for.  The whole expansion is parenthesized so it
 * is safe inside larger expressions (e.g. SKB_WITH_OVERHEAD(x) * 2 or
 * a < comparison would previously have bound incorrectly).
 */
#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
1016 
1017 #endif	/* _LINUXKPI_LINUX_SKBUFF_H */
1018