/*-
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 * Copyright (c) 2021-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
    if (linuxkpi_debug_skb & DSKB_TODO)					\
	printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define	SKB_IMPROVE(...)						\
    if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
	printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define	SKB_TRACE(_s)							\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define	SKB_TRACE2(_s, _p)						\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s,	\
	    __VA_ARGS__)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif
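
/*
 * Example (sketch): with SKB_DEBUG compiled in, tracing is enabled by
 * setting bits in linuxkpi_debug_skb, e.g. from the debugger or early
 * in driver code (any sysctl/tunable wiring lives in the implementation
 * file, not in this header):
 *
 *	linuxkpi_debug_skb = DSKB_TODO | DSKB_TRACE;
 */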

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

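/*
 * The four buffer pointers below follow the Linux layout model; the
 * invariants assumed by skb_headroom()/skb_tailroom() and the KASSERTs
 * in this file are:
 *
 *	head <= data <= tail <= end
 *	skb_headroom() == data - head
 *	skb_tailroom() == end - tail
 *	tail - data    == skb_headlen() == len - data_len
 */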
struct sk_buff {
		/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	int			list;		/* XXX TYPE */
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
void linuxkpi_kfree_skb(struct sk_buff *);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* -------------------------------------------------------------------------- */

/*
 * XXX BZ review this one for terminal condition as Linux "queues" are special.
 * The comma operator keeps the condition true when (skb)->next is NULL so
 * that the final element is still visited.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next;					\
	    ((skb) != NULL) && ((tmp) = (skb)->next, true);		\
	    (skb) = (tmp))
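
/*
 * Usage sketch ("head" and the chain are illustrative only): the walk
 * starts at head->next and it is safe to free the current skb while
 * iterating:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_list_walk_safe(head, skb, tmp)
 *		kfree_skb(skb);
 */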

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
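
/*
 * Typical construction pattern (sketch; "buflen", "payload" and "paylen"
 * are illustrative only): reserve headroom while the skb is still empty,
 * then append data:
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + buflen);
 *	if (skb == NULL)
 *		return (ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put_data(skb, payload, paylen);
 */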

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

/*
 * Length of the linear data on the skb, i.e. len minus any frag data.
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, "
		    "s %p len %zu\n", __func__, skb, skb->len, skb->head,
		    skb->data, skb->tail, skb->end, s, len);
#endif
	return (s);
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %zu, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
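
/*
 * Usage sketch: strip a fixed-size header, failing gracefully on a runt
 * frame ("hdrlen" is illustrative only):
 *
 *	if (skb_pull(skb, hdrlen) == NULL) {
 *		kfree_skb(skb);
 *		return (EINVAL);
 *	}
 */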

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u "
		    "fragno %d page %#jx offset %ju size %zu truesize %u\n",
		    __func__, skb, skb->head, skb->data, skb->tail, skb->end,
		    skb->len, fragno,
		    (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
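
/*
 * Usage sketch: skb_queue_walk() iterates without unlinking; the _safe
 * variant tolerates unlinking the current skb ("q" is a
 * struct sk_buff_head *, should_drop() is illustrative only):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 */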

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *s;

	SKB_TRACE2(q, skb);
	q->qlen++;
	s = (struct sk_buff *)q;
	s->prev->next = skb;
	skb->prev = s->prev;
	skb->next = s;
	s->prev = skb;
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->data_len > 0);
}

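/* XXX TODO: stub only; the always-false condition means this never iterates. */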
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;
	e = from->prev;
	n = to->next;

	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
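
/*
 * Usage sketch: drain a queue into a local list under the queue lock and
 * process the elements without holding it (the locking is illustrative;
 * this header leaves all locking to the caller):
 *
 *	struct sk_buff_head local;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&local);
 *	spin_lock(&q->lock);
 *	skb_queue_splice_init(q, &local);
 *	spin_unlock(&q->lock);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		kfree_skb(skb);
 */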

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	SKB_TODO();
	return (NULL);
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	/* tail is a real pointer here, not an offset as on Linux. */
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */
933