xref: /freebsd/sys/compat/linuxkpi/common/include/linux/skbuff.h (revision cc68614da8232d8baaca0ae0d0dd8f890f06623e)
1 /*-
2  * Copyright (c) 2020-2021 The FreeBSD Foundation
3  * Copyright (c) 2021 Bjoern A. Zeeb
4  *
5  * This software was developed by Björn Zeeb under sponsorship from
6  * the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 /*
33  * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
34  *       Do not rely on the internals of this implementation.  They are highly
35  *       likely to change as we will improve the integration to FreeBSD mbufs.
36  */
37 
38 #ifndef	_LINUXKPI_LINUX_SKBUFF_H
39 #define	_LINUXKPI_LINUX_SKBUFF_H
40 
41 #include <linux/page.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdev_features.h>
44 #include <linux/list.h>
45 #include <linux/gfp.h>
46 
/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG

/* Debug-class bits selecting which of the SKB_* macros below print. */
#define	DSKB_TODO	0x01
#define	DSKB_TRACE	0x02
#define	DSKB_TRACEX	0x04
extern int debug_skb;

/*
 * Wrap the conditional printfs in do { } while (0) so each macro expands
 * to exactly one statement and cannot steal the `else' of a surrounding
 * if/else (the previous bare-if form was a dangling-else hazard).
 */
#define	SKB_TRACE(_s)		do {					\
	if (debug_skb & DSKB_TRACE)					\
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s);	\
} while (0)
#define	SKB_TRACE2(_s, _p)	do {					\
	if (debug_skb & DSKB_TRACE)					\
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__,	\
		    _s, _p);						\
} while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)	do {				\
	if (debug_skb & DSKB_TRACE)					\
		printf("SKB_TRACE %s:%d %p" _fmt "\n", __func__,	\
		    __LINE__, _s, __VA_ARGS__);				\
} while (0)
#define	SKB_TODO()		do {					\
	if (debug_skb & DSKB_TODO)					\
		printf("SKB_TODO %s:%d\n", __func__, __LINE__);		\
} while (0)
#else
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#define	SKB_TODO()		do { } while(0)
#endif
69 
/* Linux packet-classification values used by drivers (subset). */
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		CACHE_LINE_SIZE		/* ? */

/*
 * Queue head for a circular, doubly-linked list of sk_buffs.  The head
 * itself, cast to struct sk_buff *, acts as the list terminator (see
 * __skb_queue_head_init()).
 */
struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;	/* Number of skbs on the queue. */
	int			lock;	/* XXX TYPE */
};

/* Checksum state flags mirroring Linux's CHECKSUM_* values. */
enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

/* A page fragment attached to an skb (see skb_add_rx_frag()). */
struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

/* Out-of-line per-skb state: GSO parameters and the fragment array. */
struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;	/* Frags in use; maintained by skb_add_rx_frag(). */
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};
112 
/*
 * Compat version of Linux's struct sk_buff.  The linear buffer is
 * [head, end); valid data within it is [data, tail).  Per the accessors
 * below, `len' also counts frag bytes added by skb_add_rx_frag() while
 * `data_len' counts only those frag bytes.
 */
struct sk_buff {
		/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	int			list;		/* XXX TYPE */
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
155 
156 /* -------------------------------------------------------------------------- */
157 
158 struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
159 void linuxkpi_kfree_skb(struct sk_buff *);
160 
161 /* -------------------------------------------------------------------------- */
162 
163 static inline struct sk_buff *
164 alloc_skb(size_t size, gfp_t gfp)
165 {
166 	struct sk_buff *skb;
167 
168 	skb = linuxkpi_alloc_skb(size, gfp);
169 	SKB_TRACE(skb);
170 	return (skb);
171 }
172 
173 static inline struct sk_buff *
174 dev_alloc_skb(size_t len)
175 {
176 	struct sk_buff *skb;
177 
178 	skb = alloc_skb(len, GFP_KERNEL);
179 	/* XXX TODO */
180 	SKB_TRACE(skb);
181 	return (skb);
182 }
183 
/* Free an skb (and its FreeBSD backing storage) via the linuxkpi backend. */
static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

/* Linux dev_* free variants; all funnel into kfree_skb() here. */
static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

/* XXX-BZ TODO: unimplemented; does not free skb, only logs under SKB_DEBUG. */
static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
211 
212 /* -------------------------------------------------------------------------- */
213 
/*
 * Walk a NULL-terminated skb list, safe against unlinking the current
 * element.  The previous version evaluated `(tmp) = (skb)->next' in the
 * loop condition, which is NULL (i.e. false) for the final element, so
 * the last skb on the list was never visited.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next,					\
	    (tmp) = ((skb) != NULL) ? (skb)->next : NULL;		\
	    (skb) != NULL;						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
217 
/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	/* Only legal on an empty skb: data must still point at head. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
	skb->data += len;
	skb->tail += len;
}
228 
229 /*
230  * Remove headroom; return new data pointer; basically make space at the
231  * front to copy data in (manually).
232  */
233 static inline void *
234 skb_push(struct sk_buff *skb, size_t len)
235 {
236 	SKB_TRACE(skb);
237 	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
238 	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data));
239 	skb->len  += len;
240 	skb->data -= len;
241 	return (skb->data);
242 }
243 
244 /*
245  * Length of the data on the skb (without any frags)???
246  */
247 static inline size_t
248 skb_headlen(struct sk_buff *skb)
249 {
250 
251 	SKB_TRACE(skb);
252 	return (skb->len - skb->data_len);
253 }
254 
255 
256 /* Return the end of data (tail pointer). */
257 static inline uint8_t *
258 skb_tail_pointer(struct sk_buff *skb)
259 {
260 
261 	SKB_TRACE(skb);
262 	return (skb->tail);
263 }
264 
/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}
285 
286 
287 /*
288  * Remove tailroom; return the old tail pointer; basically make space at
289  * the end to copy data in (manually).  See also skb_put_data() below.
290  */
291 static inline void *
292 skb_put(struct sk_buff *skb, size_t len)
293 {
294 	void *s;
295 
296 	SKB_TRACE(skb);
297 	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
298 	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
299 	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));
300 
301 	s = skb_tail_pointer(skb);
302 	skb->tail += len;
303 	skb->len += len;
304 #ifdef SKB_DEBUG
305 	if (debug_skb & DSKB_TRACEX)
306 	printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
307 	    __func__, skb,skb->len, skb->head, skb->data, skb->tail, skb->end,
308 	    s, len);
309 #endif
310 	return (s);
311 }
312 
/* skb_put() + copying data in: reserve len tail bytes and copy buf there. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *dst;

	SKB_TRACE2(skb, buf);
	dst = skb_put(skb, len);
	memcpy(dst, buf, len);
	return (dst);
}

/* skb_put() + filling with zeros: reserve len tail bytes and clear them. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *dst;

	SKB_TRACE(skb);
	dst = skb_put(skb, len);
	memset(dst, '\0', len);
	return (dst);
}
336 
337 /*
338  * Remove len bytes from beginning of data.
339  *
340  * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
341  * we return the advanced data pointer so we don't have to keep a temp, correct?
342  */
343 static inline void *
344 skb_pull(struct sk_buff *skb, size_t len)
345 {
346 
347 	SKB_TRACE(skb);
348 #if 0	/* Apparently this doesn't barf... */
349 	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
350 	    __func__, skb, skb->len, len, skb->data));
351 #endif
352 	if (skb->len < len)
353 		return (NULL);
354 	skb->len -= len;
355 	skb->data += len;
356 	return (skb->data);
357 }
358 
359 /* Reduce skb data to given length or do nothing if smaller already. */
360 static inline void
361 __skb_trim(struct sk_buff *skb, unsigned int len)
362 {
363 
364 	SKB_TRACE(skb);
365 	if (skb->len < len)
366 		return;
367 
368 	skb->len = len;
369 	skb->tail = skb->data + skb->len;
370 }
371 
372 static inline void
373 skb_trim(struct sk_buff *skb, unsigned int len)
374 {
375 
376 	return (__skb_trim(skb, len));
377 }
378 
/* Accessor for the shared-info area hanging off the skb. */
static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}
386 
387 static inline void
388 skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
389     off_t offset, size_t size, unsigned int truesize)
390 {
391 	struct skb_shared_info *shinfo;
392 
393 	SKB_TRACE(skb);
394 #ifdef SKB_DEBUG
395 	if (debug_skb & DSKB_TRACEX)
396 	printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
397 	    "page %#jx offset %ju size %zu truesize %u\n", __func__,
398 	    skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
399 	    (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
400 	    size, truesize);
401 #endif
402 
403 	shinfo = skb_shinfo(skb);
404 	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
405 	    "fragno %d too big\n", __func__, skb, fragno));
406 	shinfo->frags[fragno].page = page;
407 	shinfo->frags[fragno].offset = offset;
408 	shinfo->frags[fragno].size = size;
409 	shinfo->nr_frags = fragno + 1;
410         skb->len += size;
411         skb->truesize += truesize;
412 
413 	/* XXX TODO EXTEND truesize? */
414 }
415 
416 /* -------------------------------------------------------------------------- */
417 
/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
/*
 * Iterate a circular queue; the head cast to an skb is the terminator,
 * matching __skb_queue_head_init() pointing next/prev back at q.
 */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

/* As above, but safe against unlinking the current skb. */
#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
426 
427 static inline bool
428 skb_queue_empty(struct sk_buff_head *q)
429 {
430 
431 	SKB_TRACE(q);
432 	return (q->qlen == 0);
433 }
434 
435 static inline void
436 __skb_queue_head_init(struct sk_buff_head *q)
437 {
438 	SKB_TRACE(q);
439 	q->prev = q->next = (struct sk_buff *)q;
440 	q->qlen = 0;
441 }
442 
443 static inline void
444 skb_queue_head_init(struct sk_buff_head *q)
445 {
446 	SKB_TRACE(q);
447 	return (__skb_queue_head_init(q));
448 }
449 
/* Link `new' between prev and next and bump the queue length. */
static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}

/* Insert `new' right after `skb' on queue q. */
static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

/* Insert `new' right before `skb' on queue q. */
static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}
480 
481 static inline void
482 __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
483 {
484 	struct sk_buff *s;
485 
486 	SKB_TRACE2(q, skb);
487 	q->qlen++;
488 	s = (struct sk_buff *)q;
489 	s->prev->next = skb;
490 	skb->prev = s->prev;
491 	skb->next = s;
492 	s->prev = skb;
493 }
494 
495 static inline void
496 skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
497 {
498 	SKB_TRACE2(q, skb);
499 	return (__skb_queue_tail(q, skb));
500 }
501 
502 static inline struct sk_buff *
503 skb_peek_tail(struct sk_buff_head *q)
504 {
505 	struct sk_buff *skb;
506 
507 	skb = q->prev;
508 	SKB_TRACE2(q, skb);
509 	if (skb == (struct sk_buff *)q)
510 		return (NULL);
511 	return (skb);
512 }
513 
514 static inline void
515 __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
516 {
517 	SKB_TRACE2(skb, head);
518 	struct sk_buff *p, *n;;
519 
520 	head->qlen--;
521 	p = skb->prev;
522 	n = skb->next;
523 	p->next = n;
524 	n->prev = p;
525 	skb->prev = skb->next = NULL;
526 }
527 
528 static inline void
529 skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
530 {
531 	SKB_TRACE2(skb, head);
532 	return (__skb_unlink(skb, head));
533 }
534 
/* Unlink and return the first skb on q, or NULL if the queue is empty. */
static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	/* On a circular queue, the head itself marks "empty". */
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

/* Unlink and return the last skb on q, or NULL if the queue is empty. */
static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}
569 
570 static inline void
571 skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
572 {
573 
574 	SKB_TRACE2(q, skb);
575 	__skb_queue_after(q, (struct sk_buff *)q, skb);
576 }
577 
578 static inline uint32_t
579 skb_queue_len(struct sk_buff_head *head)
580 {
581 	SKB_TRACE(head);
582 	return (head->qlen);
583 }
584 
585 static inline void
586 __skb_queue_purge(struct sk_buff_head *q)
587 {
588 	struct sk_buff *skb;
589 
590 	SKB_TRACE(q);
591         while ((skb = __skb_dequeue(q)) != NULL)
592 		kfree_skb(skb);
593 }
594 
595 static inline void
596 skb_queue_purge(struct sk_buff_head *q)
597 {
598 	SKB_TRACE(q);
599 	return (__skb_queue_purge(q));
600 }
601 
/* Return the skb preceding `skb'; q is unused (kept for Linux API parity). */
static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}
610 
611 /* -------------------------------------------------------------------------- */
612 
/* XXX-BZ TODO: unimplemented stub; returns NULL unconditionally. */
static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* XXX-BZ TODO: unimplemented stub; does not free the skb yet. */
static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX-BZ TODO: unimplemented stub; returns a constant pseudo-checksum. */
static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

/* XXX-BZ TODO: unimplemented stub; returns -1. */
static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

/* XXX-BZ TODO: unimplemented stub; returns -1 cast to dma_addr_t. */
static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

/* XXX-BZ TODO: unimplemented stub; returns -1 cast to size_t. */
static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}
660 
661 static inline bool
662 skb_is_nonlinear(struct sk_buff *skb)
663 {
664 	SKB_TRACE(skb);
665 	return ((skb->data_len > 0) ? true : false);
666 }
667 
668 #define	skb_walk_frags(_skb, _frag)					\
669 	for ((_frag) = (_skb); false; (_frag)++)
670 
/* XXX-BZ TODO: unimplemented stub. */
static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX-BZ TODO: unimplemented stub; always reports not writable. */
static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* XXX-BZ TODO: unimplemented stub; returns NULL. */
static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

/* XXX-BZ TODO: unimplemented stub; returns NULL. */
static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* XXX-BZ TODO: unimplemented stub; always reports non-GSO. */
static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* XXX-BZ TODO: unimplemented stub. */
static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX-BZ TODO: unimplemented stub. */
static inline void
skb_queue_splice_init(struct sk_buff_head *qa, struct sk_buff_head *qb)
{
	SKB_TRACE2(qa, qb);
	SKB_TODO();
}
723 
724 static inline void
725 skb_reset_transport_header(struct sk_buff *skb)
726 {
727 
728 	SKB_TRACE(skb);
729 	skb->l4hdroff = skb->data - skb->head;
730 }
731 
732 static inline uint8_t *
733 skb_transport_header(struct sk_buff *skb)
734 {
735 
736 	SKB_TRACE(skb);
737         return (skb->head + skb->l4hdroff);
738 }
739 
740 static inline uint8_t *
741 skb_network_header(struct sk_buff *skb)
742 {
743 
744 	SKB_TRACE(skb);
745         return (skb->head + skb->l3hdroff);
746 }
747 
/* XXX-BZ TODO: unimplemented stub; returns ENXIO (note: positive errno). */
static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

/* XXX-BZ TODO: unimplemented stub; always reports failure. */
static inline bool
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}
763 
/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

/* Return the queue mapping stored by skb_set_queue_mapping(). */
static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}
780 
/* XXX-BZ TODO: unimplemented stub; always reports not cloned. */
static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* XXX-BZ TODO: unimplemented stub; returns NULL. */
static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* XXX-BZ TODO: unimplemented stub. */
static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX-BZ TODO: unimplemented stub. */
static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX-BZ TODO: unimplemented stub; returns NULL instead of the first skb. */
static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	SKB_TODO();
	return (NULL);
}

/* XXX-BZ TODO: stub; returns the folded sum unchanged. */
static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}
825 
826 #endif	/* _LINUXKPI_LINUX_SKBUFF_H */
827