/*-
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 * Copyright (c) 2021-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
    if (linuxkpi_debug_skb & DSKB_TODO)					\
	printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define	SKB_IMPROVE(...)						\
    if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
	printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define	SKB_TRACE(_s)							\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define	SKB_TRACE2(_s, _p)						\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    if (linuxkpi_debug_skb & DSKB_TRACE)				\
	printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s,	\
	    __VA_ARGS__)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif
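
/*
 * Example (illustrative only, not part of the KPI): with SKB_DEBUG compiled
 * in, tracing is enabled by setting bits in linuxkpi_debug_skb, e.g. from a
 * driver or a debugger:
 *
 *	linuxkpi_debug_skb = DSKB_TODO | DSKB_TRACE;
 *
 * after which every SKB_TRACE*() call site prints function, line, and the
 * skb/queue pointer(s) involved.
 */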

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		CACHE_LINE_SIZE		/* ? */

struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
		/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	int			list;		/* XXX TYPE */
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
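
/*
 * Rough sketch of the linear buffer layout implied by the four pointers
 * above and the accessors below (our reading of this implementation, not
 * authoritative Linux documentation):
 *
 *	head        data               tail            end
 *	 |<-headroom->|<----- len ----->|<--tailroom--->|
 *
 * skb_headroom() == data - head, skb_tailroom() == end - tail, and for a
 * purely linear skb len == tail - data (data_len only counts frag bytes).
 */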

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
void linuxkpi_kfree_skb(struct sk_buff *);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* -------------------------------------------------------------------------- */

/*
 * skb lists (as opposed to the circular "queues" below) are NULL-terminated;
 * fetch the successor first so the body may unlink or free the current skb,
 * and make sure the final element is visited as well.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
	    (skb) != NULL;						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
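
/*
 * Typical receive-path usage (illustrative sketch only; "len" and "buf" are
 * hypothetical driver variables): reserve headroom right after allocation,
 * while the skb is still empty, then copy the frame in.
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + len);
 *	if (skb == NULL)
 *		return;
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put_data(skb, buf, len);
 */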

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}
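
/*
 * Illustrative sketch (struct myhdr is a hypothetical header type):
 * prepending a header consumes headroom created earlier by skb_reserve().
 *
 *	struct myhdr *hdr;
 *
 *	hdr = skb_push(skb, sizeof(*hdr));
 *	memset(hdr, 0, sizeof(*hdr));
 */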

/*
 * Length of the linear data in the skb, i.e. skb->len minus what is stored
 * in the frags.
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, "
		    "s %p len %zu\n", __func__, skb, skb->len, skb->head,
		    skb->data, skb->tail, skb->end, s, len);
#endif
	return (s);
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}
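
/*
 * Putting it together (illustrative sketch; hdr_len, body and body_len are
 * hypothetical): build a frame by reserving room for the header stack up
 * front, appending the payload, then pushing the header(s) in front of it.
 *
 *	skb = alloc_skb(hdr_len + body_len, GFP_KERNEL);
 *	skb_reserve(skb, hdr_len);
 *	skb_put_data(skb, body, body_len);
 *	skb_push(skb, hdr_len);		/- now fill in the header(s) -/
 */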

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
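
/*
 * Illustrative parsing sketch (struct myhdr is hypothetical): strip the
 * just-processed header and fail gracefully on a truncated frame, since
 * skb_pull() returns NULL rather than panicking when len is too large.
 *
 *	struct myhdr *hdr = (struct myhdr *)skb->data;
 *
 *	if (skb_pull(skb, sizeof(*hdr)) == NULL)
 *		goto drop;
 *	// skb->data now points at the payload following the header.
 */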

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u "
		    "fragno %d page %#jx offset %ju size %zu truesize %u\n",
		    __func__, skb, skb->head, skb->data, skb->tail, skb->end,
		    skb->len, fragno,
		    (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}
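
/*
 * Illustrative receive-path sketch (page, hdr, hdr_len and frag_len are
 * hypothetical): attach an externally allocated page as fragment 0 instead
 * of copying the payload into the linear area.
 *
 *	skb = dev_alloc_skb(hdr_len);
 *	skb_put_data(skb, hdr, hdr_len);
 *	skb_add_rx_frag(skb, 0, page, 0, frag_len, PAGE_SIZE);
 */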

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
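
/*
 * Usage sketch (illustrative; skb_matches() is a hypothetical predicate):
 * queues are circular with the head acting as sentinel, so a walk terminates
 * when it comes back around to the head; use the _safe variant whenever the
 * body may unlink the current element.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&q, skb, tmp) {
 *		if (skb_matches(skb))
 *			__skb_unlink(skb, &q);
 *	}
 */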

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *s;

	SKB_TRACE2(q, skb);
	q->qlen++;
	s = (struct sk_buff *)q;
	s->prev->next = skb;
	skb->prev = s->prev;
	skb->next = s;
	s->prev = skb;
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}
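
/*
 * Producer/consumer sketch (illustrative; process_skb() is a hypothetical
 * consumer): note that in this implementation the plain (non-__) variants
 * do not take q->lock, so callers serialize access themselves.
 *
 *	skb_queue_tail(&q, skb);			// enqueue at the tail
 *	while ((skb = skb_dequeue(&q)) != NULL)		// drain from the head
 *		process_skb(skb);
 */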

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->data_len > 0);
}

/* XXX TODO: deliberately a no-op stub for now; never iterates. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_queue_splice_init(struct sk_buff_head *q, struct sk_buff_head *h)
{
	struct sk_buff *b, *e;

	SKB_TRACE2(q, h);

	if (skb_queue_empty(q))
		return;

	/* XXX do we need a barrier around this? */
	b = q->next;
	e = q->prev;

	/* Splice q's elements in at the head of h, before h's old first. */
	b->prev = (struct sk_buff *)h;
	e->next = h->next;
	h->next->prev = e;
	h->next = b;

	h->qlen += q->qlen;
	__skb_queue_head_init(q);
}
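
/*
 * Usage sketch (illustrative; process_skb() is hypothetical): move all
 * pending skbs onto a local queue in one O(1) splice, leaving q empty,
 * e.g. to drain it outside a lock.
 *
 *	struct sk_buff_head local;
 *
 *	__skb_queue_head_init(&local);
 *	skb_queue_splice_init(&q, &local);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		process_skb(skb);
 */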

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);	/* Negative errno, as pskb_expand_head() below. */
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	SKB_TODO();
	return (NULL);
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	/* Unlike Linux, tail is a real pointer here, not an offset. */
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */