xref: /freebsd/sys/compat/linuxkpi/common/include/linux/skbuff.h (revision 952643ea452655a8e80d1b5e1cc2cae35cb73870)
1 /*-
2  * Copyright (c) 2020-2022 The FreeBSD Foundation
3  * Copyright (c) 2021-2022 Bjoern A. Zeeb
4  *
5  * This software was developed by Björn Zeeb under sponsorship from
6  * the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 /*
33  * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
34  *       Do not rely on the internals of this implementation.  They are highly
35  *       likely to change as we will improve the integration to FreeBSD mbufs.
36  */
37 
38 #ifndef	_LINUXKPI_LINUX_SKBUFF_H
39 #define	_LINUXKPI_LINUX_SKBUFF_H
40 
41 #include <linux/kernel.h>
42 #include <linux/page.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/netdev_features.h>
45 #include <linux/list.h>
46 #include <linux/gfp.h>
47 #include <linux/compiler.h>
48 #include <linux/spinlock.h>
49 
/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

/*
 * Debug/trace helpers, gated on bits in linuxkpi_debug_skb.
 * Each is wrapped in do { } while (0) so it expands to a single statement;
 * the previous bare-"if" form would silently capture a following "else"
 * of the caller (dangling-else) when used unbraced in an if/else.
 */
#define	SKB_TODO()							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TODO)				\
		printf("SKB_TODO %s:%d\n", __func__, __LINE__);		\
    } while (0)
#define	SKB_IMPROVE(...)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__);	\
    } while (0)
#define	SKB_TRACE(_s)							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s);	\
    } while (0)
#define	SKB_TRACE2(_s, _p)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__,	\
		    _s, _p);						\
    } while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__,	\
		    __LINE__, _s, __VA_ARGS__);				\
    } while (0)
#else
#define	SKB_TODO()		do { } while(0)
#define	SKB_IMPROVE(...)	do { } while(0)
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#endif
81 
/* Linux packet delivery classification; only the subset consumers need. */
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

/* Default headroom reserved in front of packet data by allocators. */
#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

/*
 * Queue head for a circular doubly-linked list of sk_buffs.  next/prev
 * intentionally mirror the first two fields of struct sk_buff so the head
 * can be type-punned as the list sentinel (see the queue functions below).
 */
struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;		/* Number of skbs queued. */
	spinlock_t		lock;		/* For the locked skb_queue_*() variants. */
};

/* Checksum state (Linux CHECKSUM_* values, used with ip_summed). */
enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

/* One page fragment attached to an skb; filled by skb_add_rx_frag(). */
struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;		/* Byte offset of data within the page. */
	size_t			size;		/* Byte length of the fragment. */
};
typedef	struct skb_frag	skb_frag_t;

/*
 * GSO segmentation types.  NOTE(review): plain enum constants, not the
 * Linux bit-flag values — confirm consumers only compare for equality.
 */
enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

/* Out-of-line skb state: fragment array, fragment list, GSO parameters. */
struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;	/* Count of valid frags[] entries. */
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};
125 
/*
 * Compatibility stand-in for Linux's struct sk_buff.  The first two fields
 * must stay first and in this order: queue code type-puns a sk_buff_head
 * as a sentinel sk_buff (see __skb_queue_tail() et al.).
 */
struct sk_buff {
		/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	int			list;		/* XXX TYPE */
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	/* Checksum bookkeeping; semantics follow the Linux fields of the
	 * same names (see enum sk_checksum_flags for ip_summed values). */
	int		csum_offset, csum_start, ip_summed, protocol;

	/* Buffer layout invariant: head <= data <= tail <= end. */
	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
168 
169 /* -------------------------------------------------------------------------- */
170 
171 struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
172 struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
173 void linuxkpi_kfree_skb(struct sk_buff *);
174 
175 /* -------------------------------------------------------------------------- */
176 
177 static inline struct sk_buff *
178 alloc_skb(size_t size, gfp_t gfp)
179 {
180 	struct sk_buff *skb;
181 
182 	skb = linuxkpi_alloc_skb(size, gfp);
183 	SKB_TRACE(skb);
184 	return (skb);
185 }
186 
187 static inline struct sk_buff *
188 __dev_alloc_skb(size_t len, gfp_t gfp)
189 {
190 	struct sk_buff *skb;
191 
192 	skb = linuxkpi_dev_alloc_skb(len, gfp);
193 	SKB_IMPROVE();
194 	SKB_TRACE(skb);
195 	return (skb);
196 }
197 
198 static inline struct sk_buff *
199 dev_alloc_skb(size_t len)
200 {
201 	struct sk_buff *skb;
202 
203 	skb = __dev_alloc_skb(len, GFP_NOWAIT);
204 	SKB_IMPROVE();
205 	SKB_TRACE(skb);
206 	return (skb);
207 }
208 
/* Free an skb (and its backing storage) via the LinuxKPI backend. */
static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

/* Driver-context free; same as kfree_skb() here. */
static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

/* "Any context" free; no deferral needed in this implementation. */
static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

/* Interrupt-context free; Linux defers this — see SKB_IMPROVE note. */
static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}
237 
238 /* -------------------------------------------------------------------------- */
239 
/*
 * Walk a NULL-terminated skb list (skb->next chain, not a sk_buff_head
 * queue), safe against unlinking the current element.
 * The previous version used "(skb) != NULL && ((tmp) = (skb)->next)" as
 * the condition, which evaluates to false when the LAST element's next is
 * NULL and thus never visited the last skb.  The comma operator makes the
 * assignment unconditional while keeping only the NULL check as the test.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next;					\
	    ((skb) != NULL) && ((tmp) = (skb)->next, 1);		\
	    (skb) = (tmp))
243 
/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	/* Assert the skb is still empty: data and tail both advance by len,
	 * growing headroom at the expense of tailroom. */
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
261 
262 /*
263  * Remove headroom; return new data pointer; basically make space at the
264  * front to copy data in (manually).
265  */
266 static inline void *
267 skb_push(struct sk_buff *skb, size_t len)
268 {
269 	SKB_TRACE(skb);
270 	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
271 	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data));
272 	skb->len  += len;
273 	skb->data -= len;
274 	return (skb->data);
275 }
276 
/*
 * Length of the data on the skb (without any frags)???
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	/* len counts everything incl. fragments; data_len is frag bytes only. */
	return (skb->len - skb->data_len);
}


/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}
297 
298 /* Return number of bytes available at end of buffer. */
299 static inline unsigned int
300 skb_tailroom(struct sk_buff *skb)
301 {
302 
303 	SKB_TRACE(skb);
304 	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
305 	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
306 	return (skb->end - skb->tail);
307 }
308 
309 /* Return numer of bytes available at the beginning of buffer. */
310 static inline unsigned int
311 skb_headroom(struct sk_buff *skb)
312 {
313 	SKB_TRACE(skb);
314 	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
315 	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
316 	return (skb->data - skb->head);
317 }
318 
319 
320 /*
321  * Remove tailroom; return the old tail pointer; basically make space at
322  * the end to copy data in (manually).  See also skb_put_data() below.
323  */
324 static inline void *
325 skb_put(struct sk_buff *skb, size_t len)
326 {
327 	void *s;
328 
329 	SKB_TRACE(skb);
330 	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
331 	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
332 	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));
333 
334 	s = skb_tail_pointer(skb);
335 	if (len == 0)
336 		return (s);
337 	skb->tail += len;
338 	skb->len += len;
339 #ifdef SKB_DEBUG
340 	if (linuxkpi_debug_skb & DSKB_TRACEX)
341 	printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
342 	    __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
343 	    s, len);
344 #endif
345 	return (s);
346 }
347 
348 /* skb_put() + copying data in. */
349 static inline void *
350 skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
351 {
352 	void *s;
353 
354 	SKB_TRACE2(skb, buf);
355 	s = skb_put(skb, len);
356 	if (len == 0)
357 		return (s);
358 	memcpy(s, buf, len);
359 	return (s);
360 }
361 
362 /* skb_put() + filling with zeros. */
363 static inline void *
364 skb_put_zero(struct sk_buff *skb, size_t len)
365 {
366 	void *s;
367 
368 	SKB_TRACE(skb);
369 	s = skb_put(skb, len);
370 	memset(s, '\0', len);
371 	return (s);
372 }
373 
/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	/* Soft-fail (NULL) instead of asserting when there is too little data. */
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
395 
396 /* Reduce skb data to given length or do nothing if smaller already. */
397 static inline void
398 __skb_trim(struct sk_buff *skb, unsigned int len)
399 {
400 
401 	SKB_TRACE(skb);
402 	if (skb->len < len)
403 		return;
404 
405 	skb->len = len;
406 	skb->tail = skb->data + skb->len;
407 }
408 
/* Locked variant in Linux; here a plain forward to __skb_trim(). */
static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}
415 
/* Accessor for the shared info area (frags, GSO state) of an skb. */
static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}
423 
424 static inline void
425 skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
426     off_t offset, size_t size, unsigned int truesize)
427 {
428 	struct skb_shared_info *shinfo;
429 
430 	SKB_TRACE(skb);
431 #ifdef SKB_DEBUG
432 	if (linuxkpi_debug_skb & DSKB_TRACEX)
433 	printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
434 	    "page %#jx offset %ju size %zu truesize %u\n", __func__,
435 	    skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
436 	    (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
437 	    size, truesize);
438 #endif
439 
440 	shinfo = skb_shinfo(skb);
441 	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
442 	    "fragno %d too big\n", __func__, skb, fragno));
443 	shinfo->frags[fragno].page = page;
444 	shinfo->frags[fragno].offset = offset;
445 	shinfo->frags[fragno].size = size;
446 	shinfo->nr_frags = fragno + 1;
447         skb->len += size;
448         skb->truesize += truesize;
449 
450 	/* XXX TODO EXTEND truesize? */
451 }
452 
453 /* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
/* Iterate a sk_buff_head queue; the head itself (type-punned) is the sentinel. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

/* As above but safe against unlinking the current element. */
#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)

/* True if no skbs are queued (by counter, not by pointer inspection). */
static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

/* Initialize a queue head to the empty circular state (head points at itself). */
static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

/* Locked variant in Linux; lock init is not (yet) done here. */
static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}
486 
487 static inline void
488 __skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
489     struct sk_buff_head *q)
490 {
491 
492 	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
493 	new->prev = prev;
494 	new->next = next;
495 	next->prev = new;
496 	prev->next = new;
497 	q->qlen++;
498 }
499 
/* Insert "new" immediately after skb on queue q. */
static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

/* Insert "new" immediately before skb on queue q. */
static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}
517 
/*
 * Append skb at the tail of the queue.  Appending at the tail is inserting
 * before the sentinel (the type-punned queue head), so reuse
 * __skb_queue_before() instead of duplicating the pointer surgery and
 * qlen accounting by hand as the previous version did.
 */
static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_before(q, (struct sk_buff *)q, skb);
}
531 
/* Locked variant in Linux; here forwards to the lockless version. */
static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}
538 
539 static inline struct sk_buff *
540 skb_peek_tail(struct sk_buff_head *q)
541 {
542 	struct sk_buff *skb;
543 
544 	skb = q->prev;
545 	SKB_TRACE2(q, skb);
546 	if (skb == (struct sk_buff *)q)
547 		return (NULL);
548 	return (skb);
549 }
550 
551 static inline void
552 __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
553 {
554 	SKB_TRACE2(skb, head);
555 	struct sk_buff *p, *n;;
556 
557 	head->qlen--;
558 	p = skb->prev;
559 	n = skb->next;
560 	p->next = n;
561 	n->prev = p;
562 	skb->prev = skb->next = NULL;
563 }
564 
/* Locked variant in Linux; here forwards to the lockless version. */
static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}
571 
/* Remove and return the first skb of the queue; NULL if empty. */
static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	/* The type-punned head is the sentinel: queue is empty. */
	if (skb == (struct sk_buff *)q)
		return (NULL);
	/* Defensive: next should never be NULL on an initialized queue. */
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

/* Locked variant in Linux; here forwards to the lockless version. */
static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}
593 
594 static inline struct sk_buff *
595 skb_dequeue_tail(struct sk_buff_head *q)
596 {
597 	struct sk_buff *skb;
598 
599 	skb = skb_peek_tail(q);
600 	if (skb != NULL)
601 		__skb_unlink(skb, q);
602 
603 	SKB_TRACE2(q, skb);
604 	return (skb);
605 }
606 
/* Insert skb at the head of the queue (right after the sentinel). */
static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}
614 
/*
 * Locked variant in Linux; forward to __skb_queue_head() for consistency
 * with the other locked/lockless wrapper pairs in this file
 * (skb_queue_tail(), skb_unlink(), skb_dequeue(), ...) instead of
 * duplicating its body.
 */
static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_head(q, skb);
}
622 
/* Number of skbs on the queue (caller provides any needed locking). */
static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

/* Lockless read of the queue length, protected against load tearing. */
static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}
638 
639 static inline void
640 __skb_queue_purge(struct sk_buff_head *q)
641 {
642 	struct sk_buff *skb;
643 
644 	SKB_TRACE(q);
645         while ((skb = __skb_dequeue(q)) != NULL)
646 		kfree_skb(skb);
647 }
648 
/* Locked variant in Linux; here forwards to the lockless version. */
static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

/* Return the element preceding skb on the queue. */
static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}
664 
665 /* -------------------------------------------------------------------------- */
666 
/* TODO stub: Linux deep-copies the skb; here callers always get NULL. */
static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}
674 
/*
 * Linux's consume_skb() frees an skb that was consumed successfully
 * (kfree_skb() being the "dropped" flavor used for statistics).  We keep
 * no drop accounting, so freeing is correct for both; the previous TODO
 * stub returned without freeing and leaked every consumed skb.
 */
static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}
681 
/* TODO stub: returns a fixed 0xffff, not a real checksum. */
static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

/* TODO stub: always -1; Linux derives this from csum_start. */
static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

/* TODO stub: returns (dma_addr_t)-1, i.e. no mapping is performed. */
static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

/* TODO stub: returns (size_t)-1 rather than frag->size. */
static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}
714 
715 static inline bool
716 skb_is_nonlinear(struct sk_buff *skb)
717 {
718 	SKB_TRACE(skb);
719 	return ((skb->data_len > 0) ? true : false);
720 }
721 
/* Stub: body is never entered ("false" condition), i.e. no frags walked. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

/* TODO stub: no software checksumming performed. */
static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* TODO stub: always false; NOTE(review): Linux returns int (0/-errno). */
static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* TODO stub: always NULL, never the fragment's mapped address. */
static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

/* TODO stub: no GSO segmentation; callers get NULL. */
static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* Stub: GSO is never reported, regardless of shinfo contents. */
static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}
763 
764 static inline void
765 skb_mark_not_on_list(struct sk_buff *skb)
766 {
767 	SKB_TRACE(skb);
768 	SKB_TODO();
769 }
770 
/* Splice all skbs of "from" onto the HEAD of "to" and re-init "from". */
static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;	/* First element of "from". */
	e = from->prev;	/* Last element of "from". */
	n = to->next;	/* Old first element of "to". */

	/* Link the spliced run between to's sentinel and its old first skb. */
	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
794 
/* Record the current data pointer as the transport (L4) header offset. */
static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

/* Pointer to the transport (L4) header within the buffer. */
static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
        return (skb->head + skb->l4hdroff);
}

/* Pointer to the network (L3) header within the buffer. */
static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
        return (skb->head + skb->l3hdroff);
}
818 
/* TODO stub; NOTE(review): returns positive ENXIO, Linux returns -errno. */
static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

/* TODO stub: head expansion unimplemented, always fails. */
static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

/* Return the TX queue index stored by skb_set_queue_mapping(). */
static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

/* TODO stub: no clone tracking, so never reports a cloned header. */
static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* TODO stub: no MAC header offset is tracked (cf. l3hdroff/l4hdroff). */
static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* TODO stub: no owning socket to detach (see skb->sk). */
static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* TODO stub: cannot be implemented until a MAC header offset field exists. */
static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
881 
882 static inline struct sk_buff *
883 skb_peek(struct sk_buff_head *q)
884 {
885 	SKB_TRACE(q);
886 	SKB_TODO();
887 	return (NULL);
888 }
889 
/* TODO stub: returns the input unchanged rather than unfolding to 32 bit. */
static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

/* TODO stub: receive checksum is not updated after a push. */
static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}
902 
903 static inline void
904 skb_reset_tail_pointer(struct sk_buff *skb)
905 {
906 
907 	SKB_TRACE(skb);
908 	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
909 	SKB_TRACE(skb);
910 }
911 
/* TODO stub: returns skb without taking a reference (no refcount yet). */
static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

/* TODO stub: no reallocation; callers always see NULL (failure). */
static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

/* Copy len bytes from the linear data area into dst (caller sizes dst). */
static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}
936 
937 #endif	/* _LINUXKPI_LINUX_SKBUFF_H */
938