/*-
 * Copyright (c) 2020-2025 The FreeBSD Foundation
 * Copyright (c) 2021-2023 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 * Do not rely on the internals of this implementation.  They are highly
 * likely to change as we improve the integration with FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>

#include "opt_wlan.h"

/* Currently this is only used for wlan so we can depend on that. */
#if defined(IEEE80211_DEBUG) && !defined(SKB_DEBUG)
#define	SKB_DEBUG
#endif

/* #define	SKB_DEBUG */

#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO() \
	if (linuxkpi_debug_skb & DSKB_TODO) \
		printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define	SKB_IMPROVE(...) \
	if (linuxkpi_debug_skb & DSKB_IMPROVE) \
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define	SKB_TRACE(_s) \
	if (linuxkpi_debug_skb & DSKB_TRACE) \
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define	SKB_TRACE2(_s, _p) \
	if (linuxkpi_debug_skb & DSKB_TRACE) \
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define	SKB_TRACE_FMT(_s, _fmt, ...) \
	if (linuxkpi_debug_skb & DSKB_TRACE) \
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s, \
		    __VA_ARGS__)
#else
#define	SKB_TODO()		do { } while(0)
#define	SKB_IMPROVE(...)	do { } while(0)
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#endif
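
/*
 * Usage sketch (added for clarity): the macros above compile away unless
 * SKB_DEBUG is defined and the matching bit is set in linuxkpi_debug_skb:
 *
 *	SKB_TRACE(skb);					// needs DSKB_TRACE
 *	SKB_TRACE_FMT(skb, "len %u", skb->len);		// formatted detail
 *	SKB_TODO();					// needs DSKB_TODO
 */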

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

struct skb_shared_hwtstamps {
	ktime_t		hwtstamp;
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)
#define	SKB_DATA_ALIGN(_x)	roundup2(_x, CACHE_LINE_SIZE)

struct sk_buff_head {
	/* XXX TODO */
	union {
		struct {
			struct sk_buff	*next;
			struct sk_buff	*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff	*next;
			struct sk_buff	*prev;
		} list;
	};
	size_t		qlen;
	spinlock_t	lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE		= 0x00,
	CHECKSUM_UNNECESSARY	= 0x01,
	CHECKSUM_PARTIAL	= 0x02,
	CHECKSUM_COMPLETE	= 0x04,
};

struct skb_frag {
	/* XXX TODO */
	struct page	*page;	/* XXX-BZ These three are a wild guess so far! */
	off_t		offset;
	size_t		size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff	*next;
			struct sk_buff	*prev;
		};
		struct list_head	list;
	};

	uint8_t		*head;	/* Head of buffer. */
	uint8_t		*data;	/* Head of data. */
	uint8_t		*tail;	/* End of data. */
	uint8_t		*end;	/* End of buffer. */

	uint32_t	len;		/* ? */
	uint32_t	data_len;	/* ? If we have frags? */
	union {
		__wsum		csum;
		struct {
			uint16_t	csum_offset;
			uint16_t	csum_start;
		};
	};
	uint16_t	protocol;
	uint8_t		ip_summed;
	/* uint8_t */

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t		cb[64] __aligned(CACHE_LINE_SIZE);

	struct skb_shared_info	*shinfo __aligned(CACHE_LINE_SIZE);

	uint32_t	truesize;	/* The total size of all buffers, incl. frags. */
	uint32_t	priority;
	uint16_t	qmap;		/* queue mapping */
	uint16_t	_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	uint16_t	l3hdroff;	/* network header offset from *head */
	uint16_t	l4hdroff;	/* transport header offset from *head */
	uint16_t	mac_header;	/* offset of mac_header */
	uint16_t	mac_len;	/* Link-layer header length. */
	enum sk_buff_pkt_type	pkt_type;
	refcount_t	refcnt;

	struct net_device	*dev;
	void			*sk;	/* XXX net/sock.h? */

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void		*m;
	void		(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t		__scratch[0] __aligned(CACHE_LINE_SIZE);
};
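
/*
 * Buffer layout sketch (added for clarity; mirrors the accessors below):
 * the invariant is head <= data <= tail <= end, so that
 * skb_headroom() == data - head, skb_headlen() == len - data_len (the
 * linear part only) and skb_tailroom() == end - tail for a linear skb:
 *
 *	head         data              tail         end
 *	 |<-headroom->|<--linear data-->|<-tailroom->|
 */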

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(const struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}

/* -------------------------------------------------------------------------- */

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}
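
/*
 * Usage sketch (added for clarity; buflen is a placeholder): reserve
 * headroom directly after allocation, before any data is added, so that a
 * later skb_push() of headers stays within the buffer:
 *
 *	skb = __dev_alloc_skb(NET_SKB_PAD + buflen, GFP_KERNEL);
 *	skb_reserve(skb, NET_SKB_PAD);
 */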

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}

/*
 * Length of the linear data in the skb, i.e., excluding what is in frags.
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	if (unlikely(skb_is_nonlinear(skb)))
		return (0);
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
	printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
	    __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
	    s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}
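
/*
 * Usage sketch (added for clarity; hdr and padlen are placeholders):
 * append a header by copy, then zero-filled padding; both helpers return
 * the start of the newly added region:
 *
 *	p = skb_put_data(skb, hdr, sizeof(*hdr));
 *	pad = skb_put_zero(skb, padlen);
 */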

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
	printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
	    "page %#jx offset %ju size %zu truesize %u\n", __func__,
	    skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
	    (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
	    size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
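
/*
 * Usage sketch (added for clarity): attach a receive page as the first
 * fragment; len, data_len and truesize grow and the skb becomes nonlinear:
 *
 *	skb_add_rx_frag(skb, 0, page, 0, size, PAGE_SIZE);
 */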

/* -------------------------------------------------------------------------- */

#define	skb_queue_walk(_q, skb) \
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q); \
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp) \
	for ((skb) = (_q)->next, (tmp) = (skb)->next; \
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)

#define	skb_list_walk_safe(_q, skb, tmp) \
	for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
	    ((skb) != NULL); \
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
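
/*
 * Usage sketch (added for clarity; q is a placeholder queue): drain a
 * queue; the _safe variant is required when entries are unlinked during
 * the walk:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		__skb_unlink(skb, q);
 *		kfree_skb(skb);
 *	}
 */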

static inline bool
skb_queue_empty(const struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (q->next == (const struct sk_buff *)q);
}

static inline bool
skb_queue_empty_lockless(const struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (READ_ONCE(q->next) == (const struct sk_buff *)q);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	__skb_queue_head_init(q);
	spin_lock_init(&q->lock);
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	WRITE_ONCE(new->prev, prev);
	WRITE_ONCE(new->next, next);
	WRITE_ONCE(((struct sk_buff_head_l *)next)->prev, new);
	WRITE_ONCE(((struct sk_buff_head_l *)prev)->next, new);
	WRITE_ONCE(q->qlen, q->qlen + 1);
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	unsigned long flags;

	SKB_TRACE2(q, new);
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_tail(q, new);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct sk_buff *
skb_peek(const struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (const struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(const struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = READ_ONCE(q->prev);
	SKB_TRACE2(q, skb);
	if (skb == (const struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, q);

	WRITE_ONCE(q->qlen, q->qlen - 1);
	p = skb->prev;
	n = skb->next;
	WRITE_ONCE(n->prev, p);
	WRITE_ONCE(p->next, n);
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
{
	unsigned long flags;

	SKB_TRACE2(skb, q);
	spin_lock_irqsave(&q->lock, flags);
	__skb_unlink(skb, q);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek(q);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);
	spin_unlock_irqrestore(&q->lock, flags);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
__skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue_tail(q);
	spin_unlock_irqrestore(&q->lock, flags);
	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{
	unsigned long flags;

	SKB_TRACE2(q, skb);
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_head(q, skb);
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline uint32_t
skb_queue_len(const struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (READ_ONCE(q->qlen));
}

static inline void
___skb_queue_splice(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	WRITE_ONCE(b->prev, p);
	WRITE_ONCE(((struct sk_buff_head_l *)p)->next, b);
	WRITE_ONCE(e->next, n);
	WRITE_ONCE(((struct sk_buff_head_l *)n)->prev, e);
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}

static inline void
skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
	WARN_ONCE(skb_queue_len(q) != 0, "%s: queue %p not empty: %u",
	    __func__, q, skb_queue_len(q));
}

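/*
 * Note (added for clarity): the locked purge below first splices the queue
 * onto a private local list while holding the queue lock, then frees the
 * skbs, so kfree_skb() is never called with the lock held.
 */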
static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff_head _q;
	unsigned long flags;

	SKB_TRACE(q);

	if (skb_queue_empty_lockless(q))
		return;

	__skb_queue_head_init(&_q);
	spin_lock_irqsave(&q->lock, flags);
	skb_queue_splice_init(q, &_q);
	spin_unlock_irqrestore(&q->lock, flags);
	__skb_queue_purge(&_q);
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(const struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	return (frag->size);
}

#define	skb_walk_frags(_skb, _frag) \
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	return (page_address(frag->page + frag->offset));
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->next = NULL;
}

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}
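
/*
 * Usage sketch (added for clarity; th is a placeholder): header offsets
 * are stored relative to skb->head, so they remain valid while
 * skb_push()/skb_pull() move skb->data:
 *
 *	skb_reset_transport_header(skb);	// l4hdroff = data - head
 *	th = skb_transport_header(skb);		// == skb->data at reset
 */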

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{
	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline void
skb_copy_header(struct sk_buff *to, const struct sk_buff *from)
{
	SKB_TRACE2(to, from);
	SKB_TODO();
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (true);
}

static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->head + skb->mac_header);
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}

static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}

static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __wsum
csum_unfold(__sum16 sum)
{
	return (sum);
}

static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	refcount_inc(&skb->refcnt);
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}

static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}

static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	/* page_pool */
	SKB_TODO();
}

static inline int
skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

#define	SKB_WITH_OVERHEAD(_s) \
	(_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE)
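
/*
 * Usage sketch (added for clarity): usable payload for a fixed-size
 * allocation once the trailing skb_shared_info overhead is subtracted:
 *
 *	size_t payload = SKB_WITH_OVERHEAD(PAGE_SIZE);
 */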

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */