1 /*-
2 * Copyright (c) 2020-2025 The FreeBSD Foundation
3 * Copyright (c) 2021-2025 Bjoern A. Zeeb
4 *
5 * This software was developed by Björn Zeeb under sponsorship from
6 * the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
32 * Do not rely on the internals of this implementation. They are highly
33 * likely to change as we will improve the integration to FreeBSD mbufs.
34 */
35
36 #ifndef _LINUXKPI_LINUX_SKBUFF_H
37 #define _LINUXKPI_LINUX_SKBUFF_H
38
39 #include <linux/kernel.h>
40 #include <linux/page.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/netdev_features.h>
43 #include <linux/list.h>
44 #include <linux/gfp.h>
45 #include <linux/compiler.h>
46 #include <linux/spinlock.h>
47 #include <linux/ktime.h>
49
50 /*
51 * At least the net/intel-irdma-kmod port pulls this header in; likely through
52  * if_ether.h (see PR289268). This means we can no longer rely on
53 * IEEE80211_DEBUG (opt_wlan.h) to automatically set SKB_DEBUG.
54 */
55 /* #define SKB_DEBUG */
56
57 #ifdef SKB_DEBUG
58 #define DSKB_TODO 0x01
59 #define DSKB_IMPROVE 0x02
60 #define DSKB_TRACE 0x10
61 #define DSKB_TRACEX 0x20
62 extern int linuxkpi_debug_skb;
63
64 #define SKB_TODO() \
65 if (linuxkpi_debug_skb & DSKB_TODO) \
66 printf("SKB_TODO %s:%d\n", __func__, __LINE__)
67 #define SKB_IMPROVE(...) \
68 if (linuxkpi_debug_skb & DSKB_IMPROVE) \
69 printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
70 #define SKB_TRACE(_s) \
71 if (linuxkpi_debug_skb & DSKB_TRACE) \
72 printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
73 #define SKB_TRACE2(_s, _p) \
74 if (linuxkpi_debug_skb & DSKB_TRACE) \
75 printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
76 #define SKB_TRACE_FMT(_s, _fmt, ...) \
77 if (linuxkpi_debug_skb & DSKB_TRACE) \
78 printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s, \
79 __VA_ARGS__)
80 #else
81 #define SKB_TODO() do { } while(0)
82 #define SKB_IMPROVE(...) do { } while(0)
83 #define SKB_TRACE(_s) do { } while(0)
84 #define SKB_TRACE2(_s, _p) do { } while(0)
85 #define SKB_TRACE_FMT(_s, ...) do { } while(0)
86 #endif
87
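/*
 * With SKB_DEBUG defined, the macros above print according to the bits set
 * in linuxkpi_debug_skb (declared above and defined by the skbuff
 * implementation).  A minimal sketch of turning everything on from a debug
 * patch or a debugger (normally this is driven by a tunable/sysctl instead):
 *
 *	linuxkpi_debug_skb = DSKB_TODO | DSKB_IMPROVE | DSKB_TRACE;
 */
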
88 enum sk_buff_pkt_type {
89 PACKET_BROADCAST,
90 PACKET_MULTICAST,
91 PACKET_OTHERHOST,
92 };
93
94 struct skb_shared_hwtstamps {
95 ktime_t hwtstamp;
96 };
97
98 #define NET_SKB_PAD max(CACHE_LINE_SIZE, 32)
99 #define SKB_DATA_ALIGN(_x) roundup2(_x, CACHE_LINE_SIZE)
100
101 struct sk_buff_head {
102 /* XXX TODO */
103 union {
104 struct {
105 struct sk_buff *next;
106 struct sk_buff *prev;
107 };
108 struct sk_buff_head_l {
109 struct sk_buff *next;
110 struct sk_buff *prev;
111 } list;
112 };
113 size_t qlen;
114 spinlock_t lock;
115 };
116
117 enum sk_checksum_flags {
118 CHECKSUM_NONE = 0x00,
119 CHECKSUM_UNNECESSARY = 0x01,
120 CHECKSUM_PARTIAL = 0x02,
121 CHECKSUM_COMPLETE = 0x03,
122 };
123
124 struct skb_frag {
125 /* XXX TODO */
126 struct page *page; /* XXX-BZ These three are a wild guess so far! */
127 off_t offset;
128 size_t size;
129 };
130 typedef struct skb_frag skb_frag_t;
131
132 enum skb_shared_info_gso_type {
133 SKB_GSO_TCPV4,
134 SKB_GSO_TCPV6,
135 };
136
137 struct skb_shared_info {
138 enum skb_shared_info_gso_type gso_type;
139 uint16_t gso_size;
140 uint16_t nr_frags;
141 struct sk_buff *frag_list;
142 skb_frag_t frags[64]; /* XXX TODO, 16xpage? */
143 };
144
145 struct sk_buff {
146 /* XXX TODO */
147 union {
148 /* struct sk_buff_head */
149 struct {
150 struct sk_buff *next;
151 struct sk_buff *prev;
152 };
153 struct list_head list;
154 };
155
156 uint8_t *head; /* Head of buffer. */
157 uint8_t *data; /* Head of data. */
158 uint8_t *tail; /* End of data. */
159 uint8_t *end; /* End of buffer. */
160
161 uint32_t len; /* ? */
162 uint32_t data_len; /* ? If we have frags? */
163 union {
164 __wsum csum;
165 struct {
166 uint16_t csum_offset;
167 uint16_t csum_start;
168 };
169 };
170 uint16_t protocol;
171 	uint8_t ip_summed;		/* 2 bits only. */
172 /* uint8_t */
173
174 /* "Scratch" area for layers to store metadata. */
175 /* ??? I see sizeof() operations so probably an array. */
176 uint8_t cb[64] __aligned(CACHE_LINE_SIZE);
177
178 struct skb_shared_info *shinfo __aligned(CACHE_LINE_SIZE);
179
180 uint32_t truesize; /* The total size of all buffers, incl. frags. */
181 uint32_t priority;
182 uint16_t qmap; /* queue mapping */
183 uint16_t _flags; /* Internal flags. */
184 #define _SKB_FLAGS_SKBEXTFRAG 0x0001
185 uint16_t l3hdroff; /* network header offset from *head */
186 uint16_t l4hdroff; /* transport header offset from *head */
187 uint16_t mac_header; /* offset of mac_header */
188 uint16_t mac_len; /* Link-layer header length. */
189 enum sk_buff_pkt_type pkt_type;
190 refcount_t refcnt;
191
192 struct net_device *dev;
193 void *sk; /* XXX net/sock.h? */
194
195 /* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
196 void *m;
197 void(*m_free_func)(void *);
198
199 /* Force padding to CACHE_LINE_SIZE. */
200 uint8_t __scratch[0] __aligned(CACHE_LINE_SIZE);
201 };
202
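/*
 * Invariants assumed by the accessors below for the linear buffer (this
 * mirrors the Linux layout and is descriptive, not normative for this
 * implementation):
 *
 *	head <= data <= tail <= end
 *	headroom      == data - head
 *	skb_headlen() == tail - data	(== len - data_len; frags excluded)
 *	tailroom      == end - tail
 */
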
203 /* -------------------------------------------------------------------------- */
204
205 struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
206 struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
207 struct sk_buff *linuxkpi_build_skb(void *, size_t);
208 void linuxkpi_kfree_skb(struct sk_buff *);
209
210 struct sk_buff *linuxkpi_skb_copy(const struct sk_buff *, gfp_t);
211
212 /* -------------------------------------------------------------------------- */
213
214 static inline struct sk_buff *
215 alloc_skb(size_t size, gfp_t gfp)
216 {
217 struct sk_buff *skb;
218
219 skb = linuxkpi_alloc_skb(size, gfp);
220 SKB_TRACE(skb);
221 return (skb);
222 }
223
224 static inline struct sk_buff *
225 __dev_alloc_skb(size_t len, gfp_t gfp)
226 {
227 struct sk_buff *skb;
228
229 skb = linuxkpi_dev_alloc_skb(len, gfp);
230 SKB_IMPROVE();
231 SKB_TRACE(skb);
232 return (skb);
233 }
234
235 static inline struct sk_buff *
236 dev_alloc_skb(size_t len)
237 {
238 struct sk_buff *skb;
239
240 skb = __dev_alloc_skb(len, GFP_NOWAIT);
241 SKB_IMPROVE();
242 SKB_TRACE(skb);
243 return (skb);
244 }
245
246 static inline void
247 kfree_skb(struct sk_buff *skb)
248 {
249 SKB_TRACE(skb);
250 linuxkpi_kfree_skb(skb);
251 }
252
253 static inline void
254 consume_skb(struct sk_buff *skb)
255 {
256 SKB_TRACE(skb);
257 kfree_skb(skb);
258 }
259
260 static inline void
261 dev_kfree_skb(struct sk_buff *skb)
262 {
263 SKB_TRACE(skb);
264 kfree_skb(skb);
265 }
266
267 static inline void
268 dev_kfree_skb_any(struct sk_buff *skb)
269 {
270 SKB_TRACE(skb);
271 dev_kfree_skb(skb);
272 }
273
274 static inline void
275 dev_kfree_skb_irq(struct sk_buff *skb)
276 {
277 SKB_TRACE(skb);
278 SKB_IMPROVE("Do we have to defer this?");
279 dev_kfree_skb(skb);
280 }
281
282 static inline struct sk_buff *
283 build_skb(void *data, unsigned int fragsz)
284 {
285 struct sk_buff *skb;
286
287 skb = linuxkpi_build_skb(data, fragsz);
288 SKB_TRACE(skb);
289 return (skb);
290 }
291
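/*
 * Typical allocation/free cycle (a minimal sketch, not taken from any
 * in-tree consumer; "len" is a hypothetical payload size):
 *
 *	struct sk_buff *skb;
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + len);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);
 *	...
 *	kfree_skb(skb);		(or dev_kfree_skb_any() from any context)
 */
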
292 /* -------------------------------------------------------------------------- */
293
294 static inline bool
295 skb_is_nonlinear(struct sk_buff *skb)
296 {
297 SKB_TRACE(skb);
298 return ((skb->data_len > 0) ? true : false);
299 }
300
301 /* Add headroom; cannot do once there is data in there. */
302 static inline void
303 skb_reserve(struct sk_buff *skb, size_t len)
304 {
305 SKB_TRACE(skb);
306 #if 0
307 /* Apparently it is allowed to call skb_reserve multiple times in a row. */
308 KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
309 "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
310 #else
311 KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
312 "empty head %p data %p tail %p len %u\n", __func__, skb,
313 skb->head, skb->data, skb->tail, skb->len));
314 #endif
315 skb->data += len;
316 skb->tail += len;
317 }
318
319 /*
320 * Remove headroom; return new data pointer; basically make space at the
321 * front to copy data in (manually).
322 */
323 static inline void *
324 __skb_push(struct sk_buff *skb, size_t len)
325 {
326 SKB_TRACE(skb);
327 KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
328 	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
329 skb->len += len;
330 skb->data -= len;
331 return (skb->data);
332 }
333
334 static inline void *
335 skb_push(struct sk_buff *skb, size_t len)
336 {
337
338 SKB_TRACE(skb);
339 return (__skb_push(skb, len));
340 }
341
342 /*
343 * Length of the data on the skb (without any frags)???
344 */
345 static inline size_t
346 skb_headlen(struct sk_buff *skb)
347 {
348
349 SKB_TRACE(skb);
350 return (skb->len - skb->data_len);
351 }
352
353
354 /* Return the end of data (tail pointer). */
355 static inline uint8_t *
356 skb_tail_pointer(struct sk_buff *skb)
357 {
358
359 SKB_TRACE(skb);
360 return (skb->tail);
361 }
362
363 /* Return number of bytes available at end of buffer. */
364 static inline unsigned int
365 skb_tailroom(struct sk_buff *skb)
366 {
367
368 SKB_TRACE(skb);
369 KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
370 "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
371 if (unlikely(skb_is_nonlinear(skb)))
372 return (0);
373 return (skb->end - skb->tail);
374 }
375
376 /* Return number of bytes available at the beginning of buffer. */
377 static inline unsigned int
378 skb_headroom(const struct sk_buff *skb)
379 {
380 SKB_TRACE(skb);
381 KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
382 "data %p head %p\n", __func__, skb, skb->data, skb->head));
383 return (skb->data - skb->head);
384 }
385
386
387 /*
388 * Remove tailroom; return the old tail pointer; basically make space at
389 * the end to copy data in (manually). See also skb_put_data() below.
390 */
391 static inline void *
392 __skb_put(struct sk_buff *skb, size_t len)
393 {
394 void *s;
395
396 SKB_TRACE(skb);
397 KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
398 "len %zu) > end %p, head %p data %p len %u\n", __func__,
399 skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));
400
401 s = skb_tail_pointer(skb);
402 if (len == 0)
403 return (s);
404 skb->tail += len;
405 skb->len += len;
406 #ifdef SKB_DEBUG
407 if (linuxkpi_debug_skb & DSKB_TRACEX)
408 printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
409 __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
410 s, len);
411 #endif
412 return (s);
413 }
414
415 static inline void *
416 skb_put(struct sk_buff *skb, size_t len)
417 {
418
419 SKB_TRACE(skb);
420 return (__skb_put(skb, len));
421 }
422
423 /* skb_put() + copying data in. */
424 static inline void *
425 skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
426 {
427 void *s;
428
429 SKB_TRACE2(skb, buf);
430 s = skb_put(skb, len);
431 if (len == 0)
432 return (s);
433 memcpy(s, buf, len);
434 return (s);
435 }
436
437 /* skb_put() + filling with zeros. */
438 static inline void *
439 skb_put_zero(struct sk_buff *skb, size_t len)
440 {
441 void *s;
442
443 SKB_TRACE(skb);
444 s = skb_put(skb, len);
445 memset(s, '\0', len);
446 return (s);
447 }
448
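/*
 * How the pointer movers combine (a sketch; "hdr"/"payload" and their
 * lengths are hypothetical):
 *
 *	skb = alloc_skb(hdrlen + paylen, GFP_KERNEL);
 *	skb_reserve(skb, hdrlen);		data/tail advance by hdrlen
 *	skb_put_data(skb, payload, paylen);	tail/len grow by paylen
 *	memcpy(skb_push(skb, hdrlen), hdr, hdrlen);  data backs up, len grows
 *
 * skb_pull() below undoes a push by advancing data and shrinking len.
 */
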
449 /*
450 * Remove len bytes from beginning of data.
451 *
452 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
453 * we return the advanced data pointer so we don't have to keep a temp, correct?
454 */
455 static inline void *
456 skb_pull(struct sk_buff *skb, size_t len)
457 {
458
459 SKB_TRACE(skb);
460 #if 0 /* Apparently this doesn't barf... */
461 KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
462 __func__, skb, skb->len, len, skb->data));
463 #endif
464 if (skb->len < len)
465 return (NULL);
466 skb->len -= len;
467 skb->data += len;
468 return (skb->data);
469 }
470
471 /* Reduce skb data to given length or do nothing if smaller already. */
472 static inline void
473 __skb_trim(struct sk_buff *skb, unsigned int len)
474 {
475
476 SKB_TRACE(skb);
477 if (skb->len < len)
478 return;
479
480 skb->len = len;
481 skb->tail = skb->data + skb->len;
482 }
483
484 static inline void
485 skb_trim(struct sk_buff *skb, unsigned int len)
486 {
487
488 return (__skb_trim(skb, len));
489 }
490
491 static inline struct skb_shared_info *
492 skb_shinfo(struct sk_buff *skb)
493 {
494
495 SKB_TRACE(skb);
496 return (skb->shinfo);
497 }
498
499 static inline void
500 skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
501 off_t offset, size_t size, unsigned int truesize)
502 {
503 struct skb_shared_info *shinfo;
504
505 SKB_TRACE(skb);
506 #ifdef SKB_DEBUG
507 if (linuxkpi_debug_skb & DSKB_TRACEX)
508 printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
509 "page %#jx offset %ju size %zu truesize %u\n", __func__,
510 skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
511 (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
512 size, truesize);
513 #endif
514
515 shinfo = skb_shinfo(skb);
516 KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
517 "fragno %d too big\n", __func__, skb, fragno));
518 shinfo->frags[fragno].page = page;
519 shinfo->frags[fragno].offset = offset;
520 shinfo->frags[fragno].size = size;
521 shinfo->nr_frags = fragno + 1;
522 skb->len += size;
523 skb->data_len += size;
524 skb->truesize += truesize;
525 }
526
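/*
 * Receive-fragment sketch ("pg" and "fragsz" are hypothetical): append a
 * page to the next free frag slot; len/data_len grow by the fragment size
 * and truesize by the donated buffer size:
 *
 *	int idx = skb_shinfo(skb)->nr_frags;
 *
 *	skb_add_rx_frag(skb, idx, pg, 0, fragsz, PAGE_SIZE);
 */
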
527 /* -------------------------------------------------------------------------- */
528
529 #define skb_queue_walk(_q, skb) \
530 for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q); \
531 (skb) = (skb)->next)
532
533 #define skb_queue_walk_safe(_q, skb, tmp) \
534 for ((skb) = (_q)->next, (tmp) = (skb)->next; \
535 (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
536
537 #define skb_list_walk_safe(_q, skb, tmp) \
538 	for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
539 	    ((skb) != NULL); \
540 	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
541
542 static inline bool
543 skb_queue_empty(const struct sk_buff_head *q)
544 {
545 SKB_TRACE(q);
546 return (q->next == (const struct sk_buff *)q);
547 }
548
549 static inline bool
550 skb_queue_empty_lockless(const struct sk_buff_head *q)
551 {
552 SKB_TRACE(q);
553 return (READ_ONCE(q->next) == (const struct sk_buff *)q);
554 }
555
556 static inline void
557 __skb_queue_head_init(struct sk_buff_head *q)
558 {
559 SKB_TRACE(q);
560 q->prev = q->next = (struct sk_buff *)q;
561 q->qlen = 0;
562 }
563
564 static inline void
565 skb_queue_head_init(struct sk_buff_head *q)
566 {
567 SKB_TRACE(q);
568 __skb_queue_head_init(q);
569 spin_lock_init(&q->lock);
570 }
571
572 static inline void
573 __skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
574 struct sk_buff_head *q)
575 {
576
577 SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
578 WRITE_ONCE(new->prev, prev);
579 WRITE_ONCE(new->next, next);
580 WRITE_ONCE(((struct sk_buff_head_l *)next)->prev, new);
581 WRITE_ONCE(((struct sk_buff_head_l *)prev)->next, new);
582 WRITE_ONCE(q->qlen, q->qlen + 1);
583 }
584
585 static inline void
586 __skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
587 struct sk_buff *new)
588 {
589
590 SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
591 __skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
592 }
593
594 static inline void
595 __skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
596 struct sk_buff *new)
597 {
598
599 SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
600 __skb_insert(new, skb->prev, skb, q);
601 }
602
603 static inline void
604 __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
605 {
606
607 SKB_TRACE2(q, new);
608 __skb_queue_before(q, (struct sk_buff *)q, new);
609 }
610
611 static inline void
612 skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
613 {
614 unsigned long flags;
615
616 SKB_TRACE2(q, new);
617 spin_lock_irqsave(&q->lock, flags);
618 __skb_queue_tail(q, new);
619 spin_unlock_irqrestore(&q->lock, flags);
620 }
621
622 static inline struct sk_buff *
623 skb_peek(const struct sk_buff_head *q)
624 {
625 struct sk_buff *skb;
626
627 skb = q->next;
628 SKB_TRACE2(q, skb);
629 if (skb == (const struct sk_buff *)q)
630 return (NULL);
631 return (skb);
632 }
633
634 static inline struct sk_buff *
635 skb_peek_tail(const struct sk_buff_head *q)
636 {
637 struct sk_buff *skb;
638
639 skb = READ_ONCE(q->prev);
640 SKB_TRACE2(q, skb);
641 if (skb == (const struct sk_buff *)q)
642 return (NULL);
643 return (skb);
644 }
645
646 static inline void
647 __skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
648 {
649 struct sk_buff *p, *n;
650
651 SKB_TRACE2(skb, q);
652
653 WRITE_ONCE(q->qlen, q->qlen - 1);
654 p = skb->prev;
655 n = skb->next;
656 WRITE_ONCE(n->prev, p);
657 WRITE_ONCE(p->next, n);
658 skb->prev = skb->next = NULL;
659 }
660
661 static inline void
662 skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
663 {
664 unsigned long flags;
665
666 SKB_TRACE2(skb, q);
667 spin_lock_irqsave(&q->lock, flags);
668 __skb_unlink(skb, q);
669 spin_unlock_irqrestore(&q->lock, flags);
670 }
671
672 static inline struct sk_buff *
673 __skb_dequeue(struct sk_buff_head *q)
674 {
675 struct sk_buff *skb;
676
677 skb = skb_peek(q);
678 if (skb != NULL)
679 __skb_unlink(skb, q);
680 SKB_TRACE2(q, skb);
681 return (skb);
682 }
683
684 static inline struct sk_buff *
685 skb_dequeue(struct sk_buff_head *q)
686 {
687 unsigned long flags;
688 struct sk_buff *skb;
689
690 spin_lock_irqsave(&q->lock, flags);
691 skb = __skb_dequeue(q);
692 spin_unlock_irqrestore(&q->lock, flags);
693 SKB_TRACE2(q, skb);
694 return (skb);
695 }
696
697 static inline struct sk_buff *
698 __skb_dequeue_tail(struct sk_buff_head *q)
699 {
700 struct sk_buff *skb;
701
702 skb = skb_peek_tail(q);
703 if (skb != NULL)
704 __skb_unlink(skb, q);
705 SKB_TRACE2(q, skb);
706 return (skb);
707 }
708
709 static inline struct sk_buff *
710 skb_dequeue_tail(struct sk_buff_head *q)
711 {
712 unsigned long flags;
713 struct sk_buff *skb;
714
715 spin_lock_irqsave(&q->lock, flags);
716 skb = __skb_dequeue_tail(q);
717 spin_unlock_irqrestore(&q->lock, flags);
718 SKB_TRACE2(q, skb);
719 return (skb);
720 }
721
722 static inline void
723 __skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
724 {
725
726 SKB_TRACE2(q, skb);
727 __skb_queue_after(q, (struct sk_buff *)q, skb);
728 }
729
730 static inline void
731 skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
732 {
733 unsigned long flags;
734
735 SKB_TRACE2(q, skb);
736 spin_lock_irqsave(&q->lock, flags);
737 __skb_queue_head(q, skb);
738 spin_unlock_irqrestore(&q->lock, flags);
739 }
740
741 static inline uint32_t
742 skb_queue_len(const struct sk_buff_head *q)
743 {
744
745 SKB_TRACE(q);
746 return (q->qlen);
747 }
748
749 static inline uint32_t
750 skb_queue_len_lockless(const struct sk_buff_head *q)
751 {
752
753 SKB_TRACE(q);
754 return (READ_ONCE(q->qlen));
755 }
756
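/*
 * Queue usage sketch: the head embeds its own lock, so the locked variants
 * are self-contained and the __-prefixed ones are for callers that already
 * hold the lock or own the queue exclusively:
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);
 *	while ((skb = skb_dequeue(&q)) != NULL)
 *		kfree_skb(skb);
 */
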
757 static inline void
758 ___skb_queue_splice(const struct sk_buff_head *from,
759 struct sk_buff *p, struct sk_buff *n)
760 {
761 struct sk_buff *b, *e;
762
763 b = from->next;
764 e = from->prev;
765
766 WRITE_ONCE(b->prev, p);
767 WRITE_ONCE(((struct sk_buff_head_l *)p)->next, b);
768 WRITE_ONCE(e->next, n);
769 WRITE_ONCE(((struct sk_buff_head_l *)n)->prev, e);
770 }
771
772 static inline void
773 skb_queue_splice(const struct sk_buff_head *from, struct sk_buff_head *to)
774 {
775
776 SKB_TRACE2(from, to);
777
778 if (skb_queue_empty(from))
779 return;
780
781 ___skb_queue_splice(from, (struct sk_buff *)to, to->next);
782 to->qlen += from->qlen;
783 }
784
785 static inline void
786 skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
787 {
788
789 skb_queue_splice(from, to);
790 __skb_queue_head_init(from);
791 }
792
793 static inline void
794 skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
795 {
796
797 SKB_TRACE2(from, to);
798
799 if (skb_queue_empty(from))
800 return;
801
802 ___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
803 to->qlen += from->qlen;
804 __skb_queue_head_init(from);
805 }
806
807
808 static inline void
809 __skb_queue_purge(struct sk_buff_head *q)
810 {
811 struct sk_buff *skb;
812
813 SKB_TRACE(q);
814 while ((skb = __skb_dequeue(q)) != NULL)
815 kfree_skb(skb);
816 WARN_ONCE(skb_queue_len(q) != 0, "%s: queue %p not empty: %u",
817 __func__, q, skb_queue_len(q));
818 }
819
820 static inline void
821 skb_queue_purge(struct sk_buff_head *q)
822 {
823 struct sk_buff_head _q;
824 unsigned long flags;
825
826 SKB_TRACE(q);
827
828 if (skb_queue_empty_lockless(q))
829 return;
830
831 __skb_queue_head_init(&_q);
832 spin_lock_irqsave(&q->lock, flags);
833 skb_queue_splice_init(q, &_q);
834 spin_unlock_irqrestore(&q->lock, flags);
835 __skb_queue_purge(&_q);
836 }
837
838 static inline struct sk_buff *
839 skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
840 {
841
842 SKB_TRACE2(q, skb);
843 /* XXX what is the q argument good for? */
844 return (skb->prev);
845 }
846
847 /* -------------------------------------------------------------------------- */
848
849 static inline struct sk_buff *
850 skb_copy(const struct sk_buff *skb, gfp_t gfp)
851 {
852 struct sk_buff *new;
853
854 new = linuxkpi_skb_copy(skb, gfp);
855 SKB_TRACE2(skb, new);
856 return (new);
857 }
858
859 static inline uint16_t
860 skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
861 {
862 SKB_TRACE(skb);
863 SKB_TODO();
864 return (0xffff);
865 }
866
867 static inline int
868 skb_checksum_start_offset(struct sk_buff *skb)
869 {
870 SKB_TRACE(skb);
871 SKB_TODO();
872 return (-1);
873 }
874
875 static inline dma_addr_t
876 skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
877 size_t fragsz, enum dma_data_direction dir)
878 {
879 SKB_TRACE2(frag, dev);
880 SKB_TODO();
881 return (-1);
882 }
883
884 static inline size_t
885 skb_frag_size(const skb_frag_t *frag)
886 {
887 SKB_TRACE(frag);
888 return (frag->size);
889 }
890
891 #define skb_walk_frags(_skb, _frag) \
892 for ((_frag) = (_skb); false; (_frag)++)
893
894 static inline void
895 skb_checksum_help(struct sk_buff *skb)
896 {
897 SKB_TRACE(skb);
898 SKB_TODO();
899 }
900
901 static inline bool
902 skb_ensure_writable(struct sk_buff *skb, size_t off)
903 {
904 SKB_TRACE(skb);
905 SKB_TODO();
906 return (false);
907 }
908
909 static inline void *
910 skb_frag_address(const skb_frag_t *frag)
911 {
912 SKB_TRACE(frag);
913 	return (page_address(frag->page) + frag->offset);
914 }
915
916 static inline void
917 skb_free_frag(void *frag)
918 {
919
920 page_frag_free(frag);
921 }
922
923 static inline struct sk_buff *
924 skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
925 {
926 SKB_TRACE(skb);
927 SKB_TODO();
928 return (NULL);
929 }
930
931 static inline bool
932 skb_is_gso(struct sk_buff *skb)
933 {
934 SKB_TRACE(skb);
935 SKB_IMPROVE("Really a TODO but get it away from logging");
936 return (false);
937 }
938
939 static inline void
940 skb_mark_not_on_list(struct sk_buff *skb)
941 {
942 SKB_TRACE(skb);
943 skb->next = NULL;
944 }
945
946 static inline void
947 skb_reset_transport_header(struct sk_buff *skb)
948 {
949
950 SKB_TRACE(skb);
951 skb->l4hdroff = skb->data - skb->head;
952 }
953
954 static inline uint8_t *
955 skb_transport_header(struct sk_buff *skb)
956 {
957
958 SKB_TRACE(skb);
959 return (skb->head + skb->l4hdroff);
960 }
961
962 static inline uint8_t *
963 skb_network_header(struct sk_buff *skb)
964 {
965
966 SKB_TRACE(skb);
967 return (skb->head + skb->l3hdroff);
968 }
969
970 static inline int
971 __skb_linearize(struct sk_buff *skb)
972 {
973 SKB_TRACE(skb);
974 SKB_TODO();
975 return (-ENXIO);
976 }
977
978 static inline int
979 skb_linearize(struct sk_buff *skb)
980 {
981 return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
982 }
983
984 static inline int
985 pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
986 {
987 SKB_TRACE(skb);
988 SKB_TODO();
989 return (-ENXIO);
990 }
991
992 /* Not really seen this one but need it as symmetric accessor function. */
993 static inline void
994 skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
995 {
996
997 SKB_TRACE_FMT(skb, "qmap %u", qmap);
998 skb->qmap = qmap;
999 }
1000
1001 static inline uint16_t
1002 skb_get_queue_mapping(struct sk_buff *skb)
1003 {
1004
1005 SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
1006 return (skb->qmap);
1007 }
1008
1009 static inline void
1010 skb_copy_header(struct sk_buff *to, const struct sk_buff *from)
1011 {
1012 SKB_TRACE2(to, from);
1013 SKB_TODO();
1014 }
1015
1016 static inline bool
1017 skb_header_cloned(struct sk_buff *skb)
1018 {
1019 SKB_TRACE(skb);
1020 SKB_TODO();
1021 return (true);
1022 }
1023
1024 static inline uint8_t *
1025 skb_mac_header(const struct sk_buff *skb)
1026 {
1027 SKB_TRACE(skb);
1028 return (skb->head + skb->mac_header);
1029 }
1030
1031 static inline void
1032 skb_reset_mac_header(struct sk_buff *skb)
1033 {
1034 SKB_TRACE(skb);
1035 skb->mac_header = skb->data - skb->head;
1036 }
1037
1038 static inline void
1039 skb_set_mac_header(struct sk_buff *skb, const size_t len)
1040 {
1041 SKB_TRACE(skb);
1042 skb_reset_mac_header(skb);
1043 skb->mac_header += len;
1044 }
1045
1046 static inline struct skb_shared_hwtstamps *
1047 skb_hwtstamps(struct sk_buff *skb)
1048 {
1049 SKB_TRACE(skb);
1050 SKB_TODO();
1051 return (NULL);
1052 }
1053
1054 static inline void
1055 skb_orphan(struct sk_buff *skb)
1056 {
1057 SKB_TRACE(skb);
1058 SKB_TODO();
1059 }
1060
1061 static inline __wsum
1062 csum_unfold(__sum16 sum)
1063 {
1064 return (sum);
1065 }
1066
1067 static __inline void
1068 skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
1069 {
1070 SKB_TODO();
1071 }
1072
1073 static inline void
1074 skb_reset_tail_pointer(struct sk_buff *skb)
1075 {
1076
1077 SKB_TRACE(skb);
1078 #ifdef SKB_DOING_OFFSETS_US_NOT
1079 skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
1080 #endif
1081 skb->tail = skb->data;
1082 SKB_TRACE(skb);
1083 }
1084
1085 static inline struct sk_buff *
1086 skb_get(struct sk_buff *skb)
1087 {
1088
1089 SKB_TRACE(skb);
1090 refcount_inc(&skb->refcnt);
1091 return (skb);
1092 }
1093
1094 static inline struct sk_buff *
1095 skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1096 {
1097
1098 SKB_TODO();
1099 return (NULL);
1100 }
1101
1102 static inline void
1103 skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
1104 {
1105
1106 SKB_TRACE(skb);
1107 /* Let us just hope the destination has len space ... */
1108 memcpy(dst, skb->data, len);
1109 }
1110
1111 static inline int
1112 skb_pad(struct sk_buff *skb, int pad)
1113 {
1114
1115 SKB_TRACE(skb);
1116 SKB_TODO();
1117 return (-1);
1118 }
1119
1120 static inline void
1121 skb_list_del_init(struct sk_buff *skb)
1122 {
1123
1124 SKB_TRACE(skb);
1125 __list_del_entry(&skb->list);
1126 skb_mark_not_on_list(skb);
1127 }
1128
1129 static inline void
1130 napi_consume_skb(struct sk_buff *skb, int budget)
1131 {
1132
1133 SKB_TRACE(skb);
1134 SKB_TODO();
1135 }
1136
1137 static inline struct sk_buff *
1138 napi_build_skb(void *data, size_t len)
1139 {
1140
1141 SKB_TODO();
1142 return (NULL);
1143 }
1144
1145 static inline uint32_t
1146 skb_get_hash(struct sk_buff *skb)
1147 {
1148 SKB_TRACE(skb);
1149 SKB_TODO();
1150 return (0);
1151 }
1152
1153 static inline void
1154 skb_mark_for_recycle(struct sk_buff *skb)
1155 {
1156 SKB_TRACE(skb);
1157 /* page_pool */
1158 SKB_TODO();
1159 }
1160
1161 static inline int
1162 skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1163 {
1164 SKB_TRACE(skb);
1165 SKB_TODO();
1166 return (-1);
1167 }
1168
1169 /* Misplaced here really but sock comes from skbuff. */
1170 #define sk_pacing_shift_update(sock, n)
1171
1172 #define SKB_WITH_OVERHEAD(_s) \
1173 	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
1174
1175 #endif /* _LINUXKPI_LINUX_SKBUFF_H */
1176