/*-
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 * Copyright (c) 2021-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 * Do not rely on the internals of this implementation.  They are highly
 * likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

/* #define SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
	if (linuxkpi_debug_skb & DSKB_TODO)				\
		printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define	SKB_IMPROVE(...)						\
	if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define	SKB_TRACE(_s)							\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define	SKB_TRACE2(_s, _p)						\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s, \
		    __VA_ARGS__)
#else
#define	SKB_TODO()		do { } while(0)
#define	SKB_IMPROVE(...)	do { } while(0)
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#endif
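/*
 * Illustrative use of the trace macros above (only compiled in with
 * SKB_DEBUG defined, and only active when the matching DSKB_* bit is set
 * in linuxkpi_debug_skb):
 *
 *	SKB_TRACE_FMT(skb, "len %u data_len %u", skb->len, skb->data_len);
 */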
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
	/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE		= 0x00,
	CHECKSUM_UNNECESSARY	= 0x01,
	CHECKSUM_PARTIAL	= 0x02,
	CHECKSUM_COMPLETE	= 0x04,
};

struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int			csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;		/* Head of buffer. */
	uint8_t			*data;		/* Head of data. */
	uint8_t			*tail;		/* End of data. */
	uint8_t			*end;		/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
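/*
 * Buffer layout sketch, as implied by the pointer assertions in the
 * accessors below (an illustration, not a stable ABI):
 *
 *	head      data        tail      end
 *	  |         |           |        |
 *	  v         v           v        v
 *	  +---------+-----------+--------+
 *	  | headroom|   data    |tailroom|
 *	  +---------+-----------+--------+
 *
 * Invariant: head <= data <= tail <= end; skb->len counts the data bytes
 * (frag bytes are accounted for in data_len as well).
 */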
/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next; (skb) != NULL &&			\
	    ((tmp) = (skb)->next); (skb) = (tmp))

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}

/*
 * Length of the data on the skb (without any frags)???
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}
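/*
 * Illustrative headroom dance (a sketch with made-up names; assumes
 * alloc_skb() provides at least hdrlen + paylen bytes of buffer):
 * reserve space while the skb is still empty, append the payload with
 * skb_put_data() (below), then prepend the header with skb_push() and
 * fill it in through the returned pointer.
 *
 *	skb = alloc_skb(hdrlen + paylen, GFP_KERNEL);
 *	skb_reserve(skb, hdrlen);
 *	skb_put_data(skb, payload, paylen);
 *	hdr = skb_push(skb, hdrlen);
 */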
/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
		    __func__, skb, skb->len, skb->head, skb->data, skb->tail,
		    skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
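/*
 * Put/pull round trip (an illustrative sketch; "struct myhdr" is a made-up
 * type): skb_put_data() appends to the tail, skb_pull() consumes from the
 * front by advancing skb->data and shrinking skb->len, returning NULL when
 * the frame is shorter than requested.
 *
 *	skb_put_data(skb, frame, framelen);
 *	hdr = (struct myhdr *)skb->data;
 *	if (skb_pull(skb, sizeof(*hdr)) == NULL)
 *		return (-EINVAL);
 */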
/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
		    "page %#jx offset %ju size %zu truesize %u\n", __func__,
		    skb, skb->head, skb->data, skb->tail, skb->end, skb->len,
		    fragno, (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}

/* -------------------------------------------------------------------------- */
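/*
 * The queue functions below treat struct sk_buff_head as the sentinel node
 * of a circular doubly-linked list: the head is cast to struct sk_buff *,
 * which relies on next/prev being the first members of both structures
 * (see the anonymous union at the top of struct sk_buff).
 */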
/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *s;

	SKB_TRACE2(q, skb);
	q->qlen++;
	s = (struct sk_buff *)q;
	s->prev->next = skb;
	skb->prev = s->prev;
	skb->next = s;
	s->prev = skb;
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}
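/*
 * Minimal FIFO usage sketch ("rxq" is a made-up name).  Note that, at the
 * time of writing, the non-underscore queue functions above do not take
 * q->lock; callers need to provide their own serialization.
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);
 *	...
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		...process and free skb...
 *	skb_queue_purge(&rxq);
 */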
/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_free_frag(void *frag)
{

	SKB_TODO();
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;
	e = from->prev;
	n = to->next;

	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
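/*
 * The header accessors below store offsets relative to skb->head
 * (l3hdroff/l4hdroff) rather than raw pointers, so the recorded positions
 * stay valid when skb->data later moves via skb_push()/skb_pull().
 */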
static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	SKB_TODO();
	return (NULL);
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	/*
	 * skb->tail is a pointer in this implementation, not an offset as
	 * in Linux, so reset it to the current data pointer (rather than
	 * assigning the data - head offset cast to a pointer).
	 */
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}
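/*
 * Caller-side sketch for skb_copy_from_linear_data() (illustrative): the
 * destination must provide len bytes, and len should not exceed the
 * linear data as reported by skb_headlen().
 *
 *	uint8_t hdr[32];
 *
 *	if (skb_headlen(skb) >= sizeof(hdr))
 *		skb_copy_from_linear_data(skb, hdr, sizeof(hdr));
 */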
static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{

	SKB_TODO();
	return (NULL);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_linearize(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */