/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}


/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
			bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
		child->pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only the data buffer where the NIC puts
 *  the incoming frame.
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb() to allocate the sk_buff and populate it
 *  before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);

struct netdev_alloc_cache {
	struct page_frag frag;
	/* we maintain a pagecount bias, so that we don't dirty the cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	int order;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->frag.page)) {
refill:
		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
			gfp_t gfp = gfp_mask;

			if (order)
				gfp |= __GFP_COMP | __GFP_NOWARN;
			nc->frag.page = alloc_pages(gfp, order);
			if (likely(nc->frag.page))
				break;
			if (--order < 0)
				goto end;
		}
		nc->frag.size = PAGE_SIZE << order;
recycle:
		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	if (nc->frag.offset + fragsz > nc->frag.size) {
		/* avoid unnecessary locked operations if possible */
		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
			goto recycle;
		goto refill;
	}

	data = page_address(nc->frag.page) + nc->frag.offset;
	nc->frag.offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

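/*
 * Illustrative sketch, not part of the original file: how a driver RX path
 * might pair netdev_alloc_frag() with build_skb(), following the headroom
 * and tailroom rules spelled out above build_skb().  The function name
 * example_rx_build_skb() and its parameters are hypothetical.
 */
static struct sk_buff * __maybe_unused
example_rx_build_skb(void *data, unsigned int bufsz, unsigned int pktlen)
{
	/* @data is assumed to come from netdev_alloc_frag(bufsz), where
	 * bufsz was SKB_DATA_ALIGN(NET_SKB_PAD + max frame size) +
	 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
	 */
	struct sk_buff *skb = build_skb(data, bufsz);

	if (unlikely(!skb))
		return NULL;		/* caller still owns @data */

	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom left for the stack */
	skb_put(skb, pktlen);		/* bytes the NIC actually wrote */
	return skb;
}
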
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb buf is from userspace, we need to notify the
		 * caller that the lower device DMA is done.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

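/*
 * Illustrative sketch, not part of the original file: the distinction the
 * comments above draw between kfree_skb() and consume_skb(), as it would
 * typically look in a driver TX completion handler.  example_tx_done() and
 * its tx_ok flag are hypothetical.
 */
static void __maybe_unused example_tx_done(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* real drop: visible to drop tracing */
}
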
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->pfmemalloc = old->pfmemalloc;
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

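/*
 * Illustrative sketch, not part of the original file: picking between
 * skb_clone(), pskb_copy() and skb_copy() along the lines the comments
 * above describe.  example_prepare_for_edit() and its edit_payload flag
 * are hypothetical.
 */
static struct sk_buff * __maybe_unused
example_prepare_for_edit(struct sk_buff *skb, bool edit_payload)
{
	if (!skb_shared(skb) && !skb_cloned(skb))
		return skb;			/* already private */

	if (edit_payload)
		return skb_copy(skb, GFP_ATOMIC);  /* private head and data */

	return pskb_copy(skb, GFP_ATOMIC);	/* private head, shared frags */
}
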
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or a
 *	negative error code if expansion failed. In the last case,
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

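/*
 * Illustrative sketch, not part of the original file: making room to push
 * an encapsulation header, expanding the head with pskb_expand_head() as
 * described above when headroom is short or the head is cloned.  The
 * buffer must not be shared.  example_make_headroom() and its hdr_len
 * parameter are hypothetical.
 */
static int __maybe_unused example_make_headroom(struct sk_buff *skb,
						unsigned int hdr_len)
{
	unsigned int delta = 0;

	if (skb_headroom(skb) < hdr_len)
		delta = hdr_len - skb_headroom(skb);

	if (!delta && !skb_cloned(skb))
		return 0;		/* nothing to do */

	/* All pointers into the old head must be reloaded after this. */
	return pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC);
}
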
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header   += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skbs really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

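/*
 * Illustrative sketch, not part of the original file: the typical use of
 * skb_pad() noted above - padding a short Ethernet frame out to the
 * minimum length before transmission.  Because skb_pad() frees the buffer
 * on error, the skb must not be touched again in that case.
 * example_pad_to_min() is a hypothetical name.
 */
static int __maybe_unused example_pad_to_min(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return 0;

	return skb_pad(skb, ETH_ZLEN - skb->len);	/* frees skb on error */
}
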
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

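/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * building an outgoing packet with the helpers above - reserve headroom,
 * put the payload, then push the header in front of it.  The names
 * example_build_packet(), hdr_len and payload_len are hypothetical; the
 * header bytes are left zeroed for the caller to fill in.
 */
static struct sk_buff * __maybe_unused
example_build_packet(unsigned int hdr_len, const void *payload,
		     unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_len);			/* headroom for the header */
	memcpy(skb_put(skb, payload_len), payload, payload_len);
	memset(skb_push(skb, hdr_len), 0, hdr_len);	/* header space, zeroed */

	return skb;
}
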
/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

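/*
 * Illustrative sketch, not part of the original file: the usual way
 * __pskb_pull_tail() ends up being exercised - through pskb_may_pull() -
 * to make sure a header is in the linear area before it is dereferenced.
 * example_peek_header() and its hdr_len parameter are hypothetical.
 */
static void * __maybe_unused example_peek_header(struct sk_buff *skb,
						 unsigned int hdr_len)
{
	/* May reallocate the head; old pointers into skb->data go stale. */
	if (!pskb_may_pull(skb, hdr_len))
		return NULL;

	return skb->data;
}
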
/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sk_buff *skb, struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  struct sk_buff *skb, bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len, struct sk_buff *skb,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return true;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, skb, spd, false, sk, pipe))
			return true;
	}

	return false;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(frag));
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_atomic(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

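/*
 * Illustrative sketch, not part of the original file: how the
 * fragment-walking helpers above are commonly combined - copy a header out
 * of a possibly non-linear skb with skb_copy_bits() and checksum the rest
 * with skb_checksum().  example_copy_and_csum() is a hypothetical name.
 */
static int __maybe_unused example_copy_and_csum(const struct sk_buff *skb,
						int hdr_len, void *hdr_buf,
						__wsum *csum)
{
	if (skb_copy_bits(skb, 0, hdr_buf, hdr_len))
		return -EFAULT;

	*csum = skb_checksum(skb, hdr_len, skb->len - hdr_len, 0);
	return 0;
}
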
*/ 2038 2039 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2040 u8 *to, int len, __wsum csum) 2041 { 2042 int start = skb_headlen(skb); 2043 int i, copy = start - offset; 2044 struct sk_buff *frag_iter; 2045 int pos = 0; 2046 2047 /* Copy header. */ 2048 if (copy > 0) { 2049 if (copy > len) 2050 copy = len; 2051 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2052 copy, csum); 2053 if ((len -= copy) == 0) 2054 return csum; 2055 offset += copy; 2056 to += copy; 2057 pos = copy; 2058 } 2059 2060 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2061 int end; 2062 2063 WARN_ON(start > offset + len); 2064 2065 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2066 if ((copy = end - offset) > 0) { 2067 __wsum csum2; 2068 u8 *vaddr; 2069 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2070 2071 if (copy > len) 2072 copy = len; 2073 vaddr = kmap_atomic(skb_frag_page(frag)); 2074 csum2 = csum_partial_copy_nocheck(vaddr + 2075 frag->page_offset + 2076 offset - start, to, 2077 copy, 0); 2078 kunmap_atomic(vaddr); 2079 csum = csum_block_add(csum, csum2, pos); 2080 if (!(len -= copy)) 2081 return csum; 2082 offset += copy; 2083 to += copy; 2084 pos += copy; 2085 } 2086 start = end; 2087 } 2088 2089 skb_walk_frags(skb, frag_iter) { 2090 __wsum csum2; 2091 int end; 2092 2093 WARN_ON(start > offset + len); 2094 2095 end = start + frag_iter->len; 2096 if ((copy = end - offset) > 0) { 2097 if (copy > len) 2098 copy = len; 2099 csum2 = skb_copy_and_csum_bits(frag_iter, 2100 offset - start, 2101 to, copy, 0); 2102 csum = csum_block_add(csum, csum2, pos); 2103 if ((len -= copy) == 0) 2104 return csum; 2105 offset += copy; 2106 to += copy; 2107 pos += copy; 2108 } 2109 start = end; 2110 } 2111 BUG_ON(len); 2112 return csum; 2113 } 2114 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2115 2116 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2117 { 2118 __wsum csum; 2119 long csstart; 2120 2121 if (skb->ip_summed == CHECKSUM_PARTIAL) 2122 csstart = skb_checksum_start_offset(skb); 2123 else 2124 csstart = skb_headlen(skb); 2125 2126 BUG_ON(csstart > skb_headlen(skb)); 2127 2128 skb_copy_from_linear_data(skb, to, csstart); 2129 2130 csum = 0; 2131 if (csstart != skb->len) 2132 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 2133 skb->len - csstart, 0); 2134 2135 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2136 long csstuff = csstart + skb->csum_offset; 2137 2138 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 2139 } 2140 } 2141 EXPORT_SYMBOL(skb_copy_and_csum_dev); 2142 2143 /** 2144 * skb_dequeue - remove from the head of the queue 2145 * @list: list to dequeue from 2146 * 2147 * Remove the head of the list. The list lock is taken so the function 2148 * may be used safely with other locking list functions. The head item is 2149 * returned or %NULL if the list is empty. 2150 */ 2151 2152 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 2153 { 2154 unsigned long flags; 2155 struct sk_buff *result; 2156 2157 spin_lock_irqsave(&list->lock, flags); 2158 result = __skb_dequeue(list); 2159 spin_unlock_irqrestore(&list->lock, flags); 2160 return result; 2161 } 2162 EXPORT_SYMBOL(skb_dequeue); 2163 2164 /** 2165 * skb_dequeue_tail - remove from the tail of the queue 2166 * @list: list to dequeue from 2167 * 2168 * Remove the tail of the list. The list lock is taken so the function 2169 * may be used safely with other locking list functions. The tail item is 2170 * returned or %NULL if the list is empty. 
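 *
 * Illustrative (hypothetical) use, reclaiming the most recently queued
 * buffer from a made-up driver queue:
 *
 *	skb = skb_dequeue_tail(&priv->tx_queue);
 *	if (skb)
 *		kfree_skb(skb);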
2171 */ 2172 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 2173 { 2174 unsigned long flags; 2175 struct sk_buff *result; 2176 2177 spin_lock_irqsave(&list->lock, flags); 2178 result = __skb_dequeue_tail(list); 2179 spin_unlock_irqrestore(&list->lock, flags); 2180 return result; 2181 } 2182 EXPORT_SYMBOL(skb_dequeue_tail); 2183 2184 /** 2185 * skb_queue_purge - empty a list 2186 * @list: list to empty 2187 * 2188 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2189 * the list and one reference dropped. This function takes the list 2190 * lock and is atomic with respect to other list locking functions. 2191 */ 2192 void skb_queue_purge(struct sk_buff_head *list) 2193 { 2194 struct sk_buff *skb; 2195 while ((skb = skb_dequeue(list)) != NULL) 2196 kfree_skb(skb); 2197 } 2198 EXPORT_SYMBOL(skb_queue_purge); 2199 2200 /** 2201 * skb_queue_head - queue a buffer at the list head 2202 * @list: list to use 2203 * @newsk: buffer to queue 2204 * 2205 * Queue a buffer at the start of the list. This function takes the 2206 * list lock and can be used safely with other locking &sk_buff functions 2207 * safely. 2208 * 2209 * A buffer cannot be placed on two lists at the same time. 2210 */ 2211 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 2212 { 2213 unsigned long flags; 2214 2215 spin_lock_irqsave(&list->lock, flags); 2216 __skb_queue_head(list, newsk); 2217 spin_unlock_irqrestore(&list->lock, flags); 2218 } 2219 EXPORT_SYMBOL(skb_queue_head); 2220 2221 /** 2222 * skb_queue_tail - queue a buffer at the list tail 2223 * @list: list to use 2224 * @newsk: buffer to queue 2225 * 2226 * Queue a buffer at the tail of the list. This function takes the 2227 * list lock and can be used safely with other locking &sk_buff functions 2228 * safely. 2229 * 2230 * A buffer cannot be placed on two lists at the same time. 2231 */ 2232 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 2233 { 2234 unsigned long flags; 2235 2236 spin_lock_irqsave(&list->lock, flags); 2237 __skb_queue_tail(list, newsk); 2238 spin_unlock_irqrestore(&list->lock, flags); 2239 } 2240 EXPORT_SYMBOL(skb_queue_tail); 2241 2242 /** 2243 * skb_unlink - remove a buffer from a list 2244 * @skb: buffer to remove 2245 * @list: list to use 2246 * 2247 * Remove a packet from a list. The list locks are taken and this 2248 * function is atomic with respect to other list locked calls 2249 * 2250 * You must know what list the SKB is on. 2251 */ 2252 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 2253 { 2254 unsigned long flags; 2255 2256 spin_lock_irqsave(&list->lock, flags); 2257 __skb_unlink(skb, list); 2258 spin_unlock_irqrestore(&list->lock, flags); 2259 } 2260 EXPORT_SYMBOL(skb_unlink); 2261 2262 /** 2263 * skb_append - append a buffer 2264 * @old: buffer to insert after 2265 * @newsk: buffer to insert 2266 * @list: list to use 2267 * 2268 * Place a packet after a given packet in a list. The list locks are taken 2269 * and this function is atomic with respect to other list locked calls. 2270 * A buffer cannot be placed on two lists at the same time. 
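 *
 * Illustrative (hypothetical) use, queueing a reply directly behind
 * the request it answers (request_skb, reply_skb and queue are made-up
 * names; @old must already be on @list):
 *
 *	skb_append(request_skb, reply_skb, &queue);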
2271 */ 2272 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2273 { 2274 unsigned long flags; 2275 2276 spin_lock_irqsave(&list->lock, flags); 2277 __skb_queue_after(list, old, newsk); 2278 spin_unlock_irqrestore(&list->lock, flags); 2279 } 2280 EXPORT_SYMBOL(skb_append); 2281 2282 /** 2283 * skb_insert - insert a buffer 2284 * @old: buffer to insert before 2285 * @newsk: buffer to insert 2286 * @list: list to use 2287 * 2288 * Place a packet before a given packet in a list. The list locks are 2289 * taken and this function is atomic with respect to other list locked 2290 * calls. 2291 * 2292 * A buffer cannot be placed on two lists at the same time. 2293 */ 2294 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2295 { 2296 unsigned long flags; 2297 2298 spin_lock_irqsave(&list->lock, flags); 2299 __skb_insert(newsk, old->prev, old, list); 2300 spin_unlock_irqrestore(&list->lock, flags); 2301 } 2302 EXPORT_SYMBOL(skb_insert); 2303 2304 static inline void skb_split_inside_header(struct sk_buff *skb, 2305 struct sk_buff* skb1, 2306 const u32 len, const int pos) 2307 { 2308 int i; 2309 2310 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2311 pos - len); 2312 /* And move data appendix as is. */ 2313 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2314 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2315 2316 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2317 skb_shinfo(skb)->nr_frags = 0; 2318 skb1->data_len = skb->data_len; 2319 skb1->len += skb1->data_len; 2320 skb->data_len = 0; 2321 skb->len = len; 2322 skb_set_tail_pointer(skb, len); 2323 } 2324 2325 static inline void skb_split_no_header(struct sk_buff *skb, 2326 struct sk_buff* skb1, 2327 const u32 len, int pos) 2328 { 2329 int i, k = 0; 2330 const int nfrags = skb_shinfo(skb)->nr_frags; 2331 2332 skb_shinfo(skb)->nr_frags = 0; 2333 skb1->len = skb1->data_len = skb->len - len; 2334 skb->len = len; 2335 skb->data_len = len - pos; 2336 2337 for (i = 0; i < nfrags; i++) { 2338 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2339 2340 if (pos + size > len) { 2341 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2342 2343 if (pos < len) { 2344 /* Split frag. 2345 * We have two variants in this case: 2346 * 1. Move all the frag to the second 2347 * part, if it is possible. F.e. 2348 * this approach is mandatory for TUX, 2349 * where splitting is expensive. 2350 * 2. Split is accurately. We make this. 2351 */ 2352 skb_frag_ref(skb, i); 2353 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2354 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2355 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2356 skb_shinfo(skb)->nr_frags++; 2357 } 2358 k++; 2359 } else 2360 skb_shinfo(skb)->nr_frags++; 2361 pos += size; 2362 } 2363 skb_shinfo(skb1)->nr_frags = k; 2364 } 2365 2366 /** 2367 * skb_split - Split fragmented skb to two parts at length len. 2368 * @skb: the buffer to split 2369 * @skb1: the buffer to receive the second part 2370 * @len: new length for skb 2371 */ 2372 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2373 { 2374 int pos = skb_headlen(skb); 2375 2376 if (len < pos) /* Split line is inside header. */ 2377 skb_split_inside_header(skb, skb1, len, pos); 2378 else /* Second chunk has no header, nothing to copy. */ 2379 skb_split_no_header(skb, skb1, len, pos); 2380 } 2381 EXPORT_SYMBOL(skb_split); 2382 2383 /* Shifting from/to a cloned skb is a no-go. 
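 * (skb_prepare_for_shift() below un-clones via pskb_expand_head(),
 * which reallocates the skb's data and its skb_shinfo() block.)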
2384 * 2385 * Caller cannot keep skb_shinfo related pointers past calling here! 2386 */ 2387 static int skb_prepare_for_shift(struct sk_buff *skb) 2388 { 2389 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2390 } 2391 2392 /** 2393 * skb_shift - Shifts paged data partially from skb to another 2394 * @tgt: buffer into which tail data gets added 2395 * @skb: buffer from which the paged data comes from 2396 * @shiftlen: shift up to this many bytes 2397 * 2398 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2399 * the length of the skb, from skb to tgt. Returns number bytes shifted. 2400 * It's up to caller to free skb if everything was shifted. 2401 * 2402 * If @tgt runs out of frags, the whole operation is aborted. 2403 * 2404 * Skb cannot include anything else but paged data while tgt is allowed 2405 * to have non-paged data as well. 2406 * 2407 * TODO: full sized shift could be optimized but that would need 2408 * specialized skb free'er to handle frags without up-to-date nr_frags. 2409 */ 2410 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2411 { 2412 int from, to, merge, todo; 2413 struct skb_frag_struct *fragfrom, *fragto; 2414 2415 BUG_ON(shiftlen > skb->len); 2416 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2417 2418 todo = shiftlen; 2419 from = 0; 2420 to = skb_shinfo(tgt)->nr_frags; 2421 fragfrom = &skb_shinfo(skb)->frags[from]; 2422 2423 /* Actual merge is delayed until the point when we know we can 2424 * commit all, so that we don't have to undo partial changes 2425 */ 2426 if (!to || 2427 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2428 fragfrom->page_offset)) { 2429 merge = -1; 2430 } else { 2431 merge = to - 1; 2432 2433 todo -= skb_frag_size(fragfrom); 2434 if (todo < 0) { 2435 if (skb_prepare_for_shift(skb) || 2436 skb_prepare_for_shift(tgt)) 2437 return 0; 2438 2439 /* All previous frag pointers might be stale! 
*/ 2440 fragfrom = &skb_shinfo(skb)->frags[from]; 2441 fragto = &skb_shinfo(tgt)->frags[merge]; 2442 2443 skb_frag_size_add(fragto, shiftlen); 2444 skb_frag_size_sub(fragfrom, shiftlen); 2445 fragfrom->page_offset += shiftlen; 2446 2447 goto onlymerged; 2448 } 2449 2450 from++; 2451 } 2452 2453 /* Skip full, not-fitting skb to avoid expensive operations */ 2454 if ((shiftlen == skb->len) && 2455 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2456 return 0; 2457 2458 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2459 return 0; 2460 2461 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2462 if (to == MAX_SKB_FRAGS) 2463 return 0; 2464 2465 fragfrom = &skb_shinfo(skb)->frags[from]; 2466 fragto = &skb_shinfo(tgt)->frags[to]; 2467 2468 if (todo >= skb_frag_size(fragfrom)) { 2469 *fragto = *fragfrom; 2470 todo -= skb_frag_size(fragfrom); 2471 from++; 2472 to++; 2473 2474 } else { 2475 __skb_frag_ref(fragfrom); 2476 fragto->page = fragfrom->page; 2477 fragto->page_offset = fragfrom->page_offset; 2478 skb_frag_size_set(fragto, todo); 2479 2480 fragfrom->page_offset += todo; 2481 skb_frag_size_sub(fragfrom, todo); 2482 todo = 0; 2483 2484 to++; 2485 break; 2486 } 2487 } 2488 2489 /* Ready to "commit" this state change to tgt */ 2490 skb_shinfo(tgt)->nr_frags = to; 2491 2492 if (merge >= 0) { 2493 fragfrom = &skb_shinfo(skb)->frags[0]; 2494 fragto = &skb_shinfo(tgt)->frags[merge]; 2495 2496 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2497 __skb_frag_unref(fragfrom); 2498 } 2499 2500 /* Reposition in the original skb */ 2501 to = 0; 2502 while (from < skb_shinfo(skb)->nr_frags) 2503 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2504 skb_shinfo(skb)->nr_frags = to; 2505 2506 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2507 2508 onlymerged: 2509 /* Most likely the tgt won't ever need its checksum anymore, skb on 2510 * the other hand might need it if it needs to be resent 2511 */ 2512 tgt->ip_summed = CHECKSUM_PARTIAL; 2513 skb->ip_summed = CHECKSUM_PARTIAL; 2514 2515 /* Yak, is it really working this way? Some helper please? */ 2516 skb->len -= shiftlen; 2517 skb->data_len -= shiftlen; 2518 skb->truesize -= shiftlen; 2519 tgt->len += shiftlen; 2520 tgt->data_len += shiftlen; 2521 tgt->truesize += shiftlen; 2522 2523 return shiftlen; 2524 } 2525 2526 /** 2527 * skb_prepare_seq_read - Prepare a sequential read of skb data 2528 * @skb: the buffer to read 2529 * @from: lower offset of data to be read 2530 * @to: upper offset of data to be read 2531 * @st: state variable 2532 * 2533 * Initializes the specified state variable. Must be called before 2534 * invoking skb_seq_read() for the first time. 2535 */ 2536 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2537 unsigned int to, struct skb_seq_state *st) 2538 { 2539 st->lower_offset = from; 2540 st->upper_offset = to; 2541 st->root_skb = st->cur_skb = skb; 2542 st->frag_idx = st->stepped_offset = 0; 2543 st->frag_data = NULL; 2544 } 2545 EXPORT_SYMBOL(skb_prepare_seq_read); 2546 2547 /** 2548 * skb_seq_read - Sequentially read skb data 2549 * @consumed: number of bytes consumed by the caller so far 2550 * @data: destination pointer for data to be returned 2551 * @st: state variable 2552 * 2553 * Reads a block of skb data at &consumed relative to the 2554 * lower offset specified to skb_prepare_seq_read(). Assigns 2555 * the head of the data block to &data and returns the length 2556 * of the block or 0 if the end of the skb data or the upper 2557 * offset has been reached. 
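 *
 * A minimal (hypothetical, not taken from a real caller) loop reading
 * the whole skb might look like:
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int len, consumed = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
 *		consumed += len;
 *
 * (skb_abort_seq_read() would be required instead if the loop were
 * abandoned before skb_seq_read() returned 0.)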
2558 * 2559 * The caller is not required to consume all of the data 2560 * returned, i.e. &consumed is typically set to the number 2561 * of bytes already consumed and the next call to 2562 * skb_seq_read() will return the remaining part of the block. 2563 * 2564 * Note 1: The size of each block of data returned can be arbitrary, 2565 * this limitation is the cost for zerocopy seqeuental 2566 * reads of potentially non linear data. 2567 * 2568 * Note 2: Fragment lists within fragments are not implemented 2569 * at the moment, state->root_skb could be replaced with 2570 * a stack for this purpose. 2571 */ 2572 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2573 struct skb_seq_state *st) 2574 { 2575 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2576 skb_frag_t *frag; 2577 2578 if (unlikely(abs_offset >= st->upper_offset)) 2579 return 0; 2580 2581 next_skb: 2582 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2583 2584 if (abs_offset < block_limit && !st->frag_data) { 2585 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2586 return block_limit - abs_offset; 2587 } 2588 2589 if (st->frag_idx == 0 && !st->frag_data) 2590 st->stepped_offset += skb_headlen(st->cur_skb); 2591 2592 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2593 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2594 block_limit = skb_frag_size(frag) + st->stepped_offset; 2595 2596 if (abs_offset < block_limit) { 2597 if (!st->frag_data) 2598 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2599 2600 *data = (u8 *) st->frag_data + frag->page_offset + 2601 (abs_offset - st->stepped_offset); 2602 2603 return block_limit - abs_offset; 2604 } 2605 2606 if (st->frag_data) { 2607 kunmap_atomic(st->frag_data); 2608 st->frag_data = NULL; 2609 } 2610 2611 st->frag_idx++; 2612 st->stepped_offset += skb_frag_size(frag); 2613 } 2614 2615 if (st->frag_data) { 2616 kunmap_atomic(st->frag_data); 2617 st->frag_data = NULL; 2618 } 2619 2620 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2621 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2622 st->frag_idx = 0; 2623 goto next_skb; 2624 } else if (st->cur_skb->next) { 2625 st->cur_skb = st->cur_skb->next; 2626 st->frag_idx = 0; 2627 goto next_skb; 2628 } 2629 2630 return 0; 2631 } 2632 EXPORT_SYMBOL(skb_seq_read); 2633 2634 /** 2635 * skb_abort_seq_read - Abort a sequential read of skb data 2636 * @st: state variable 2637 * 2638 * Must be called if skb_seq_read() was not called until it 2639 * returned 0. 2640 */ 2641 void skb_abort_seq_read(struct skb_seq_state *st) 2642 { 2643 if (st->frag_data) 2644 kunmap_atomic(st->frag_data); 2645 } 2646 EXPORT_SYMBOL(skb_abort_seq_read); 2647 2648 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2649 2650 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2651 struct ts_config *conf, 2652 struct ts_state *state) 2653 { 2654 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2655 } 2656 2657 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2658 { 2659 skb_abort_seq_read(TS_SKB_CB(state)); 2660 } 2661 2662 /** 2663 * skb_find_text - Find a text pattern in skb data 2664 * @skb: the buffer to look in 2665 * @from: search offset 2666 * @to: search limit 2667 * @config: textsearch configuration 2668 * @state: uninitialized textsearch state variable 2669 * 2670 * Finds a pattern in the skb data according to the specified 2671 * textsearch configuration. 
Use textsearch_next() to retrieve 2672 * subsequent occurrences of the pattern. Returns the offset 2673 * to the first occurrence or UINT_MAX if no match was found. 2674 */ 2675 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2676 unsigned int to, struct ts_config *config, 2677 struct ts_state *state) 2678 { 2679 unsigned int ret; 2680 2681 config->get_next_block = skb_ts_get_next_block; 2682 config->finish = skb_ts_finish; 2683 2684 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 2685 2686 ret = textsearch_find(config, state); 2687 return (ret <= to - from ? ret : UINT_MAX); 2688 } 2689 EXPORT_SYMBOL(skb_find_text); 2690 2691 /** 2692 * skb_append_datato_frags - append the user data to a skb 2693 * @sk: sock structure 2694 * @skb: skb structure to be appened with user data. 2695 * @getfrag: call back function to be used for getting the user data 2696 * @from: pointer to user message iov 2697 * @length: length of the iov message 2698 * 2699 * Description: This procedure append the user data in the fragment part 2700 * of the skb if any page alloc fails user this procedure returns -ENOMEM 2701 */ 2702 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2703 int (*getfrag)(void *from, char *to, int offset, 2704 int len, int odd, struct sk_buff *skb), 2705 void *from, int length) 2706 { 2707 int frg_cnt = 0; 2708 skb_frag_t *frag = NULL; 2709 struct page *page = NULL; 2710 int copy, left; 2711 int offset = 0; 2712 int ret; 2713 2714 do { 2715 /* Return error if we don't have space for new frag */ 2716 frg_cnt = skb_shinfo(skb)->nr_frags; 2717 if (frg_cnt >= MAX_SKB_FRAGS) 2718 return -EFAULT; 2719 2720 /* allocate a new page for next frag */ 2721 page = alloc_pages(sk->sk_allocation, 0); 2722 2723 /* If alloc_page fails just return failure and caller will 2724 * free previous allocated pages by doing kfree_skb() 2725 */ 2726 if (page == NULL) 2727 return -ENOMEM; 2728 2729 /* initialize the next frag */ 2730 skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 2731 skb->truesize += PAGE_SIZE; 2732 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 2733 2734 /* get the new initialized frag */ 2735 frg_cnt = skb_shinfo(skb)->nr_frags; 2736 frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 2737 2738 /* copy the user data to page */ 2739 left = PAGE_SIZE - frag->page_offset; 2740 copy = (length > left)? left : length; 2741 2742 ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), 2743 offset, copy, 0, skb); 2744 if (ret < 0) 2745 return -EFAULT; 2746 2747 /* copy was successful so update the size parameters */ 2748 skb_frag_size_add(frag, copy); 2749 skb->len += copy; 2750 skb->data_len += copy; 2751 offset += copy; 2752 length -= copy; 2753 2754 } while (length > 0); 2755 2756 return 0; 2757 } 2758 EXPORT_SYMBOL(skb_append_datato_frags); 2759 2760 /** 2761 * skb_pull_rcsum - pull skb and update receive checksum 2762 * @skb: buffer to update 2763 * @len: length of data pulled 2764 * 2765 * This function performs an skb_pull on the packet and updates 2766 * the CHECKSUM_COMPLETE checksum. It should be used on 2767 * receive path processing instead of skb_pull unless you know 2768 * that the checksum difference is zero (e.g., a valid IP header) 2769 * or you are setting ip_summed to CHECKSUM_NONE. 
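 *
 * Illustrative (hypothetical) receive-path use when stripping an
 * encapsulation header of hdrlen bytes (hdrlen is a made-up name):
 *
 *	if (!pskb_may_pull(skb, hdrlen))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdrlen);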
2770 */ 2771 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2772 { 2773 BUG_ON(len > skb->len); 2774 skb->len -= len; 2775 BUG_ON(skb->len < skb->data_len); 2776 skb_postpull_rcsum(skb, skb->data, len); 2777 return skb->data += len; 2778 } 2779 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2780 2781 /** 2782 * skb_segment - Perform protocol segmentation on skb. 2783 * @skb: buffer to segment 2784 * @features: features for the output path (see dev->features) 2785 * 2786 * This function performs segmentation on the given skb. It returns 2787 * a pointer to the first in a list of new skbs for the segments. 2788 * In case of error it returns ERR_PTR(err). 2789 */ 2790 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2791 { 2792 struct sk_buff *segs = NULL; 2793 struct sk_buff *tail = NULL; 2794 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2795 unsigned int mss = skb_shinfo(skb)->gso_size; 2796 unsigned int doffset = skb->data - skb_mac_header(skb); 2797 unsigned int offset = doffset; 2798 unsigned int headroom; 2799 unsigned int len; 2800 int sg = !!(features & NETIF_F_SG); 2801 int nfrags = skb_shinfo(skb)->nr_frags; 2802 int err = -ENOMEM; 2803 int i = 0; 2804 int pos; 2805 2806 __skb_push(skb, doffset); 2807 headroom = skb_headroom(skb); 2808 pos = skb_headlen(skb); 2809 2810 do { 2811 struct sk_buff *nskb; 2812 skb_frag_t *frag; 2813 int hsize; 2814 int size; 2815 2816 len = skb->len - offset; 2817 if (len > mss) 2818 len = mss; 2819 2820 hsize = skb_headlen(skb) - offset; 2821 if (hsize < 0) 2822 hsize = 0; 2823 if (hsize > len || !sg) 2824 hsize = len; 2825 2826 if (!hsize && i >= nfrags) { 2827 BUG_ON(fskb->len != len); 2828 2829 pos += len; 2830 nskb = skb_clone(fskb, GFP_ATOMIC); 2831 fskb = fskb->next; 2832 2833 if (unlikely(!nskb)) 2834 goto err; 2835 2836 hsize = skb_end_offset(nskb); 2837 if (skb_cow_head(nskb, doffset + headroom)) { 2838 kfree_skb(nskb); 2839 goto err; 2840 } 2841 2842 nskb->truesize += skb_end_offset(nskb) - hsize; 2843 skb_release_head_state(nskb); 2844 __skb_push(nskb, doffset); 2845 } else { 2846 nskb = __alloc_skb(hsize + doffset + headroom, 2847 GFP_ATOMIC, skb_alloc_rx_flag(skb), 2848 NUMA_NO_NODE); 2849 2850 if (unlikely(!nskb)) 2851 goto err; 2852 2853 skb_reserve(nskb, headroom); 2854 __skb_put(nskb, doffset); 2855 } 2856 2857 if (segs) 2858 tail->next = nskb; 2859 else 2860 segs = nskb; 2861 tail = nskb; 2862 2863 __copy_skb_header(nskb, skb); 2864 nskb->mac_len = skb->mac_len; 2865 2866 /* nskb and skb might have different headroom */ 2867 if (nskb->ip_summed == CHECKSUM_PARTIAL) 2868 nskb->csum_start += skb_headroom(nskb) - headroom; 2869 2870 skb_reset_mac_header(nskb); 2871 skb_set_network_header(nskb, skb->mac_len); 2872 nskb->transport_header = (nskb->network_header + 2873 skb_network_header_len(skb)); 2874 skb_copy_from_linear_data(skb, nskb->data, doffset); 2875 2876 if (fskb != skb_shinfo(skb)->frag_list) 2877 continue; 2878 2879 if (!sg) { 2880 nskb->ip_summed = CHECKSUM_NONE; 2881 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2882 skb_put(nskb, len), 2883 len, 0); 2884 continue; 2885 } 2886 2887 frag = skb_shinfo(nskb)->frags; 2888 2889 skb_copy_from_linear_data_offset(skb, offset, 2890 skb_put(nskb, hsize), hsize); 2891 2892 while (pos < offset + len && i < nfrags) { 2893 *frag = skb_shinfo(skb)->frags[i]; 2894 __skb_frag_ref(frag); 2895 size = skb_frag_size(frag); 2896 2897 if (pos < offset) { 2898 frag->page_offset += offset - pos; 2899 skb_frag_size_sub(frag, offset - pos); 2900 } 2901 2902 
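			/* Account for the frag just shared into nskb; the page
			 * itself is reused from the original skb via the reference
			 * taken by __skb_frag_ref() above.
			 */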
skb_shinfo(nskb)->nr_frags++; 2903 2904 if (pos + size <= offset + len) { 2905 i++; 2906 pos += size; 2907 } else { 2908 skb_frag_size_sub(frag, pos + size - (offset + len)); 2909 goto skip_fraglist; 2910 } 2911 2912 frag++; 2913 } 2914 2915 if (pos < offset + len) { 2916 struct sk_buff *fskb2 = fskb; 2917 2918 BUG_ON(pos + fskb->len != offset + len); 2919 2920 pos += fskb->len; 2921 fskb = fskb->next; 2922 2923 if (fskb2->next) { 2924 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2925 if (!fskb2) 2926 goto err; 2927 } else 2928 skb_get(fskb2); 2929 2930 SKB_FRAG_ASSERT(nskb); 2931 skb_shinfo(nskb)->frag_list = fskb2; 2932 } 2933 2934 skip_fraglist: 2935 nskb->data_len = len - hsize; 2936 nskb->len += nskb->data_len; 2937 nskb->truesize += nskb->data_len; 2938 } while ((offset += len) < skb->len); 2939 2940 return segs; 2941 2942 err: 2943 while ((skb = segs)) { 2944 segs = skb->next; 2945 kfree_skb(skb); 2946 } 2947 return ERR_PTR(err); 2948 } 2949 EXPORT_SYMBOL_GPL(skb_segment); 2950 2951 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2952 { 2953 struct sk_buff *p = *head; 2954 struct sk_buff *nskb; 2955 struct skb_shared_info *skbinfo = skb_shinfo(skb); 2956 struct skb_shared_info *pinfo = skb_shinfo(p); 2957 unsigned int headroom; 2958 unsigned int len = skb_gro_len(skb); 2959 unsigned int offset = skb_gro_offset(skb); 2960 unsigned int headlen = skb_headlen(skb); 2961 unsigned int delta_truesize; 2962 2963 if (p->len + len >= 65536) 2964 return -E2BIG; 2965 2966 if (pinfo->frag_list) 2967 goto merge; 2968 else if (headlen <= offset) { 2969 skb_frag_t *frag; 2970 skb_frag_t *frag2; 2971 int i = skbinfo->nr_frags; 2972 int nr_frags = pinfo->nr_frags + i; 2973 2974 offset -= headlen; 2975 2976 if (nr_frags > MAX_SKB_FRAGS) 2977 return -E2BIG; 2978 2979 pinfo->nr_frags = nr_frags; 2980 skbinfo->nr_frags = 0; 2981 2982 frag = pinfo->frags + nr_frags; 2983 frag2 = skbinfo->frags + i; 2984 do { 2985 *--frag = *--frag2; 2986 } while (--i); 2987 2988 frag->page_offset += offset; 2989 skb_frag_size_sub(frag, offset); 2990 2991 /* all fragments truesize : remove (head size + sk_buff) */ 2992 delta_truesize = skb->truesize - 2993 SKB_TRUESIZE(skb_end_offset(skb)); 2994 2995 skb->truesize -= skb->data_len; 2996 skb->len -= skb->data_len; 2997 skb->data_len = 0; 2998 2999 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 3000 goto done; 3001 } else if (skb->head_frag) { 3002 int nr_frags = pinfo->nr_frags; 3003 skb_frag_t *frag = pinfo->frags + nr_frags; 3004 struct page *page = virt_to_head_page(skb->head); 3005 unsigned int first_size = headlen - offset; 3006 unsigned int first_offset; 3007 3008 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 3009 return -E2BIG; 3010 3011 first_offset = skb->data - 3012 (unsigned char *)page_address(page) + 3013 offset; 3014 3015 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3016 3017 frag->page.p = page; 3018 frag->page_offset = first_offset; 3019 skb_frag_size_set(frag, first_size); 3020 3021 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3022 /* We dont need to clear skbinfo->nr_frags here */ 3023 3024 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3025 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3026 goto done; 3027 } else if (skb_gro_len(p) != pinfo->gso_size) 3028 return -E2BIG; 3029 3030 headroom = skb_headroom(p); 3031 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 3032 if (unlikely(!nskb)) 3033 return -ENOMEM; 3034 3035 __copy_skb_header(nskb, p); 3036 nskb->mac_len = 
p->mac_len; 3037 3038 skb_reserve(nskb, headroom); 3039 __skb_put(nskb, skb_gro_offset(p)); 3040 3041 skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 3042 skb_set_network_header(nskb, skb_network_offset(p)); 3043 skb_set_transport_header(nskb, skb_transport_offset(p)); 3044 3045 __skb_pull(p, skb_gro_offset(p)); 3046 memcpy(skb_mac_header(nskb), skb_mac_header(p), 3047 p->data - skb_mac_header(p)); 3048 3049 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 3050 skb_shinfo(nskb)->frag_list = p; 3051 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 3052 pinfo->gso_size = 0; 3053 skb_header_release(p); 3054 nskb->prev = p; 3055 3056 nskb->data_len += p->len; 3057 nskb->truesize += p->truesize; 3058 nskb->len += p->len; 3059 3060 *head = nskb; 3061 nskb->next = p->next; 3062 p->next = NULL; 3063 3064 p = nskb; 3065 3066 merge: 3067 delta_truesize = skb->truesize; 3068 if (offset > headlen) { 3069 unsigned int eat = offset - headlen; 3070 3071 skbinfo->frags[0].page_offset += eat; 3072 skb_frag_size_sub(&skbinfo->frags[0], eat); 3073 skb->data_len -= eat; 3074 skb->len -= eat; 3075 offset = headlen; 3076 } 3077 3078 __skb_pull(skb, offset); 3079 3080 p->prev->next = skb; 3081 p->prev = skb; 3082 skb_header_release(skb); 3083 3084 done: 3085 NAPI_GRO_CB(p)->count++; 3086 p->data_len += len; 3087 p->truesize += delta_truesize; 3088 p->len += len; 3089 3090 NAPI_GRO_CB(skb)->same_flow = 1; 3091 return 0; 3092 } 3093 EXPORT_SYMBOL_GPL(skb_gro_receive); 3094 3095 void __init skb_init(void) 3096 { 3097 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3098 sizeof(struct sk_buff), 3099 0, 3100 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3101 NULL); 3102 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3103 (2*sizeof(struct sk_buff)) + 3104 sizeof(atomic_t), 3105 0, 3106 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3107 NULL); 3108 } 3109 3110 /** 3111 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3112 * @skb: Socket buffer containing the buffers to be mapped 3113 * @sg: The scatter-gather list to map into 3114 * @offset: The offset into the buffer's contents to start mapping 3115 * @len: Length of buffer space to be mapped 3116 * 3117 * Fill the specified scatter-gather list with mappings/pointers into a 3118 * region of the buffer space attached to a socket buffer. 
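 *
 * Illustrative (hypothetical) use for an skb without a frag_list; real
 * callers typically size the scatterlist from skb_cow_data() first:
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);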
3119 */ 3120 static int 3121 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3122 { 3123 int start = skb_headlen(skb); 3124 int i, copy = start - offset; 3125 struct sk_buff *frag_iter; 3126 int elt = 0; 3127 3128 if (copy > 0) { 3129 if (copy > len) 3130 copy = len; 3131 sg_set_buf(sg, skb->data + offset, copy); 3132 elt++; 3133 if ((len -= copy) == 0) 3134 return elt; 3135 offset += copy; 3136 } 3137 3138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3139 int end; 3140 3141 WARN_ON(start > offset + len); 3142 3143 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3144 if ((copy = end - offset) > 0) { 3145 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3146 3147 if (copy > len) 3148 copy = len; 3149 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3150 frag->page_offset+offset-start); 3151 elt++; 3152 if (!(len -= copy)) 3153 return elt; 3154 offset += copy; 3155 } 3156 start = end; 3157 } 3158 3159 skb_walk_frags(skb, frag_iter) { 3160 int end; 3161 3162 WARN_ON(start > offset + len); 3163 3164 end = start + frag_iter->len; 3165 if ((copy = end - offset) > 0) { 3166 if (copy > len) 3167 copy = len; 3168 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3169 copy); 3170 if ((len -= copy) == 0) 3171 return elt; 3172 offset += copy; 3173 } 3174 start = end; 3175 } 3176 BUG_ON(len); 3177 return elt; 3178 } 3179 3180 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3181 { 3182 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3183 3184 sg_mark_end(&sg[nsg - 1]); 3185 3186 return nsg; 3187 } 3188 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3189 3190 /** 3191 * skb_cow_data - Check that a socket buffer's data buffers are writable 3192 * @skb: The socket buffer to check. 3193 * @tailbits: Amount of trailing space to be added 3194 * @trailer: Returned pointer to the skb where the @tailbits space begins 3195 * 3196 * Make sure that the data buffers attached to a socket buffer are 3197 * writable. If they are not, private copies are made of the data buffers 3198 * and the socket buffer is set to use these instead. 3199 * 3200 * If @tailbits is given, make sure that there is space to write @tailbits 3201 * bytes of data beyond current end of socket buffer. @trailer will be 3202 * set to point to the skb in which this space begins. 3203 * 3204 * The number of scatterlist elements required to completely map the 3205 * COW'd and extended socket buffer will be returned. 3206 */ 3207 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3208 { 3209 int copyflag; 3210 int elt; 3211 struct sk_buff *skb1, **skb_p; 3212 3213 /* If skb is cloned or its head is paged, reallocate 3214 * head pulling out all the pages (pages are considered not writable 3215 * at the moment even if they are anonymous). 3216 */ 3217 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3218 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3219 return -ENOMEM; 3220 3221 /* Easy case. Most of packets will go this way. */ 3222 if (!skb_has_frag_list(skb)) { 3223 /* A little of trouble, not enough of space for trailer. 3224 * This should not happen, when stack is tuned to generate 3225 * good frames. OK, on miss we reallocate and reserve even more 3226 * space, 128 bytes is fair. */ 3227 3228 if (skb_tailroom(skb) < tailbits && 3229 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3230 return -ENOMEM; 3231 3232 /* Voila! */ 3233 *trailer = skb; 3234 return 1; 3235 } 3236 3237 /* Misery. 
We are in troubles, going to mincer fragments... */ 3238 3239 elt = 1; 3240 skb_p = &skb_shinfo(skb)->frag_list; 3241 copyflag = 0; 3242 3243 while ((skb1 = *skb_p) != NULL) { 3244 int ntail = 0; 3245 3246 /* The fragment is partially pulled by someone, 3247 * this can happen on input. Copy it and everything 3248 * after it. */ 3249 3250 if (skb_shared(skb1)) 3251 copyflag = 1; 3252 3253 /* If the skb is the last, worry about trailer. */ 3254 3255 if (skb1->next == NULL && tailbits) { 3256 if (skb_shinfo(skb1)->nr_frags || 3257 skb_has_frag_list(skb1) || 3258 skb_tailroom(skb1) < tailbits) 3259 ntail = tailbits + 128; 3260 } 3261 3262 if (copyflag || 3263 skb_cloned(skb1) || 3264 ntail || 3265 skb_shinfo(skb1)->nr_frags || 3266 skb_has_frag_list(skb1)) { 3267 struct sk_buff *skb2; 3268 3269 /* Fuck, we are miserable poor guys... */ 3270 if (ntail == 0) 3271 skb2 = skb_copy(skb1, GFP_ATOMIC); 3272 else 3273 skb2 = skb_copy_expand(skb1, 3274 skb_headroom(skb1), 3275 ntail, 3276 GFP_ATOMIC); 3277 if (unlikely(skb2 == NULL)) 3278 return -ENOMEM; 3279 3280 if (skb1->sk) 3281 skb_set_owner_w(skb2, skb1->sk); 3282 3283 /* Looking around. Are we still alive? 3284 * OK, link new skb, drop old one */ 3285 3286 skb2->next = skb1->next; 3287 *skb_p = skb2; 3288 kfree_skb(skb1); 3289 skb1 = skb2; 3290 } 3291 elt++; 3292 *trailer = skb1; 3293 skb_p = &skb1->next; 3294 } 3295 3296 return elt; 3297 } 3298 EXPORT_SYMBOL_GPL(skb_cow_data); 3299 3300 static void sock_rmem_free(struct sk_buff *skb) 3301 { 3302 struct sock *sk = skb->sk; 3303 3304 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3305 } 3306 3307 /* 3308 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3309 */ 3310 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3311 { 3312 int len = skb->len; 3313 3314 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3315 (unsigned int)sk->sk_rcvbuf) 3316 return -ENOMEM; 3317 3318 skb_orphan(skb); 3319 skb->sk = sk; 3320 skb->destructor = sock_rmem_free; 3321 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3322 3323 /* before exiting rcu section, make sure dst is refcounted */ 3324 skb_dst_force(skb); 3325 3326 skb_queue_tail(&sk->sk_error_queue, skb); 3327 if (!sock_flag(sk, SOCK_DEAD)) 3328 sk->sk_data_ready(sk, len); 3329 return 0; 3330 } 3331 EXPORT_SYMBOL(sock_queue_err_skb); 3332 3333 void skb_tstamp_tx(struct sk_buff *orig_skb, 3334 struct skb_shared_hwtstamps *hwtstamps) 3335 { 3336 struct sock *sk = orig_skb->sk; 3337 struct sock_exterr_skb *serr; 3338 struct sk_buff *skb; 3339 int err; 3340 3341 if (!sk) 3342 return; 3343 3344 skb = skb_clone(orig_skb, GFP_ATOMIC); 3345 if (!skb) 3346 return; 3347 3348 if (hwtstamps) { 3349 *skb_hwtstamps(skb) = 3350 *hwtstamps; 3351 } else { 3352 /* 3353 * no hardware time stamps available, 3354 * so keep the shared tx_flags and only 3355 * store software time stamp 3356 */ 3357 skb->tstamp = ktime_get_real(); 3358 } 3359 3360 serr = SKB_EXT_ERR(skb); 3361 memset(serr, 0, sizeof(*serr)); 3362 serr->ee.ee_errno = ENOMSG; 3363 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3364 3365 err = sock_queue_err_skb(sk, skb); 3366 3367 if (err) 3368 kfree_skb(skb); 3369 } 3370 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3371 3372 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3373 { 3374 struct sock *sk = skb->sk; 3375 struct sock_exterr_skb *serr; 3376 int err; 3377 3378 skb->wifi_acked_valid = 1; 3379 skb->wifi_acked = acked; 3380 3381 serr = SKB_EXT_ERR(skb); 3382 memset(serr, 0, sizeof(*serr)); 3383 serr->ee.ee_errno = ENOMSG; 3384 
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3385 3386 err = sock_queue_err_skb(sk, skb); 3387 if (err) 3388 kfree_skb(skb); 3389 } 3390 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3391 3392 3393 /** 3394 * skb_partial_csum_set - set up and verify partial csum values for packet 3395 * @skb: the skb to set 3396 * @start: the number of bytes after skb->data to start checksumming. 3397 * @off: the offset from start to place the checksum. 3398 * 3399 * For untrusted partially-checksummed packets, we need to make sure the values 3400 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3401 * 3402 * This function checks and sets those values and skb->ip_summed: if this 3403 * returns false you should drop the packet. 3404 */ 3405 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3406 { 3407 if (unlikely(start > skb_headlen(skb)) || 3408 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3409 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 3410 start, off, skb_headlen(skb)); 3411 return false; 3412 } 3413 skb->ip_summed = CHECKSUM_PARTIAL; 3414 skb->csum_start = skb_headroom(skb) + start; 3415 skb->csum_offset = off; 3416 return true; 3417 } 3418 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3419 3420 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 3421 { 3422 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 3423 skb->dev->name); 3424 } 3425 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3426 3427 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 3428 { 3429 if (head_stolen) 3430 kmem_cache_free(skbuff_head_cache, skb); 3431 else 3432 __kfree_skb(skb); 3433 } 3434 EXPORT_SYMBOL(kfree_skb_partial); 3435 3436 /** 3437 * skb_try_coalesce - try to merge skb to prior one 3438 * @to: prior buffer 3439 * @from: buffer to add 3440 * @fragstolen: pointer to boolean 3441 * @delta_truesize: how much more was allocated than was requested 3442 */ 3443 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3444 bool *fragstolen, int *delta_truesize) 3445 { 3446 int i, delta, len = from->len; 3447 3448 *fragstolen = false; 3449 3450 if (skb_cloned(to)) 3451 return false; 3452 3453 if (len <= skb_tailroom(to)) { 3454 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 3455 *delta_truesize = 0; 3456 return true; 3457 } 3458 3459 if (skb_has_frag_list(to) || skb_has_frag_list(from)) 3460 return false; 3461 3462 if (skb_headlen(from) != 0) { 3463 struct page *page; 3464 unsigned int offset; 3465 3466 if (skb_shinfo(to)->nr_frags + 3467 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 3468 return false; 3469 3470 if (skb_head_is_locked(from)) 3471 return false; 3472 3473 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3474 3475 page = virt_to_head_page(from->head); 3476 offset = from->data - (unsigned char *)page_address(page); 3477 3478 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 3479 page, offset, skb_headlen(from)); 3480 *fragstolen = true; 3481 } else { 3482 if (skb_shinfo(to)->nr_frags + 3483 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 3484 return false; 3485 3486 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 3487 } 3488 3489 WARN_ON_ONCE(delta < len); 3490 3491 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 3492 skb_shinfo(from)->frags, 3493 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 3494 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 3495 3496 if (!skb_cloned(from)) 3497 skb_shinfo(from)->nr_frags = 0; 3498 3499 /* if the skb is not cloned 
this does nothing 3500 * since we set nr_frags to 0. 3501 */ 3502 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) 3503 skb_frag_ref(from, i); 3504 3505 to->truesize += delta; 3506 to->len += len; 3507 to->data_len += len; 3508 3509 *delta_truesize = delta; 3510 return true; 3511 } 3512 EXPORT_SYMBOL(skb_try_coalesce); 3513
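
/*
 * Illustrative (hypothetical) use of skb_try_coalesce() by a receive
 * path merging a new buffer into the previously queued one ("tail" is
 * a made-up pointer to that skb, with any queue locking already done
 * by the caller):
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		(the caller then accounts the extra delta bytes of truesize)
 *	}
 */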