/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
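
/*
 * Illustrative usage sketch (not part of the original file): a typical caller
 * goes through the alloc_skb() wrapper, reserves headroom for the headers it
 * expects to push later, and then appends payload with skb_put(). The names
 * hdr_len, payload_len and payload below are hypothetical and only serve the
 * example:
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */
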
/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where the NIC puts the
 *  incoming frame.
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
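
/*
 * Illustrative RX-path sketch (not part of the original file): a driver that
 * owns its receive buffers sizes them so skb_shared_info fits at the end,
 * lets the NIC DMA the frame in, and only then builds the skb around the
 * buffer, mirroring what __netdev_alloc_skb() below does. frame_len and buf
 * are hypothetical names:
 *
 *	fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	buf = netdev_alloc_frag(fragsz);
 *	... hardware writes the frame at buf + NET_SKB_PAD ...
 *	skb = build_skb(buf, fragsz);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */
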
struct netdev_alloc_cache {
	struct page *page;
	unsigned int offset;
	unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->page)) {
refill:
		nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
		if (unlikely(!nc->page))
			goto end;
recycle:
		atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
		nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
		nc->offset = 0;
	}

	if (nc->offset + fragsz > PAGE_SIZE) {
		/* avoid unnecessary locked operations if possible */
		if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
		    atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
			goto recycle;
		goto refill;
	}

	data = page_address(nc->page) + nc->offset;
	nc->offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data = netdev_alloc_frag(fragsz);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
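
/*
 * Illustrative sketch (not part of the original file): the common way a
 * driver refills its RX ring is via netdev_alloc_skb() (or the _ip_align
 * variant), which ends up in __netdev_alloc_skb() above with NET_SKB_PAD of
 * headroom already reserved. rx_buf_len is a hypothetical per-driver size:
 *
 *	skb = netdev_alloc_skb(dev, rx_buf_len);
 *	if (!skb)
 *		break;
 *	... map skb->data for DMA and hand the buffer to the hardware ...
 */
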
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb buf is from userspace, we need to notify the
		 * caller that the lower device's DMA has finished;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that via its
 *	tracepoint.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
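
/*
 * Illustrative sketch (not part of the original file): the difference between
 * the two helpers above is only which tracepoint fires. A transmit-completion
 * path would normally use consume_skb(), while an error path that drops the
 * packet would use kfree_skb():
 *
 *	if (unlikely(err))
 *		kfree_skb(skb);		packet dropped
 *	else
 *		consume_skb(skb);	packet delivered successfully
 */
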
/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs - copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
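
/*
 * Illustrative sketch (not part of the original file): a clone shares the
 * data area with the original, so it fits when the packet only needs to be
 * handed to an additional consumer unchanged, e.g. delivering one copy while
 * keeping the original queued:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		netif_rx(nskb);
 *
 * Anyone who intends to modify the payload must instead use skb_copy() or
 * pskb_copy() (below), or make the head private with pskb_expand_head().
 */
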
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy - create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);
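
/*
 * Illustrative sketch (not part of the original file): when only the headers
 * need to be rewritten, pskb_copy() avoids duplicating the (possibly large)
 * paged data that skb_copy() would copy and linearize:
 *
 *	new = pskb_copy(skb, GFP_ATOMIC);	headers private, frags shared
 *	...
 *	new = skb_copy(skb, GFP_ATOMIC);	everything private, fully linear
 */
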
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if &nhead and &ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or a
 *	negative error code if expansion failed. In the latter case, &sk_buff
 *	is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       gfp_mask);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
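
/*
 * Illustrative sketch (not part of the original file): a tunnel or
 * encapsulation path that wants to skb_push() a new header usually makes
 * sure there is writable headroom first, expanding the head only when
 * needed. encap_len and hdr are hypothetical names; skb_cow_head() wraps
 * essentially this pattern:
 *
 *	if (skb_headroom(skb) < encap_len || skb_cloned(skb)) {
 *		if (pskb_expand_head(skb, SKB_DATA_ALIGN(encap_len), 0,
 *				     GFP_ATOMIC))
 *			goto drop;
 *	}
 *	hdr = skb_push(skb, encap_len);
 */
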
/**
 *	skb_copy_expand - copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad - zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start.
 *	If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
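
/*
 * Illustrative sketch (not part of the original file): the helpers above
 * adjust skb->data and skb->tail without copying. Building a frame typically
 * reserves room, appends payload with skb_put() and prepends each header
 * with skb_push(); a receive path peels headers off again with skb_pull().
 * header_len, data_len, data and iph are hypothetical names:
 *
 *	skb_reserve(skb, header_len);
 *	memcpy(skb_put(skb, data_len), data, data_len);
 *	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 *	...
 *	skb_pull(skb, sizeof(*iph));
 */
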
/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying necessary
 *	data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves here
	 * instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data.
	 */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
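
/*
 * Illustrative sketch (not part of the original file): because an skb may be
 * non-linear, code that needs a contiguous view of a few bytes either uses
 * skb_header_pointer()/pskb_may_pull() or copies them out explicitly with
 * skb_copy_bits(). offset is a hypothetical byte offset into the packet:
 *
 *	u8 buf[8];
 *
 *	if (skb_copy_bits(skb, offset, buf, sizeof(buf)) < 0)
 *		goto bad_packet;
 */
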
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		/* If we are the only user of the page, we can reset offset */
		if (page_count(p) == 1)
			sk->sk_sndmsg_off = 0;
		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;

	return p;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  struct sk_buff *skb, bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len, struct sk_buff *skb,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages  */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return true;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, skb, spd, false, sk, pipe))
			return true;
	}

	return false;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(frag));
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_atomic(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);
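
/*
 * Illustrative sketch (not part of the original file): skb_checksum() folds
 * the walk over the linear data, the page frags and the frag list into one
 * call, e.g. when verifying a packet checksum in software:
 *
 *	__wsum csum = skb_checksum(skb, 0, skb->len, 0);
 *
 *	if (csum_fold(csum))
 *		goto csum_error;
 */
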
/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_atomic(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
2103 */ 2104 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 2105 { 2106 unsigned long flags; 2107 struct sk_buff *result; 2108 2109 spin_lock_irqsave(&list->lock, flags); 2110 result = __skb_dequeue_tail(list); 2111 spin_unlock_irqrestore(&list->lock, flags); 2112 return result; 2113 } 2114 EXPORT_SYMBOL(skb_dequeue_tail); 2115 2116 /** 2117 * skb_queue_purge - empty a list 2118 * @list: list to empty 2119 * 2120 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2121 * the list and one reference dropped. This function takes the list 2122 * lock and is atomic with respect to other list locking functions. 2123 */ 2124 void skb_queue_purge(struct sk_buff_head *list) 2125 { 2126 struct sk_buff *skb; 2127 while ((skb = skb_dequeue(list)) != NULL) 2128 kfree_skb(skb); 2129 } 2130 EXPORT_SYMBOL(skb_queue_purge); 2131 2132 /** 2133 * skb_queue_head - queue a buffer at the list head 2134 * @list: list to use 2135 * @newsk: buffer to queue 2136 * 2137 * Queue a buffer at the start of the list. This function takes the 2138 * list lock and can be used safely with other locking &sk_buff 2139 * functions. 2140 * 2141 * A buffer cannot be placed on two lists at the same time. 2142 */ 2143 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 2144 { 2145 unsigned long flags; 2146 2147 spin_lock_irqsave(&list->lock, flags); 2148 __skb_queue_head(list, newsk); 2149 spin_unlock_irqrestore(&list->lock, flags); 2150 } 2151 EXPORT_SYMBOL(skb_queue_head); 2152 2153 /** 2154 * skb_queue_tail - queue a buffer at the list tail 2155 * @list: list to use 2156 * @newsk: buffer to queue 2157 * 2158 * Queue a buffer at the tail of the list. This function takes the 2159 * list lock and can be used safely with other locking &sk_buff 2160 * functions. 2161 * 2162 * A buffer cannot be placed on two lists at the same time. 2163 */ 2164 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 2165 { 2166 unsigned long flags; 2167 2168 spin_lock_irqsave(&list->lock, flags); 2169 __skb_queue_tail(list, newsk); 2170 spin_unlock_irqrestore(&list->lock, flags); 2171 } 2172 EXPORT_SYMBOL(skb_queue_tail); 2173 2174 /** 2175 * skb_unlink - remove a buffer from a list 2176 * @skb: buffer to remove 2177 * @list: list to use 2178 * 2179 * Remove a packet from a list. The list locks are taken and this 2180 * function is atomic with respect to other list locked calls. 2181 * 2182 * You must know what list the SKB is on. 2183 */ 2184 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 2185 { 2186 unsigned long flags; 2187 2188 spin_lock_irqsave(&list->lock, flags); 2189 __skb_unlink(skb, list); 2190 spin_unlock_irqrestore(&list->lock, flags); 2191 } 2192 EXPORT_SYMBOL(skb_unlink); 2193 2194 /** 2195 * skb_append - append a buffer 2196 * @old: buffer to insert after 2197 * @newsk: buffer to insert 2198 * @list: list to use 2199 * 2200 * Place a packet after a given packet in a list. The list locks are taken 2201 * and this function is atomic with respect to other list locked calls. 2202 * A buffer cannot be placed on two lists at the same time.
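 *
 * Under the hood this is __skb_queue_after() run with the queue lock
 * held.  A minimal sketch, assuming "old" is already linked on queue "q"
 * and "newsk" is not on any list:
 *
 *	skb_append(old, newsk, &q);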
2203 */ 2204 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2205 { 2206 unsigned long flags; 2207 2208 spin_lock_irqsave(&list->lock, flags); 2209 __skb_queue_after(list, old, newsk); 2210 spin_unlock_irqrestore(&list->lock, flags); 2211 } 2212 EXPORT_SYMBOL(skb_append); 2213 2214 /** 2215 * skb_insert - insert a buffer 2216 * @old: buffer to insert before 2217 * @newsk: buffer to insert 2218 * @list: list to use 2219 * 2220 * Place a packet before a given packet in a list. The list locks are 2221 * taken and this function is atomic with respect to other list locked 2222 * calls. 2223 * 2224 * A buffer cannot be placed on two lists at the same time. 2225 */ 2226 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2227 { 2228 unsigned long flags; 2229 2230 spin_lock_irqsave(&list->lock, flags); 2231 __skb_insert(newsk, old->prev, old, list); 2232 spin_unlock_irqrestore(&list->lock, flags); 2233 } 2234 EXPORT_SYMBOL(skb_insert); 2235 2236 static inline void skb_split_inside_header(struct sk_buff *skb, 2237 struct sk_buff* skb1, 2238 const u32 len, const int pos) 2239 { 2240 int i; 2241 2242 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2243 pos - len); 2244 /* And move data appendix as is. */ 2245 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2246 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2247 2248 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2249 skb_shinfo(skb)->nr_frags = 0; 2250 skb1->data_len = skb->data_len; 2251 skb1->len += skb1->data_len; 2252 skb->data_len = 0; 2253 skb->len = len; 2254 skb_set_tail_pointer(skb, len); 2255 } 2256 2257 static inline void skb_split_no_header(struct sk_buff *skb, 2258 struct sk_buff* skb1, 2259 const u32 len, int pos) 2260 { 2261 int i, k = 0; 2262 const int nfrags = skb_shinfo(skb)->nr_frags; 2263 2264 skb_shinfo(skb)->nr_frags = 0; 2265 skb1->len = skb1->data_len = skb->len - len; 2266 skb->len = len; 2267 skb->data_len = len - pos; 2268 2269 for (i = 0; i < nfrags; i++) { 2270 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2271 2272 if (pos + size > len) { 2273 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2274 2275 if (pos < len) { 2276 /* Split frag. 2277 * We have two variants in this case: 2278 * 1. Move all the frag to the second 2279 * part, if it is possible. F.e. 2280 * this approach is mandatory for TUX, 2281 * where splitting is expensive. 2282 * 2. Split is accurately. We make this. 2283 */ 2284 skb_frag_ref(skb, i); 2285 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2286 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2287 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2288 skb_shinfo(skb)->nr_frags++; 2289 } 2290 k++; 2291 } else 2292 skb_shinfo(skb)->nr_frags++; 2293 pos += size; 2294 } 2295 skb_shinfo(skb1)->nr_frags = k; 2296 } 2297 2298 /** 2299 * skb_split - Split fragmented skb to two parts at length len. 2300 * @skb: the buffer to split 2301 * @skb1: the buffer to receive the second part 2302 * @len: new length for skb 2303 */ 2304 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2305 { 2306 int pos = skb_headlen(skb); 2307 2308 if (len < pos) /* Split line is inside header. */ 2309 skb_split_inside_header(skb, skb1, len, pos); 2310 else /* Second chunk has no header, nothing to copy. */ 2311 skb_split_no_header(skb, skb1, len, pos); 2312 } 2313 EXPORT_SYMBOL(skb_split); 2314 2315 /* Shifting from/to a cloned skb is a no-go. 
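 *
 * (A clone shares its skb_shared_info with the original skb, so the frag
 * array must not be edited in place.  skb_prepare_for_shift() below uses
 * pskb_expand_head() to give the skb a private copy first, which is why
 * any cached skb_shinfo related pointers become invalid.)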
2316 * 2317 * Caller cannot keep skb_shinfo related pointers past calling here! 2318 */ 2319 static int skb_prepare_for_shift(struct sk_buff *skb) 2320 { 2321 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2322 } 2323 2324 /** 2325 * skb_shift - Shifts paged data partially from skb to another 2326 * @tgt: buffer into which tail data gets added 2327 * @skb: buffer from which the paged data comes 2328 * @shiftlen: shift up to this many bytes 2329 * 2330 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2331 * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 2332 * It's up to the caller to free skb if everything was shifted. 2333 * 2334 * If @tgt runs out of frags, the whole operation is aborted. 2335 * 2336 * Skb cannot include anything but paged data, while tgt is allowed 2337 * to have non-paged data as well. 2338 * 2339 * TODO: a full-sized shift could be optimized, but that would need a 2340 * specialized skb free'er to handle frags without up-to-date nr_frags. 2341 */ 2342 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2343 { 2344 int from, to, merge, todo; 2345 struct skb_frag_struct *fragfrom, *fragto; 2346 2347 BUG_ON(shiftlen > skb->len); 2348 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2349 2350 todo = shiftlen; 2351 from = 0; 2352 to = skb_shinfo(tgt)->nr_frags; 2353 fragfrom = &skb_shinfo(skb)->frags[from]; 2354 2355 /* Actual merge is delayed until the point when we know we can 2356 * commit all, so that we don't have to undo partial changes 2357 */ 2358 if (!to || 2359 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2360 fragfrom->page_offset)) { 2361 merge = -1; 2362 } else { 2363 merge = to - 1; 2364 2365 todo -= skb_frag_size(fragfrom); 2366 if (todo < 0) { 2367 if (skb_prepare_for_shift(skb) || 2368 skb_prepare_for_shift(tgt)) 2369 return 0; 2370 2371 /* All previous frag pointers might be stale!
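 * (skb_prepare_for_shift() may have called pskb_expand_head(), which
 * reallocates the data area and skb_shared_info, so fragfrom/fragto are
 * re-derived from skb_shinfo() below before being used.)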
*/ 2372 fragfrom = &skb_shinfo(skb)->frags[from]; 2373 fragto = &skb_shinfo(tgt)->frags[merge]; 2374 2375 skb_frag_size_add(fragto, shiftlen); 2376 skb_frag_size_sub(fragfrom, shiftlen); 2377 fragfrom->page_offset += shiftlen; 2378 2379 goto onlymerged; 2380 } 2381 2382 from++; 2383 } 2384 2385 /* Skip full, not-fitting skb to avoid expensive operations */ 2386 if ((shiftlen == skb->len) && 2387 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2388 return 0; 2389 2390 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2391 return 0; 2392 2393 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2394 if (to == MAX_SKB_FRAGS) 2395 return 0; 2396 2397 fragfrom = &skb_shinfo(skb)->frags[from]; 2398 fragto = &skb_shinfo(tgt)->frags[to]; 2399 2400 if (todo >= skb_frag_size(fragfrom)) { 2401 *fragto = *fragfrom; 2402 todo -= skb_frag_size(fragfrom); 2403 from++; 2404 to++; 2405 2406 } else { 2407 __skb_frag_ref(fragfrom); 2408 fragto->page = fragfrom->page; 2409 fragto->page_offset = fragfrom->page_offset; 2410 skb_frag_size_set(fragto, todo); 2411 2412 fragfrom->page_offset += todo; 2413 skb_frag_size_sub(fragfrom, todo); 2414 todo = 0; 2415 2416 to++; 2417 break; 2418 } 2419 } 2420 2421 /* Ready to "commit" this state change to tgt */ 2422 skb_shinfo(tgt)->nr_frags = to; 2423 2424 if (merge >= 0) { 2425 fragfrom = &skb_shinfo(skb)->frags[0]; 2426 fragto = &skb_shinfo(tgt)->frags[merge]; 2427 2428 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2429 __skb_frag_unref(fragfrom); 2430 } 2431 2432 /* Reposition in the original skb */ 2433 to = 0; 2434 while (from < skb_shinfo(skb)->nr_frags) 2435 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2436 skb_shinfo(skb)->nr_frags = to; 2437 2438 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2439 2440 onlymerged: 2441 /* Most likely the tgt won't ever need its checksum anymore, skb on 2442 * the other hand might need it if it needs to be resent 2443 */ 2444 tgt->ip_summed = CHECKSUM_PARTIAL; 2445 skb->ip_summed = CHECKSUM_PARTIAL; 2446 2447 /* Yak, is it really working this way? Some helper please? */ 2448 skb->len -= shiftlen; 2449 skb->data_len -= shiftlen; 2450 skb->truesize -= shiftlen; 2451 tgt->len += shiftlen; 2452 tgt->data_len += shiftlen; 2453 tgt->truesize += shiftlen; 2454 2455 return shiftlen; 2456 } 2457 2458 /** 2459 * skb_prepare_seq_read - Prepare a sequential read of skb data 2460 * @skb: the buffer to read 2461 * @from: lower offset of data to be read 2462 * @to: upper offset of data to be read 2463 * @st: state variable 2464 * 2465 * Initializes the specified state variable. Must be called before 2466 * invoking skb_seq_read() for the first time. 2467 */ 2468 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2469 unsigned int to, struct skb_seq_state *st) 2470 { 2471 st->lower_offset = from; 2472 st->upper_offset = to; 2473 st->root_skb = st->cur_skb = skb; 2474 st->frag_idx = st->stepped_offset = 0; 2475 st->frag_data = NULL; 2476 } 2477 EXPORT_SYMBOL(skb_prepare_seq_read); 2478 2479 /** 2480 * skb_seq_read - Sequentially read skb data 2481 * @consumed: number of bytes consumed by the caller so far 2482 * @data: destination pointer for data to be returned 2483 * @st: state variable 2484 * 2485 * Reads a block of skb data at &consumed relative to the 2486 * lower offset specified to skb_prepare_seq_read(). Assigns 2487 * the head of the data block to &data and returns the length 2488 * of the block or 0 if the end of the skb data or the upper 2489 * offset has been reached. 
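 *
 * A minimal usage sketch, assuming a caller-supplied consumer
 * "process()" (illustrative only, not part of this file):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (!process(data, len)) {
 *			skb_abort_seq_read(&st);	/* stopped early */
 *			break;
 *		}
 *		consumed += len;
 *	}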
2490 * 2491 * The caller is not required to consume all of the data 2492 * returned, i.e. &consumed is typically set to the number 2493 * of bytes already consumed and the next call to 2494 * skb_seq_read() will return the remaining part of the block. 2495 * 2496 * Note 1: The size of each block of data returned can be arbitrary; 2497 * this limitation is the cost of zerocopy sequential 2498 * reads of potentially non-linear data. 2499 * 2500 * Note 2: Fragment lists within fragments are not implemented 2501 * at the moment, state->root_skb could be replaced with 2502 * a stack for this purpose. 2503 */ 2504 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2505 struct skb_seq_state *st) 2506 { 2507 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2508 skb_frag_t *frag; 2509 2510 if (unlikely(abs_offset >= st->upper_offset)) 2511 return 0; 2512 2513 next_skb: 2514 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2515 2516 if (abs_offset < block_limit && !st->frag_data) { 2517 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2518 return block_limit - abs_offset; 2519 } 2520 2521 if (st->frag_idx == 0 && !st->frag_data) 2522 st->stepped_offset += skb_headlen(st->cur_skb); 2523 2524 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2525 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2526 block_limit = skb_frag_size(frag) + st->stepped_offset; 2527 2528 if (abs_offset < block_limit) { 2529 if (!st->frag_data) 2530 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2531 2532 *data = (u8 *) st->frag_data + frag->page_offset + 2533 (abs_offset - st->stepped_offset); 2534 2535 return block_limit - abs_offset; 2536 } 2537 2538 if (st->frag_data) { 2539 kunmap_atomic(st->frag_data); 2540 st->frag_data = NULL; 2541 } 2542 2543 st->frag_idx++; 2544 st->stepped_offset += skb_frag_size(frag); 2545 } 2546 2547 if (st->frag_data) { 2548 kunmap_atomic(st->frag_data); 2549 st->frag_data = NULL; 2550 } 2551 2552 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2553 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2554 st->frag_idx = 0; 2555 goto next_skb; 2556 } else if (st->cur_skb->next) { 2557 st->cur_skb = st->cur_skb->next; 2558 st->frag_idx = 0; 2559 goto next_skb; 2560 } 2561 2562 return 0; 2563 } 2564 EXPORT_SYMBOL(skb_seq_read); 2565 2566 /** 2567 * skb_abort_seq_read - Abort a sequential read of skb data 2568 * @st: state variable 2569 * 2570 * Must be called if skb_seq_read() was not called until it 2571 * returned 0, i.e. if the sequential read was aborted early. 2572 */ 2573 void skb_abort_seq_read(struct skb_seq_state *st) 2574 { 2575 if (st->frag_data) 2576 kunmap_atomic(st->frag_data); 2577 } 2578 EXPORT_SYMBOL(skb_abort_seq_read); 2579 2580 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2581 2582 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2583 struct ts_config *conf, 2584 struct ts_state *state) 2585 { 2586 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2587 } 2588 2589 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2590 { 2591 skb_abort_seq_read(TS_SKB_CB(state)); 2592 } 2593 2594 /** 2595 * skb_find_text - Find a text pattern in skb data 2596 * @skb: the buffer to look in 2597 * @from: search offset 2598 * @to: search limit 2599 * @config: textsearch configuration 2600 * @state: uninitialized textsearch state variable 2601 * 2602 * Finds a pattern in the skb data according to the specified 2603 * textsearch configuration.
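 * A minimal usage sketch; the "kmp" algorithm name and the pattern are
 * illustrative only:
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	textsearch_destroy(conf);
 *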
Use textsearch_next() to retrieve 2604 * subsequent occurrences of the pattern. Returns the offset 2605 * to the first occurrence or UINT_MAX if no match was found. 2606 */ 2607 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2608 unsigned int to, struct ts_config *config, 2609 struct ts_state *state) 2610 { 2611 unsigned int ret; 2612 2613 config->get_next_block = skb_ts_get_next_block; 2614 config->finish = skb_ts_finish; 2615 2616 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 2617 2618 ret = textsearch_find(config, state); 2619 return (ret <= to - from ? ret : UINT_MAX); 2620 } 2621 EXPORT_SYMBOL(skb_find_text); 2622 2623 /** 2624 * skb_append_datato_frags - append the user data to a skb 2625 * @sk: sock structure 2626 * @skb: skb structure to be appended with user data. 2627 * @getfrag: callback function to be used for getting the user data 2628 * @from: pointer to user message iov 2629 * @length: length of the iov message 2630 * 2631 * Description: This procedure appends the user data to the fragment part 2632 * of the skb. If any page allocation fails, it returns -ENOMEM. 2633 */ 2634 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2635 int (*getfrag)(void *from, char *to, int offset, 2636 int len, int odd, struct sk_buff *skb), 2637 void *from, int length) 2638 { 2639 int frg_cnt = 0; 2640 skb_frag_t *frag = NULL; 2641 struct page *page = NULL; 2642 int copy, left; 2643 int offset = 0; 2644 int ret; 2645 2646 do { 2647 /* Return error if we don't have space for new frag */ 2648 frg_cnt = skb_shinfo(skb)->nr_frags; 2649 if (frg_cnt >= MAX_SKB_FRAGS) 2650 return -EFAULT; 2651 2652 /* allocate a new page for the next frag */ 2653 page = alloc_pages(sk->sk_allocation, 0); 2654 2655 /* If alloc_pages() fails, just return failure; the caller will 2656 * free previously allocated pages by doing kfree_skb() 2657 */ 2658 if (page == NULL) 2659 return -ENOMEM; 2660 2661 /* initialize the next frag */ 2662 skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 2663 skb->truesize += PAGE_SIZE; 2664 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 2665 2666 /* get the new initialized frag */ 2667 frg_cnt = skb_shinfo(skb)->nr_frags; 2668 frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 2669 2670 /* copy the user data to the page */ 2671 left = PAGE_SIZE - frag->page_offset; 2672 copy = (length > left) ? left : length; 2673 2674 ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), 2675 offset, copy, 0, skb); 2676 if (ret < 0) 2677 return -EFAULT; 2678 2679 /* copy was successful so update the size parameters */ 2680 skb_frag_size_add(frag, copy); 2681 skb->len += copy; 2682 skb->data_len += copy; 2683 offset += copy; 2684 length -= copy; 2685 2686 } while (length > 0); 2687 2688 return 0; 2689 } 2690 EXPORT_SYMBOL(skb_append_datato_frags); 2691 2692 /** 2693 * skb_pull_rcsum - pull skb and update receive checksum 2694 * @skb: buffer to update 2695 * @len: length of data pulled 2696 * 2697 * This function performs an skb_pull on the packet and updates 2698 * the CHECKSUM_COMPLETE checksum. It should be used in 2699 * receive path processing instead of skb_pull unless you know 2700 * that the checksum difference is zero (e.g., a valid IP header) 2701 * or you are setting ip_summed to CHECKSUM_NONE.
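 *
 * A minimal usage sketch, assuming a receive handler that strips an
 * encapsulation header of MY_HDR_LEN bytes (a hypothetical constant)
 * from the front of the packet:
 *
 *	if (!pskb_may_pull(skb, MY_HDR_LEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, MY_HDR_LEN);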
2702 */ 2703 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2704 { 2705 BUG_ON(len > skb->len); 2706 skb->len -= len; 2707 BUG_ON(skb->len < skb->data_len); 2708 skb_postpull_rcsum(skb, skb->data, len); 2709 return skb->data += len; 2710 } 2711 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2712 2713 /** 2714 * skb_segment - Perform protocol segmentation on skb. 2715 * @skb: buffer to segment 2716 * @features: features for the output path (see dev->features) 2717 * 2718 * This function performs segmentation on the given skb. It returns 2719 * a pointer to the first in a list of new skbs for the segments. 2720 * In case of error it returns ERR_PTR(err). 2721 */ 2722 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2723 { 2724 struct sk_buff *segs = NULL; 2725 struct sk_buff *tail = NULL; 2726 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2727 unsigned int mss = skb_shinfo(skb)->gso_size; 2728 unsigned int doffset = skb->data - skb_mac_header(skb); 2729 unsigned int offset = doffset; 2730 unsigned int headroom; 2731 unsigned int len; 2732 int sg = !!(features & NETIF_F_SG); 2733 int nfrags = skb_shinfo(skb)->nr_frags; 2734 int err = -ENOMEM; 2735 int i = 0; 2736 int pos; 2737 2738 __skb_push(skb, doffset); 2739 headroom = skb_headroom(skb); 2740 pos = skb_headlen(skb); 2741 2742 do { 2743 struct sk_buff *nskb; 2744 skb_frag_t *frag; 2745 int hsize; 2746 int size; 2747 2748 len = skb->len - offset; 2749 if (len > mss) 2750 len = mss; 2751 2752 hsize = skb_headlen(skb) - offset; 2753 if (hsize < 0) 2754 hsize = 0; 2755 if (hsize > len || !sg) 2756 hsize = len; 2757 2758 if (!hsize && i >= nfrags) { 2759 BUG_ON(fskb->len != len); 2760 2761 pos += len; 2762 nskb = skb_clone(fskb, GFP_ATOMIC); 2763 fskb = fskb->next; 2764 2765 if (unlikely(!nskb)) 2766 goto err; 2767 2768 hsize = skb_end_offset(nskb); 2769 if (skb_cow_head(nskb, doffset + headroom)) { 2770 kfree_skb(nskb); 2771 goto err; 2772 } 2773 2774 nskb->truesize += skb_end_offset(nskb) - hsize; 2775 skb_release_head_state(nskb); 2776 __skb_push(nskb, doffset); 2777 } else { 2778 nskb = alloc_skb(hsize + doffset + headroom, 2779 GFP_ATOMIC); 2780 2781 if (unlikely(!nskb)) 2782 goto err; 2783 2784 skb_reserve(nskb, headroom); 2785 __skb_put(nskb, doffset); 2786 } 2787 2788 if (segs) 2789 tail->next = nskb; 2790 else 2791 segs = nskb; 2792 tail = nskb; 2793 2794 __copy_skb_header(nskb, skb); 2795 nskb->mac_len = skb->mac_len; 2796 2797 /* nskb and skb might have different headroom */ 2798 if (nskb->ip_summed == CHECKSUM_PARTIAL) 2799 nskb->csum_start += skb_headroom(nskb) - headroom; 2800 2801 skb_reset_mac_header(nskb); 2802 skb_set_network_header(nskb, skb->mac_len); 2803 nskb->transport_header = (nskb->network_header + 2804 skb_network_header_len(skb)); 2805 skb_copy_from_linear_data(skb, nskb->data, doffset); 2806 2807 if (fskb != skb_shinfo(skb)->frag_list) 2808 continue; 2809 2810 if (!sg) { 2811 nskb->ip_summed = CHECKSUM_NONE; 2812 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2813 skb_put(nskb, len), 2814 len, 0); 2815 continue; 2816 } 2817 2818 frag = skb_shinfo(nskb)->frags; 2819 2820 skb_copy_from_linear_data_offset(skb, offset, 2821 skb_put(nskb, hsize), hsize); 2822 2823 while (pos < offset + len && i < nfrags) { 2824 *frag = skb_shinfo(skb)->frags[i]; 2825 __skb_frag_ref(frag); 2826 size = skb_frag_size(frag); 2827 2828 if (pos < offset) { 2829 frag->page_offset += offset - pos; 2830 skb_frag_size_sub(frag, offset - pos); 2831 } 2832 2833 skb_shinfo(nskb)->nr_frags++; 2834 2835 if 
(pos + size <= offset + len) { 2836 i++; 2837 pos += size; 2838 } else { 2839 skb_frag_size_sub(frag, pos + size - (offset + len)); 2840 goto skip_fraglist; 2841 } 2842 2843 frag++; 2844 } 2845 2846 if (pos < offset + len) { 2847 struct sk_buff *fskb2 = fskb; 2848 2849 BUG_ON(pos + fskb->len != offset + len); 2850 2851 pos += fskb->len; 2852 fskb = fskb->next; 2853 2854 if (fskb2->next) { 2855 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2856 if (!fskb2) 2857 goto err; 2858 } else 2859 skb_get(fskb2); 2860 2861 SKB_FRAG_ASSERT(nskb); 2862 skb_shinfo(nskb)->frag_list = fskb2; 2863 } 2864 2865 skip_fraglist: 2866 nskb->data_len = len - hsize; 2867 nskb->len += nskb->data_len; 2868 nskb->truesize += nskb->data_len; 2869 } while ((offset += len) < skb->len); 2870 2871 return segs; 2872 2873 err: 2874 while ((skb = segs)) { 2875 segs = skb->next; 2876 kfree_skb(skb); 2877 } 2878 return ERR_PTR(err); 2879 } 2880 EXPORT_SYMBOL_GPL(skb_segment); 2881 2882 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2883 { 2884 struct sk_buff *p = *head; 2885 struct sk_buff *nskb; 2886 struct skb_shared_info *skbinfo = skb_shinfo(skb); 2887 struct skb_shared_info *pinfo = skb_shinfo(p); 2888 unsigned int headroom; 2889 unsigned int len = skb_gro_len(skb); 2890 unsigned int offset = skb_gro_offset(skb); 2891 unsigned int headlen = skb_headlen(skb); 2892 unsigned int delta_truesize; 2893 2894 if (p->len + len >= 65536) 2895 return -E2BIG; 2896 2897 if (pinfo->frag_list) 2898 goto merge; 2899 else if (headlen <= offset) { 2900 skb_frag_t *frag; 2901 skb_frag_t *frag2; 2902 int i = skbinfo->nr_frags; 2903 int nr_frags = pinfo->nr_frags + i; 2904 2905 offset -= headlen; 2906 2907 if (nr_frags > MAX_SKB_FRAGS) 2908 return -E2BIG; 2909 2910 pinfo->nr_frags = nr_frags; 2911 skbinfo->nr_frags = 0; 2912 2913 frag = pinfo->frags + nr_frags; 2914 frag2 = skbinfo->frags + i; 2915 do { 2916 *--frag = *--frag2; 2917 } while (--i); 2918 2919 frag->page_offset += offset; 2920 skb_frag_size_sub(frag, offset); 2921 2922 /* all fragments truesize : remove (head size + sk_buff) */ 2923 delta_truesize = skb->truesize - 2924 SKB_TRUESIZE(skb_end_offset(skb)); 2925 2926 skb->truesize -= skb->data_len; 2927 skb->len -= skb->data_len; 2928 skb->data_len = 0; 2929 2930 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 2931 goto done; 2932 } else if (skb->head_frag) { 2933 int nr_frags = pinfo->nr_frags; 2934 skb_frag_t *frag = pinfo->frags + nr_frags; 2935 struct page *page = virt_to_head_page(skb->head); 2936 unsigned int first_size = headlen - offset; 2937 unsigned int first_offset; 2938 2939 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 2940 return -E2BIG; 2941 2942 first_offset = skb->data - 2943 (unsigned char *)page_address(page) + 2944 offset; 2945 2946 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 2947 2948 frag->page.p = page; 2949 frag->page_offset = first_offset; 2950 skb_frag_size_set(frag, first_size); 2951 2952 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 2953 /* We dont need to clear skbinfo->nr_frags here */ 2954 2955 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 2956 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 2957 goto done; 2958 } else if (skb_gro_len(p) != pinfo->gso_size) 2959 return -E2BIG; 2960 2961 headroom = skb_headroom(p); 2962 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 2963 if (unlikely(!nskb)) 2964 return -ENOMEM; 2965 2966 __copy_skb_header(nskb, p); 2967 nskb->mac_len = p->mac_len; 2968 2969 skb_reserve(nskb, headroom); 
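		/* From here on nskb acts as a container: p's MAC, network and
		 * transport headers are copied into nskb's linear area below,
		 * and p itself is chained onto nskb's frag_list, so the two
		 * packets are merged without copying any payload.
		 */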
2970 __skb_put(nskb, skb_gro_offset(p)); 2971 2972 skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 2973 skb_set_network_header(nskb, skb_network_offset(p)); 2974 skb_set_transport_header(nskb, skb_transport_offset(p)); 2975 2976 __skb_pull(p, skb_gro_offset(p)); 2977 memcpy(skb_mac_header(nskb), skb_mac_header(p), 2978 p->data - skb_mac_header(p)); 2979 2980 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2981 skb_shinfo(nskb)->frag_list = p; 2982 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2983 pinfo->gso_size = 0; 2984 skb_header_release(p); 2985 nskb->prev = p; 2986 2987 nskb->data_len += p->len; 2988 nskb->truesize += p->truesize; 2989 nskb->len += p->len; 2990 2991 *head = nskb; 2992 nskb->next = p->next; 2993 p->next = NULL; 2994 2995 p = nskb; 2996 2997 merge: 2998 delta_truesize = skb->truesize; 2999 if (offset > headlen) { 3000 unsigned int eat = offset - headlen; 3001 3002 skbinfo->frags[0].page_offset += eat; 3003 skb_frag_size_sub(&skbinfo->frags[0], eat); 3004 skb->data_len -= eat; 3005 skb->len -= eat; 3006 offset = headlen; 3007 } 3008 3009 __skb_pull(skb, offset); 3010 3011 p->prev->next = skb; 3012 p->prev = skb; 3013 skb_header_release(skb); 3014 3015 done: 3016 NAPI_GRO_CB(p)->count++; 3017 p->data_len += len; 3018 p->truesize += delta_truesize; 3019 p->len += len; 3020 3021 NAPI_GRO_CB(skb)->same_flow = 1; 3022 return 0; 3023 } 3024 EXPORT_SYMBOL_GPL(skb_gro_receive); 3025 3026 void __init skb_init(void) 3027 { 3028 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3029 sizeof(struct sk_buff), 3030 0, 3031 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3032 NULL); 3033 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3034 (2*sizeof(struct sk_buff)) + 3035 sizeof(atomic_t), 3036 0, 3037 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3038 NULL); 3039 } 3040 3041 /** 3042 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3043 * @skb: Socket buffer containing the buffers to be mapped 3044 * @sg: The scatter-gather list to map into 3045 * @offset: The offset into the buffer's contents to start mapping 3046 * @len: Length of buffer space to be mapped 3047 * 3048 * Fill the specified scatter-gather list with mappings/pointers into a 3049 * region of the buffer space attached to a socket buffer. 
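 *
 * A minimal usage sketch, sizing the table from skb_cow_data() (below),
 * whose return value is the number of scatterlist entries needed; "sg"
 * is assumed to be an array with at least that many entries:
 *
 *	struct sk_buff *trailer;
 *	int n;
 *
 *	n = skb_cow_data(skb, 0, &trailer);
 *	if (n < 0)
 *		return n;
 *	sg_init_table(sg, n);
 *	skb_to_sgvec(skb, sg, 0, skb->len);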
3050 */ 3051 static int 3052 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3053 { 3054 int start = skb_headlen(skb); 3055 int i, copy = start - offset; 3056 struct sk_buff *frag_iter; 3057 int elt = 0; 3058 3059 if (copy > 0) { 3060 if (copy > len) 3061 copy = len; 3062 sg_set_buf(sg, skb->data + offset, copy); 3063 elt++; 3064 if ((len -= copy) == 0) 3065 return elt; 3066 offset += copy; 3067 } 3068 3069 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3070 int end; 3071 3072 WARN_ON(start > offset + len); 3073 3074 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3075 if ((copy = end - offset) > 0) { 3076 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3077 3078 if (copy > len) 3079 copy = len; 3080 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3081 frag->page_offset+offset-start); 3082 elt++; 3083 if (!(len -= copy)) 3084 return elt; 3085 offset += copy; 3086 } 3087 start = end; 3088 } 3089 3090 skb_walk_frags(skb, frag_iter) { 3091 int end; 3092 3093 WARN_ON(start > offset + len); 3094 3095 end = start + frag_iter->len; 3096 if ((copy = end - offset) > 0) { 3097 if (copy > len) 3098 copy = len; 3099 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3100 copy); 3101 if ((len -= copy) == 0) 3102 return elt; 3103 offset += copy; 3104 } 3105 start = end; 3106 } 3107 BUG_ON(len); 3108 return elt; 3109 } 3110 3111 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3112 { 3113 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3114 3115 sg_mark_end(&sg[nsg - 1]); 3116 3117 return nsg; 3118 } 3119 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3120 3121 /** 3122 * skb_cow_data - Check that a socket buffer's data buffers are writable 3123 * @skb: The socket buffer to check. 3124 * @tailbits: Amount of trailing space to be added 3125 * @trailer: Returned pointer to the skb where the @tailbits space begins 3126 * 3127 * Make sure that the data buffers attached to a socket buffer are 3128 * writable. If they are not, private copies are made of the data buffers 3129 * and the socket buffer is set to use these instead. 3130 * 3131 * If @tailbits is given, make sure that there is space to write @tailbits 3132 * bytes of data beyond current end of socket buffer. @trailer will be 3133 * set to point to the skb in which this space begins. 3134 * 3135 * The number of scatterlist elements required to completely map the 3136 * COW'd and extended socket buffer will be returned. 3137 */ 3138 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3139 { 3140 int copyflag; 3141 int elt; 3142 struct sk_buff *skb1, **skb_p; 3143 3144 /* If skb is cloned or its head is paged, reallocate 3145 * head pulling out all the pages (pages are considered not writable 3146 * at the moment even if they are anonymous). 3147 */ 3148 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3149 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3150 return -ENOMEM; 3151 3152 /* Easy case. Most of packets will go this way. */ 3153 if (!skb_has_frag_list(skb)) { 3154 /* A little of trouble, not enough of space for trailer. 3155 * This should not happen, when stack is tuned to generate 3156 * good frames. OK, on miss we reallocate and reserve even more 3157 * space, 128 bytes is fair. */ 3158 3159 if (skb_tailroom(skb) < tailbits && 3160 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3161 return -ENOMEM; 3162 3163 /* Voila! */ 3164 *trailer = skb; 3165 return 1; 3166 } 3167 3168 /* Misery. 
We are in troubles, going to mincer fragments... */ 3169 3170 elt = 1; 3171 skb_p = &skb_shinfo(skb)->frag_list; 3172 copyflag = 0; 3173 3174 while ((skb1 = *skb_p) != NULL) { 3175 int ntail = 0; 3176 3177 /* The fragment is partially pulled by someone, 3178 * this can happen on input. Copy it and everything 3179 * after it. */ 3180 3181 if (skb_shared(skb1)) 3182 copyflag = 1; 3183 3184 /* If the skb is the last, worry about trailer. */ 3185 3186 if (skb1->next == NULL && tailbits) { 3187 if (skb_shinfo(skb1)->nr_frags || 3188 skb_has_frag_list(skb1) || 3189 skb_tailroom(skb1) < tailbits) 3190 ntail = tailbits + 128; 3191 } 3192 3193 if (copyflag || 3194 skb_cloned(skb1) || 3195 ntail || 3196 skb_shinfo(skb1)->nr_frags || 3197 skb_has_frag_list(skb1)) { 3198 struct sk_buff *skb2; 3199 3200 /* Fuck, we are miserable poor guys... */ 3201 if (ntail == 0) 3202 skb2 = skb_copy(skb1, GFP_ATOMIC); 3203 else 3204 skb2 = skb_copy_expand(skb1, 3205 skb_headroom(skb1), 3206 ntail, 3207 GFP_ATOMIC); 3208 if (unlikely(skb2 == NULL)) 3209 return -ENOMEM; 3210 3211 if (skb1->sk) 3212 skb_set_owner_w(skb2, skb1->sk); 3213 3214 /* Looking around. Are we still alive? 3215 * OK, link new skb, drop old one */ 3216 3217 skb2->next = skb1->next; 3218 *skb_p = skb2; 3219 kfree_skb(skb1); 3220 skb1 = skb2; 3221 } 3222 elt++; 3223 *trailer = skb1; 3224 skb_p = &skb1->next; 3225 } 3226 3227 return elt; 3228 } 3229 EXPORT_SYMBOL_GPL(skb_cow_data); 3230 3231 static void sock_rmem_free(struct sk_buff *skb) 3232 { 3233 struct sock *sk = skb->sk; 3234 3235 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3236 } 3237 3238 /* 3239 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3240 */ 3241 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3242 { 3243 int len = skb->len; 3244 3245 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3246 (unsigned int)sk->sk_rcvbuf) 3247 return -ENOMEM; 3248 3249 skb_orphan(skb); 3250 skb->sk = sk; 3251 skb->destructor = sock_rmem_free; 3252 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3253 3254 /* before exiting rcu section, make sure dst is refcounted */ 3255 skb_dst_force(skb); 3256 3257 skb_queue_tail(&sk->sk_error_queue, skb); 3258 if (!sock_flag(sk, SOCK_DEAD)) 3259 sk->sk_data_ready(sk, len); 3260 return 0; 3261 } 3262 EXPORT_SYMBOL(sock_queue_err_skb); 3263 3264 void skb_tstamp_tx(struct sk_buff *orig_skb, 3265 struct skb_shared_hwtstamps *hwtstamps) 3266 { 3267 struct sock *sk = orig_skb->sk; 3268 struct sock_exterr_skb *serr; 3269 struct sk_buff *skb; 3270 int err; 3271 3272 if (!sk) 3273 return; 3274 3275 skb = skb_clone(orig_skb, GFP_ATOMIC); 3276 if (!skb) 3277 return; 3278 3279 if (hwtstamps) { 3280 *skb_hwtstamps(skb) = 3281 *hwtstamps; 3282 } else { 3283 /* 3284 * no hardware time stamps available, 3285 * so keep the shared tx_flags and only 3286 * store software time stamp 3287 */ 3288 skb->tstamp = ktime_get_real(); 3289 } 3290 3291 serr = SKB_EXT_ERR(skb); 3292 memset(serr, 0, sizeof(*serr)); 3293 serr->ee.ee_errno = ENOMSG; 3294 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3295 3296 err = sock_queue_err_skb(sk, skb); 3297 3298 if (err) 3299 kfree_skb(skb); 3300 } 3301 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3302 3303 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3304 { 3305 struct sock *sk = skb->sk; 3306 struct sock_exterr_skb *serr; 3307 int err; 3308 3309 skb->wifi_acked_valid = 1; 3310 skb->wifi_acked = acked; 3311 3312 serr = SKB_EXT_ERR(skb); 3313 memset(serr, 0, sizeof(*serr)); 3314 serr->ee.ee_errno = ENOMSG; 3315 
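	/* ENOMSG plus SO_EE_ORIGIN_TXSTATUS (set just below) mark this entry
	 * on the socket error queue so that recvmsg(MSG_ERRQUEUE) users can
	 * tell a wifi TX status report apart from other error-queue messages.
	 */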
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3316 3317 err = sock_queue_err_skb(sk, skb); 3318 if (err) 3319 kfree_skb(skb); 3320 } 3321 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3322 3323 3324 /** 3325 * skb_partial_csum_set - set up and verify partial csum values for packet 3326 * @skb: the skb to set 3327 * @start: the number of bytes after skb->data to start checksumming. 3328 * @off: the offset from start to place the checksum. 3329 * 3330 * For untrusted partially-checksummed packets, we need to make sure the values 3331 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3332 * 3333 * This function checks and sets those values and skb->ip_summed: if this 3334 * returns false you should drop the packet. 3335 */ 3336 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3337 { 3338 if (unlikely(start > skb_headlen(skb)) || 3339 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3340 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 3341 start, off, skb_headlen(skb)); 3342 return false; 3343 } 3344 skb->ip_summed = CHECKSUM_PARTIAL; 3345 skb->csum_start = skb_headroom(skb) + start; 3346 skb->csum_offset = off; 3347 return true; 3348 } 3349 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3350 3351 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 3352 { 3353 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 3354 skb->dev->name); 3355 } 3356 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3357 3358 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 3359 { 3360 if (head_stolen) 3361 kmem_cache_free(skbuff_head_cache, skb); 3362 else 3363 __kfree_skb(skb); 3364 } 3365 EXPORT_SYMBOL(kfree_skb_partial); 3366 3367 /** 3368 * skb_try_coalesce - try to merge skb to prior one 3369 * @to: prior buffer 3370 * @from: buffer to add 3371 * @fragstolen: pointer to boolean 3372 * @delta_truesize: how much more was allocated than was requested 3373 */ 3374 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3375 bool *fragstolen, int *delta_truesize) 3376 { 3377 int i, delta, len = from->len; 3378 3379 *fragstolen = false; 3380 3381 if (skb_cloned(to)) 3382 return false; 3383 3384 if (len <= skb_tailroom(to)) { 3385 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 3386 *delta_truesize = 0; 3387 return true; 3388 } 3389 3390 if (skb_has_frag_list(to) || skb_has_frag_list(from)) 3391 return false; 3392 3393 if (skb_headlen(from) != 0) { 3394 struct page *page; 3395 unsigned int offset; 3396 3397 if (skb_shinfo(to)->nr_frags + 3398 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 3399 return false; 3400 3401 if (skb_head_is_locked(from)) 3402 return false; 3403 3404 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3405 3406 page = virt_to_head_page(from->head); 3407 offset = from->data - (unsigned char *)page_address(page); 3408 3409 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 3410 page, offset, skb_headlen(from)); 3411 *fragstolen = true; 3412 } else { 3413 if (skb_shinfo(to)->nr_frags + 3414 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 3415 return false; 3416 3417 delta = from->truesize - 3418 SKB_TRUESIZE(skb_end_pointer(from) - from->head); 3419 } 3420 3421 WARN_ON_ONCE(delta < len); 3422 3423 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 3424 skb_shinfo(from)->frags, 3425 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 3426 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 3427 3428 if (!skb_cloned(from)) 3429 skb_shinfo(from)->nr_frags = 0; 3430 3431 /* if the 
skb is cloned this does nothing since we set nr_frags to 0 */ 3432 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) 3433 skb_frag_ref(from, i); 3434 3435 to->truesize += delta; 3436 to->len += len; 3437 to->data_len += len; 3438 3439 *delta_truesize = delta; 3440 return true; 3441 } 3442 EXPORT_SYMBOL(skb_try_coalesce); 3443
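/*
 * Usage sketch (illustration only, not part of the original file): how a
 * receive path might merge a freshly arrived skb into the previously
 * queued one.  The caller, the "tail" buffer and the policy on failure
 * are assumptions of this example.
 */
static inline bool example_try_coalesce(struct sk_buff *tail,
					struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;	/* caller keeps skb as its own queue entry */

	/* Data now lives in "tail"; free what is left of "skb". */
	kfree_skb_partial(skb, fragstolen);
	return true;
}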