/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
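 *
 *	Illustrative only (hypothetical buggy caller): writing more than
 *	the available tailroom ends up here, e.g.
 *
 *		skb = alloc_skb(64, GFP_ATOMIC);
 *		if (skb)
 *			skb_put(skb, 128);	(128 > tailroom: panics)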
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
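 *
 *	A minimal usage sketch (hypothetical driver receive path; the
 *	length, alignment pad and data source are made up for
 *	illustration):
 *
 *		skb = dev_alloc_skb(pkt_len + 2);
 *		if (!skb)
 *			return;			(drop the packet)
 *		skb_reserve(skb, 2);		(align the IP header)
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);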
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		/*
		 * If the skb's data buffer came from userspace, we need to
		 * notify the caller that the lower device's DMA is done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function.
 *	Users should always call kfree_skb instead.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	@skb_size, so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
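 *
 *	A sketch of the intended driver pattern (the 'priv' fields and
 *	recycle queue are hypothetical, driver-owned objects):
 *
 *		if (skb_recycle_check(skb, priv->rx_buf_size))
 *			skb_queue_head(&priv->rx_recycle, skb);
 *		else
 *			dev_kfree_skb(skb);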
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
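 *
 *	A hedged usage sketch (names illustrative): reuse the shell of
 *	@dst but give it @src's data, then drop the now-redundant source
 *	reference (safe, since the morph bumped the shared dataref):
 *
 *		skb_morph(dst, src);
 *		consume_skb(src);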
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel memory and drop the reference
 *	to the userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, f->size);
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		put_page(skb_shinfo(skb)->frags[i].page);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
		skb_shinfo(skb)->frags[i - 1].page = head;
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}


/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
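 *
 *	Typical use (sketch; 'deliver' is a hypothetical second consumer):
 *	keep a private reference to a packet that is also handed on:
 *
 *		clone = skb_clone(skb, GFP_ATOMIC);
 *		if (clone)
 *			deliver(clone);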
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
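 *
 *	Illustration of the choice between the two copies (sketch):
 *
 *		skb2 = pskb_copy(skb, GFP_ATOMIC);	(header edits only)
 *		skb2 = skb_copy(skb, GFP_ATOMIC);	(payload edits too)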
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	unsigned int size = skb_end_pointer(skb) - skb->head;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb_headroom(skb));
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of the skb. The &sk_buff itself is not changed. The
 *	&sk_buff MUST have a reference count of 1. Returns zero on success
 *	or a negative error code if expansion failed; in the latter case,
 *	the &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy this zero copy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			get_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data.
	 */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
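 *
 *	A common decapsulation sketch (assumes the header is already known
 *	to be in the linear area):
 *
 *		struct ethhdr *eth = (struct ethhdr *)skb->data;
 *		skb_pull(skb, sizeof(struct ethhdr));
 *		(skb->data now points at the network header)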
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff:
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb on success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
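 *
 * Callers normally reach this through pskb_may_pull(); a sketch of the
 * usual guard before touching a header in the linear area:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);	(safe: the header is now linear)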
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough room
	 * at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to preestimate the
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION:
 *	If its prototype is ever changed,
 *	check arch/{*}/net/{*}.S files,
 *	since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
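 *
 * Hedged caller sketch (modelled on a tcp_splice_read()-style loop;
 * the offset/length bookkeeping is the caller's responsibility):
 *
 *	ret = skb_splice_bits(skb, offset, pipe, tlen, flags);
 *	(returns the number of bytes spliced, or a negative errno)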
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
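 *
 *	Sketch of the symmetric read/modify/write pair (the buffer size and
 *	'mangle' transform are illustrative):
 *
 *		u8 tmp[64];
 *		if (skb_copy_bits(skb, 0, tmp, sizeof(tmp)))
 *			goto fault;		(read out of the skb)
 *		mangle(tmp);			(hypothetical transform)
 *		if (skb_store_bits(skb, 0, tmp, sizeof(tmp)))
 *			goto fault;		(write back into the skb)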
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

/* Both of above in one bottle.
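 *
 * That is, skb_copy_and_csum_bits() behaves like skb_copy_bits() and
 * skb_checksum() combined in a single pass. A hedged folding sketch:
 *
 *	csum = skb_copy_and_csum_bits(skb, 0, to, skb->len, 0);
 *	sum16 = csum_fold(csum);	(final 16-bit checksum)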
 */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
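 *
 *	Producer/consumer sketch for this locked queue API (the queue and
 *	handler are hypothetical):
 *
 *		skb_queue_tail(&priv->rxq, skb);	(producer)
 *		...
 *		while ((skb = skb_dequeue(&priv->rxq)))	(consumer)
 *			handle(skb);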
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 * skb_insert - insert a buffer
 * @old: buffer to insert before
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet before a given packet in a list. The list locks are
 * taken and this function is atomic with respect to other list locked
 * calls.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split the fragment.
				 * There are two options here:
				 * 1. Move the whole fragment to the second
				 *    part if possible (e.g. mandatory where
				 *    splitting is expensive, as it was for
				 *    TUX).
				 * 2. Split accurately, which is what we do
				 *    here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);

/* Shifting from/to a cloned skb is a no-go.
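 * (A clone shares the frag array with its sibling, so the frag
 * bookkeeping below cannot be edited in place; pskb_expand_head()
 * is used first to obtain a private copy.)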
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to @shiftlen worth of bytes, which may be less than
 * the length of the skb, from @skb to @tgt. Returns the number of bytes
 * shifted. It's up to the caller to free @skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * @skb may contain nothing but paged data, while @tgt is also allowed
 * to hold non-paged data.
 *
 * TODO: full sized shift could be optimized but that would need
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= fragfrom->size;
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale!
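			 * pskb_expand_head() may have reallocated the
			 * shared info, so fragfrom and fragto are
			 * refetched below before being dereferenced.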
			 */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
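 *
 * A typical read loop (a sketch; consume() stands in for whatever
 * the caller does with each returned block) looks like:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) > 0) {
 *		consume(data, avail);
 *		consumed += avail;
 *	}
 *
 * (If the loop is exited early, skb_abort_seq_read() must be called.)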
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *         this limitation is the cost for zerocopy sequential
 *         reads of potentially non-linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *         at the moment, state->root_skb could be replaced with
 *         a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the sequential read is abandoned before
 * skb_seq_read() has returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration.
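 *
 * A minimal search over the whole packet (a sketch; "conf" would come
 * from an earlier textsearch_prepare() call) could be:
 *
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *
 * where any return value other than UINT_MAX is the match offset.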
 * Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, it returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
				     frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
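 *
 * For example, a tunnel driver stripping a 4-byte encapsulation
 * header on receive might do (a sketch):
 *
 *	skb_pull_rcsum(skb, 4);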
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = !!(features & NETIF_F_SG);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		/* nskb and skb might have different headroom */
		if (nskb->ip_summed == CHECKSUM_PARTIAL)
			nskb->csum_start += skb_headroom(nskb) - headroom;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	struct skb_shared_info *skbinfo = skb_shinfo(skb);
	struct skb_shared_info *pinfo = skb_shinfo(p);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->len + len >= 65536)
		return -E2BIG;

	if (pinfo->frag_list)
		goto merge;
	else if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		offset -= headlen;

		if (nr_frags > MAX_SKB_FRAGS)
			return -E2BIG;

		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		frag->size -= offset;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	} else if (skb_gro_len(p) != pinfo->gso_size)
		return -E2BIG;

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skbinfo->frags[0].size -= eat;
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	p->prev->next = skb;
	p->prev = skb;
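	/* skb now sits at the tail of p's frag_list chain; p->prev is
	 * (re)used here as a cursor that caches that tail.
	 */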
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset+offset-start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
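 *
 * A typical caller (a sketch modelled on the IPsec transforms; sizing
 * the scatterlist "sg" for the returned element count is the caller's
 * responsibility) pairs this with skb_to_sgvec():
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, tailbits, &trailer);
 *
 *	if (nfrags < 0)
 *		return nfrags;
 *	skb_to_sgvec(skb, sg, 0, skb->len);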
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair.
		 */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery: there is a fragment list, so every fragment goes
	 * through the mincer...
	 */
	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it.
		 */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* There is no way around copying here. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

/*
 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(skb) = *hwtstamps;
	} else {
		/*
		 * no hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store software time stamp
		 */
		skb->tstamp = ktime_get_real();
	}

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);

/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the
 * values for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
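/*
 * Example (a sketch, not part of the code above): a device backend
 * receiving partially-checksummed packets from an untrusted guest, in
 * the style of virtio_net, would validate the checksum metadata with
 * skb_partial_csum_set() and drop the packet on failure:
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */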