/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman :	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	Returns the buffer on success or %NULL on failure.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
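/*
 * Editor's example (not part of the original file): most callers use the
 * alloc_skb() wrapper rather than __alloc_skb() directly, reserve headroom
 * first, and only then append payload. "hdr_len", "data_len" and "data"
 * below are illustrative assumptions.
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);	(claim headroom for later headers)
 *	memcpy(skb_put(skb, data_len), data, data_len);
 */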
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
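/*
 * Editor's example: a typical driver receive path goes through
 * netdev_alloc_skb() so that NET_SKB_PAD headroom is already reserved, and
 * may attach page fragments with skb_add_rx_frag(). "rx_len", "rx_page",
 * "off" and "frag_len" are assumed driver-private values.
 *
 *	skb = netdev_alloc_skb(dev, rx_len);
 *	if (unlikely(!skb))
 *		return NULL;
 *	skb_add_rx_frag(skb, 0, rx_page, off, frag_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */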
/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		/*
		 * If the skb buffer is from userspace, we need to notify the
		 * caller that the lower device's DMA is done.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb() instead.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Functions identically to kfree_skb(), but kfree_skb()
 *	assumes that the frame is being dropped after a failure and notes
 *	that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
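/*
 * Editor's example: kfree_skb() and consume_skb() free the buffer the same
 * way; the split only matters for tracing and drop accounting. A driver's
 * TX-completion handler might do ("tx_error" is an assumed flag):
 *
 *	if (unlikely(tx_error))
 *		kfree_skb(skb);		(traced as a drop)
 *	else
 *		consume_skb(skb);	(normal end of life)
 */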
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	@skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
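/*
 * Editor's example: a driver that recycles TX buffers into its RX ring can
 * test reusability instead of freeing unconditionally. "priv->rx_skb" and
 * RX_BUF_SIZE are assumed driver-private names.
 *
 *	if (!priv->rx_skb && skb_recycle_check(skb, RX_BUF_SIZE))
 *		priv->rx_skb = skb;	(cleaned up as if freshly allocated)
 *	else
 *		dev_kfree_skb(skb);
 */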
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function. Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/* Copy skb frags from userspace buffers to kernel memory. */
static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, f->size);
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		put_page(skb_shinfo(skb)->frags[i].page);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
		skb_shinfo(skb)->frags[i - 1].page = head;
		head = (struct page *)head->private;
	}
	return 0;
}
/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff into a
 *	linear one, so the &sk_buff becomes completely private and the caller
 *	is allowed to modify all the data of the returned buffer. This means
 *	that this function is not recommended when only the header is going
 *	to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
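/*
 * Editor's example: skb_clone() shares the packet data (cheap; fine for
 * read-only consumers such as a packet tap), while skb_copy() duplicates
 * head and data (needed before modifying payload in place).
 *
 *	struct sk_buff *ro = skb_clone(skb, GFP_ATOMIC);	(shared data)
 *	struct sk_buff *rw = skb_copy(skb, GFP_ATOMIC);		(private data)
 */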
/**
 *	pskb_copy - create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	unsigned int size = skb_end_pointer(skb) - skb->head;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb_headroom(skb));
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);
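/*
 * Editor's example: when only headers will be rewritten, pskb_copy() avoids
 * duplicating the paged payload that skb_copy() would copy. "new_saddr" is
 * an assumed value, and a real caller would also fix up checksums.
 *
 *	struct sk_buff *nskb = pskb_copy(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	ip_hdr(nskb)->saddr = new_saddr;	(header-only change)
 */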
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of the skb. The &sk_buff itself is not changed. The
 *	&sk_buff MUST have a reference count of 1. Returns zero on success
 *	or a negative error code if expansion failed; in the latter case
 *	the &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy the zero-copy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			get_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
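/*
 * Editor's example: a tunnel that must push an extra header checks headroom
 * first; skb_realloc_headroom() ends up in pskb_expand_head() when there is
 * not enough. TUNNEL_HLEN is an assumed constant.
 *
 *	if (skb_headroom(skb) < TUNNEL_HLEN) {
 *		struct sk_buff *nskb = skb_realloc_headroom(skb, TUNNEL_HLEN);
 *		if (!nskb)
 *			goto drop;
 *		kfree_skb(skb);
 *		skb = nskb;
 *	}
 *	skb_push(skb, TUNNEL_HLEN);	(now guaranteed to fit)
 */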
/**
 *	skb_copy_expand - copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad - zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
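/*
 * Editor's example: building an outgoing frame pairs skb_reserve() (claim
 * headroom) with skb_put() (claim payload space); skb_put() returns a
 * pointer to the newly added region. "len" and "payload" are assumptions.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, len), payload, len);
 */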
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
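/*
 * Editor's example: skb_pull() and skb_push() mirror each other around the
 * protocol stack: on receive each layer pulls its header, on transmit each
 * layer pushes one back.
 *
 *	skb_pull(skb, ETH_HLEN);	(strip the link-layer header on rx)
 *	...
 *	skb_push(skb, ETH_HLEN);	(restore it before retransmission)
 */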
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have a reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough room
	 * at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate
	 * the size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
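/*
 * Editor's example: skb_copy_bits() is the safe way to read data that may
 * live in fragments rather than in the linear head; a negative offset
 * reaches back into the headroom, as skb_copy() above relies on.
 *
 *	struct udphdr uh;
 *	if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)))
 *		goto bad_packet;	(skb shorter than expected)
 */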
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}
/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}
/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);
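/*
 * Editor's example: checksumming a possibly fragmented payload and folding
 * the 32-bit partial sum into its final 16-bit form. "thoff" (transport
 * header offset) is an assumed variable.
 *
 *	__wsum csum = skb_checksum(skb, thoff, skb->len - thoff, 0);
 *	__sum16 check = csum_fold(csum);
 */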
/* Both of the above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);
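/*
 * Editor's example: the locked queue helpers here make the classic
 * producer/consumer pattern safe without extra locking by the caller.
 * "rxq" and process() are assumed names.
 *
 *	skb_queue_head_init(&rxq);		(once, at setup time)
 *	skb_queue_tail(&rxq, skb);		(producer, e.g. irq path)
 *	while ((skb = skb_dequeue(&rxq)))	(consumer)
 *		process(skb);
 */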
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append - append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other list locked calls.
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 * skb_insert - insert a buffer
 * @old: buffer to insert before
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet before a given packet in a list. The list locks are
 * taken and this function is atomic with respect to other list locked
 * calls.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately, which is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
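/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * splitting a buffer at @len in the style of the TCP tso_fragment()
 * path. The second skb only needs linear space for whatever part of
 * the header area lies beyond the split point; "headroom" is a
 * caller-chosen value and is hypothetical here.
 *
 *	int nsize = skb_headlen(skb) - len;
 *	struct sk_buff *skb1;
 *
 *	if (nsize < 0)
 *		nsize = 0;
 *
 *	skb1 = alloc_skb(nsize + headroom, GFP_ATOMIC);
 *	if (skb1) {
 *		skb_reserve(skb1, headroom);
 *		skb_split(skb, skb1, len);
 *	}
 */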
2174 * 2175 * Caller cannot keep skb_shinfo related pointers past calling here! 2176 */ 2177 static int skb_prepare_for_shift(struct sk_buff *skb) 2178 { 2179 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2180 } 2181 2182 /** 2183 * skb_shift - Shifts paged data partially from skb to another 2184 * @tgt: buffer into which tail data gets added 2185 * @skb: buffer from which the paged data comes from 2186 * @shiftlen: shift up to this many bytes 2187 * 2188 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2189 * the length of the skb, from tgt to skb. Returns number bytes shifted. 2190 * It's up to caller to free skb if everything was shifted. 2191 * 2192 * If @tgt runs out of frags, the whole operation is aborted. 2193 * 2194 * Skb cannot include anything else but paged data while tgt is allowed 2195 * to have non-paged data as well. 2196 * 2197 * TODO: full sized shift could be optimized but that would need 2198 * specialized skb free'er to handle frags without up-to-date nr_frags. 2199 */ 2200 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2201 { 2202 int from, to, merge, todo; 2203 struct skb_frag_struct *fragfrom, *fragto; 2204 2205 BUG_ON(shiftlen > skb->len); 2206 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2207 2208 todo = shiftlen; 2209 from = 0; 2210 to = skb_shinfo(tgt)->nr_frags; 2211 fragfrom = &skb_shinfo(skb)->frags[from]; 2212 2213 /* Actual merge is delayed until the point when we know we can 2214 * commit all, so that we don't have to undo partial changes 2215 */ 2216 if (!to || 2217 !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) { 2218 merge = -1; 2219 } else { 2220 merge = to - 1; 2221 2222 todo -= fragfrom->size; 2223 if (todo < 0) { 2224 if (skb_prepare_for_shift(skb) || 2225 skb_prepare_for_shift(tgt)) 2226 return 0; 2227 2228 /* All previous frag pointers might be stale! 
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
2347 * 2348 * The caller is not required to consume all of the data 2349 * returned, i.e. &consumed is typically set to the number 2350 * of bytes already consumed and the next call to 2351 * skb_seq_read() will return the remaining part of the block. 2352 * 2353 * Note 1: The size of each block of data returned can be arbitrary, 2354 * this limitation is the cost for zerocopy seqeuental 2355 * reads of potentially non linear data. 2356 * 2357 * Note 2: Fragment lists within fragments are not implemented 2358 * at the moment, state->root_skb could be replaced with 2359 * a stack for this purpose. 2360 */ 2361 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2362 struct skb_seq_state *st) 2363 { 2364 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2365 skb_frag_t *frag; 2366 2367 if (unlikely(abs_offset >= st->upper_offset)) 2368 return 0; 2369 2370 next_skb: 2371 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2372 2373 if (abs_offset < block_limit && !st->frag_data) { 2374 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2375 return block_limit - abs_offset; 2376 } 2377 2378 if (st->frag_idx == 0 && !st->frag_data) 2379 st->stepped_offset += skb_headlen(st->cur_skb); 2380 2381 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2382 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2383 block_limit = frag->size + st->stepped_offset; 2384 2385 if (abs_offset < block_limit) { 2386 if (!st->frag_data) 2387 st->frag_data = kmap_skb_frag(frag); 2388 2389 *data = (u8 *) st->frag_data + frag->page_offset + 2390 (abs_offset - st->stepped_offset); 2391 2392 return block_limit - abs_offset; 2393 } 2394 2395 if (st->frag_data) { 2396 kunmap_skb_frag(st->frag_data); 2397 st->frag_data = NULL; 2398 } 2399 2400 st->frag_idx++; 2401 st->stepped_offset += frag->size; 2402 } 2403 2404 if (st->frag_data) { 2405 kunmap_skb_frag(st->frag_data); 2406 st->frag_data = NULL; 2407 } 2408 2409 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2410 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2411 st->frag_idx = 0; 2412 goto next_skb; 2413 } else if (st->cur_skb->next) { 2414 st->cur_skb = st->cur_skb->next; 2415 st->frag_idx = 0; 2416 goto next_skb; 2417 } 2418 2419 return 0; 2420 } 2421 EXPORT_SYMBOL(skb_seq_read); 2422 2423 /** 2424 * skb_abort_seq_read - Abort a sequential read of skb data 2425 * @st: state variable 2426 * 2427 * Must be called if skb_seq_read() was not called until it 2428 * returned 0. 2429 */ 2430 void skb_abort_seq_read(struct skb_seq_state *st) 2431 { 2432 if (st->frag_data) 2433 kunmap_skb_frag(st->frag_data); 2434 } 2435 EXPORT_SYMBOL(skb_abort_seq_read); 2436 2437 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2438 2439 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2440 struct ts_config *conf, 2441 struct ts_state *state) 2442 { 2443 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2444 } 2445 2446 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2447 { 2448 skb_abort_seq_read(TS_SKB_CB(state)); 2449 } 2450 2451 /** 2452 * skb_find_text - Find a text pattern in skb data 2453 * @skb: the buffer to look in 2454 * @from: search offset 2455 * @to: search limit 2456 * @config: textsearch configuration 2457 * @state: uninitialized textsearch state variable 2458 * 2459 * Finds a pattern in the skb data according to the specified 2460 * textsearch configuration. 
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
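/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * searching a (possibly non-linear) skb for a byte pattern, in the
 * style of the xt_string and em_text users of this API. The pattern
 * and pattern_len variables are hypothetical.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", pattern, pattern_len,
 *				  GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	if (pos != UINT_MAX)
 *		...match found at offset pos...
 *
 *	textsearch_destroy(conf);
 */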
/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: callback function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, -ENOMEM is returned and the
 * caller is expected to free the previously allocated pages via kfree_skb().
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			      frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = !!(features & NETIF_F_SG);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		/* nskb and skb might have different headroom */
		if (nskb->ip_summed == CHECKSUM_PARTIAL)
			nskb->csum_start += skb_headroom(nskb) - headroom;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	struct skb_shared_info *skbinfo = skb_shinfo(skb);
	struct skb_shared_info *pinfo = skb_shinfo(p);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->len + len >= 65536)
		return -E2BIG;

	if (pinfo->frag_list)
		goto merge;
	else if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		offset -= headlen;

		if (nr_frags > MAX_SKB_FRAGS)
			return -E2BIG;

		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		frag->size -= offset;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	} else if (skb_gro_len(p) != pinfo->gso_size)
		return -E2BIG;

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skbinfo->frags[0].size -= eat;
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	p->prev->next = skb;
	p->prev = skb;
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset + offset - start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg + elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
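/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * mapping an skb into a scatterlist the way the IPsec/ESP input path
 * does. skb_cow_data() below both makes the buffers writable and
 * returns the worst-case element count needed for the table.
 *
 *	struct sk_buff *trailer;
 *	struct scatterlist *sg;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, 0, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *
 *	sg = kmalloc(nfrags * sizeof(*sg), GFP_ATOMIC);
 *	if (!sg)
 *		return -ENOMEM;
 *
 *	sg_init_table(sg, nfrags);
 *	skb_to_sgvec(skb, sg, 0, skb->len);
 */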
/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to
		 * generate good frames. OK, on a miss we reallocate
		 * and reserve even more space, 128 bytes is fair.
		 */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it.
		 */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

/*
 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(skb) = *hwtstamps;
	} else {
		/*
		 * no hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store software time stamp
		 */
		skb->tstamp = ktime_get_real();
	}

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);

/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
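/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * validating checksum metadata supplied by an untrusted source, in the
 * style of virtio_net's use of skb_partial_csum_set() above. The
 * csum_start and csum_offset values arrive from the guest/wire and
 * must be checked before the skb enters the stack.
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */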