/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least @size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

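/*
 * Illustrative sketch (not part of this file's API): the usual pattern a
 * caller follows after __alloc_skb()/alloc_skb() - reserve headroom for
 * protocol headers, then append payload with skb_put().  The function name
 * and the "headroom"/"payload" parameters are hypothetical.
 */
static inline struct sk_buff *alloc_skb_example(const void *payload,
						unsigned int len,
						unsigned int headroom)
{
	struct sk_buff *skb = alloc_skb(headroom + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, headroom);		/* leave room for headers */
	memcpy(skb_put(skb, len), payload, len);	/* append payload */
	return skb;
}
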
/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);

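/*
 * Illustrative sketch (assumed driver code, not part of this file): how an
 * RX path is expected to size the buffer it later hands to build_skb().
 * The function name and the use of kmalloc() for the ring buffer are
 * assumptions for illustration only.
 */
static inline struct sk_buff *build_skb_example(const void *frame,
						unsigned int frame_len)
{
	struct sk_buff *skb;
	void *buf;

	/* room for head padding, the frame and the tail skb_shared_info */
	buf = kmalloc(NET_SKB_PAD + frame_len +
		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		      GFP_ATOMIC);
	if (!buf)
		return NULL;
	memcpy(buf + NET_SKB_PAD, frame, frame_len);	/* NIC DMA stand-in */

	skb = build_skb(buf);
	if (!skb) {
		kfree(buf);		/* build_skb() did not take ownership */
		return NULL;
	}
	skb_reserve(skb, NET_SKB_PAD);	/* skip the head padding */
	skb_put(skb, frame_len);	/* frame data is already in place */
	return skb;
}
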
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb buf is from userspace, we need to notify the
		 * caller that the lower device DMA has finished;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

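/*
 * Added note (layout inferred from __alloc_skb() and kfree_skbmem() in this
 * file): objects in skbuff_fclone_cache are laid out as
 *
 *	[ SKB_FCLONE_ORIG sk_buff ][ SKB_FCLONE_CLONE sk_buff ][ atomic_t ref ]
 *
 * so "skb + 1" is the companion clone and the atomic_t that follows it
 * counts how many of the two are in use.  kfree_skbmem() below relies on
 * exactly this layout when it locates the shared reference count.
 */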
/*
 *	Free the skbuff memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb is meant for buffers
 *	that are being dropped after a failure and notes that in tracing;
 *	consume_skb is for the normal, successful consumption of a buffer.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

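/*
 * Illustrative sketch (hypothetical driver completion handler, not part of
 * this file): consume_skb() is the right call once a buffer has served its
 * purpose, kfree_skb() the right one on an error/drop path, so that the
 * drop tracepoint only fires for real drops.
 */
static inline void skb_free_example(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		consume_skb(skb);	/* normal completion, not a drop */
	else
		kfree_skb(skb);		/* error path, shows up as a drop */
}
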
/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/*	skb_copy_ubufs - copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		__skb_fill_page_desc(skb, i - 1, head, 0,
				     skb_shinfo(skb)->frags[i - 1].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}


/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

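/*
 * Illustrative sketch (hypothetical helper, not part of this file): when a
 * packet must go to a second consumer, skb_clone() shares the packet data
 * while skb_copy() (further below) duplicates it; clone when the data will
 * only be read, copy when it will be written.
 */
static inline struct sk_buff *skb_duplicate_example(struct sk_buff *skb,
						    bool will_modify_data)
{
	if (will_modify_data)
		return skb_copy(skb, GFP_ATOMIC);	/* private head + data */

	return skb_clone(skb, GFP_ATOMIC);		/* shared packet data */
}
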
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy - create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

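/*
 * Illustrative sketch (hypothetical helper, not part of this file): a caller
 * that only needs to rewrite headers can use pskb_copy() (an inline wrapper
 * around __pskb_copy() that keeps the current headroom) so the paged frags
 * stay shared instead of being flattened as skb_copy() would do.  The memset
 * below is a stand-in for a real header rewrite.
 */
static inline struct sk_buff *pskb_copy_example(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (n)
		/* header bytes in n are now private and may be edited */
		memset(n->data, 0, min_t(unsigned int, 4, skb_headlen(n)));
	return n;
}
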
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed. In the latter case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy the zero-copy skb's frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

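/*
 * Illustrative sketch (hypothetical encapsulation path, not part of this
 * file): before pushing a new header, make sure the buffer has writable
 * headroom; skb_realloc_headroom() hands back a buffer that does, possibly
 * a copy, and the original reference is dropped here on success.
 */
static inline struct sk_buff *push_header_example(struct sk_buff *skb,
						  const void *hdr,
						  unsigned int hdr_len)
{
	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, hdr_len);

		if (!nskb)
			return NULL;
		kfree_skb(skb);		/* drop the old reference */
		skb = nskb;
	}
	memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
	return skb;
}
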
/**
 *	skb_copy_expand - copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad - zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

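/*
 * Illustrative sketch (hypothetical receive handler, not part of this file):
 * the usual counterpart of skb_put()/skb_push() when parsing - make sure the
 * bytes are in the linear area with pskb_may_pull(), read the header, then
 * skb_pull() past it.  The 8-byte header size is an arbitrary assumption.
 */
static inline int parse_header_example(struct sk_buff *skb, u8 *type)
{
	const unsigned int hdr_len = 8;	/* hypothetical header size */

	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;		/* packet too short */

	*type = skb->data[0];		/* peek at the header */
	skb_pull(skb, hdr_len);		/* skb->data now points at payload */
	return 0;
}
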
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function only makes sense on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

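/*
 * Illustrative sketch (hypothetical xmit path, not part of this file): a
 * driver that cannot chain scatter/gather buffers may call skb_linearize()
 * (a wrapper that ends up in __pskb_pull_tail() above) so the whole frame
 * sits in the linear area before it is mapped for DMA.
 */
static inline int linearize_example(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) && skb_linearize(skb))
		return -ENOMEM;		/* could not pull frags into the head */

	/* skb->data .. skb->data + skb->len is now one contiguous buffer */
	return 0;
}
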
/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION:
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

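/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * skb_copy_bits() is the safe way to read a byte range out of an skb
 * regardless of how it is split between the linear area, page frags and
 * the frag list - here, a trailer at the very end of the packet.
 */
static inline int copy_trailer_example(const struct sk_buff *skb,
				       void *buf, unsigned int trailer_len)
{
	if (skb->len < trailer_len)
		return -EINVAL;
	/* works even if the trailer lives in page frags or the frag list */
	return skb_copy_bits(skb, skb->len - trailer_len, buf, trailer_len);
}
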
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

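/*
 * Illustrative sketch (hypothetical verification helper, not part of this
 * file): how a caller typically folds the 32-bit partial sum returned by
 * skb_checksum() into the final 16-bit Internet checksum.  A result of 0
 * means the checksummed range (here: the whole packet) verifies.
 */
static inline bool checksum_ok_example(const struct sk_buff *skb)
{
	return csum_fold(skb_checksum(skb, 0, skb->len, 0)) == 0;
}
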
/* Both of the above in one bottle: copy and checksum in a single pass. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

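/*
 * Illustrative sketch (hypothetical deferred-processing queue, not part of
 * this file): the locked queue helpers pair up naturally - a producer in one
 * context queues buffers with skb_queue_tail() (defined below) and a
 * consumer drains them with skb_dequeue() until it returns %NULL.
 */
static inline void drain_queue_example(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(queue)) != NULL)
		consume_skb(skb);	/* stand-in for real processing */
}
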
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 * skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function takes the list
 * lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 * skb_queue_head - queue a buffer at the list head
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the start of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 * skb_queue_tail - queue a buffer at the list tail
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the tail of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 * skb_unlink - remove a buffer from a list
 * @skb: buffer to remove
 * @list: list to use
 *
 * Remove a packet from a list. The list locks are taken and this
 * function is atomic with respect to other locking list functions.
 *
 * You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other locking list functions.
 * A buffer cannot be placed on two lists at the same time.
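 *
 * Illustrative sketch (names are hypothetical); the list lock is taken
 * internally, so the caller needs no extra locking here:
 *
 *	skb_append(old_skb, new_skb, &my_queue);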
2154 */ 2155 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2156 { 2157 unsigned long flags; 2158 2159 spin_lock_irqsave(&list->lock, flags); 2160 __skb_queue_after(list, old, newsk); 2161 spin_unlock_irqrestore(&list->lock, flags); 2162 } 2163 EXPORT_SYMBOL(skb_append); 2164 2165 /** 2166 * skb_insert - insert a buffer 2167 * @old: buffer to insert before 2168 * @newsk: buffer to insert 2169 * @list: list to use 2170 * 2171 * Place a packet before a given packet in a list. The list locks are 2172 * taken and this function is atomic with respect to other list locked 2173 * calls. 2174 * 2175 * A buffer cannot be placed on two lists at the same time. 2176 */ 2177 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2178 { 2179 unsigned long flags; 2180 2181 spin_lock_irqsave(&list->lock, flags); 2182 __skb_insert(newsk, old->prev, old, list); 2183 spin_unlock_irqrestore(&list->lock, flags); 2184 } 2185 EXPORT_SYMBOL(skb_insert); 2186 2187 static inline void skb_split_inside_header(struct sk_buff *skb, 2188 struct sk_buff* skb1, 2189 const u32 len, const int pos) 2190 { 2191 int i; 2192 2193 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2194 pos - len); 2195 /* And move data appendix as is. */ 2196 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2197 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2198 2199 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2200 skb_shinfo(skb)->nr_frags = 0; 2201 skb1->data_len = skb->data_len; 2202 skb1->len += skb1->data_len; 2203 skb->data_len = 0; 2204 skb->len = len; 2205 skb_set_tail_pointer(skb, len); 2206 } 2207 2208 static inline void skb_split_no_header(struct sk_buff *skb, 2209 struct sk_buff* skb1, 2210 const u32 len, int pos) 2211 { 2212 int i, k = 0; 2213 const int nfrags = skb_shinfo(skb)->nr_frags; 2214 2215 skb_shinfo(skb)->nr_frags = 0; 2216 skb1->len = skb1->data_len = skb->len - len; 2217 skb->len = len; 2218 skb->data_len = len - pos; 2219 2220 for (i = 0; i < nfrags; i++) { 2221 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2222 2223 if (pos + size > len) { 2224 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2225 2226 if (pos < len) { 2227 /* Split frag. 2228 * We have two variants in this case: 2229 * 1. Move all the frag to the second 2230 * part, if it is possible. F.e. 2231 * this approach is mandatory for TUX, 2232 * where splitting is expensive. 2233 * 2. Split is accurately. We make this. 2234 */ 2235 skb_frag_ref(skb, i); 2236 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2237 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2238 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2239 skb_shinfo(skb)->nr_frags++; 2240 } 2241 k++; 2242 } else 2243 skb_shinfo(skb)->nr_frags++; 2244 pos += size; 2245 } 2246 skb_shinfo(skb1)->nr_frags = k; 2247 } 2248 2249 /** 2250 * skb_split - Split fragmented skb to two parts at length len. 2251 * @skb: the buffer to split 2252 * @skb1: the buffer to receive the second part 2253 * @len: new length for skb 2254 */ 2255 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2256 { 2257 int pos = skb_headlen(skb); 2258 2259 if (len < pos) /* Split line is inside header. */ 2260 skb_split_inside_header(skb, skb1, len, pos); 2261 else /* Second chunk has no header, nothing to copy. */ 2262 skb_split_no_header(skb, skb1, len, pos); 2263 } 2264 EXPORT_SYMBOL(skb_split); 2265 2266 /* Shifting from/to a cloned skb is a no-go. 
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
 * It's up to the caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * skb may only contain paged data, while tgt is allowed to have non-paged
 * data as well.
 *
 * TODO: full sized shift could be optimized but that would need
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes.
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale!
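			 * (skb_prepare_for_shift() may have called
			 * pskb_expand_head(), which reallocates the shared
			 * info, so fragfrom/fragto are reloaded below)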
*/ 2323 fragfrom = &skb_shinfo(skb)->frags[from]; 2324 fragto = &skb_shinfo(tgt)->frags[merge]; 2325 2326 skb_frag_size_add(fragto, shiftlen); 2327 skb_frag_size_sub(fragfrom, shiftlen); 2328 fragfrom->page_offset += shiftlen; 2329 2330 goto onlymerged; 2331 } 2332 2333 from++; 2334 } 2335 2336 /* Skip full, not-fitting skb to avoid expensive operations */ 2337 if ((shiftlen == skb->len) && 2338 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2339 return 0; 2340 2341 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2342 return 0; 2343 2344 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2345 if (to == MAX_SKB_FRAGS) 2346 return 0; 2347 2348 fragfrom = &skb_shinfo(skb)->frags[from]; 2349 fragto = &skb_shinfo(tgt)->frags[to]; 2350 2351 if (todo >= skb_frag_size(fragfrom)) { 2352 *fragto = *fragfrom; 2353 todo -= skb_frag_size(fragfrom); 2354 from++; 2355 to++; 2356 2357 } else { 2358 __skb_frag_ref(fragfrom); 2359 fragto->page = fragfrom->page; 2360 fragto->page_offset = fragfrom->page_offset; 2361 skb_frag_size_set(fragto, todo); 2362 2363 fragfrom->page_offset += todo; 2364 skb_frag_size_sub(fragfrom, todo); 2365 todo = 0; 2366 2367 to++; 2368 break; 2369 } 2370 } 2371 2372 /* Ready to "commit" this state change to tgt */ 2373 skb_shinfo(tgt)->nr_frags = to; 2374 2375 if (merge >= 0) { 2376 fragfrom = &skb_shinfo(skb)->frags[0]; 2377 fragto = &skb_shinfo(tgt)->frags[merge]; 2378 2379 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2380 __skb_frag_unref(fragfrom); 2381 } 2382 2383 /* Reposition in the original skb */ 2384 to = 0; 2385 while (from < skb_shinfo(skb)->nr_frags) 2386 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2387 skb_shinfo(skb)->nr_frags = to; 2388 2389 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2390 2391 onlymerged: 2392 /* Most likely the tgt won't ever need its checksum anymore, skb on 2393 * the other hand might need it if it needs to be resent 2394 */ 2395 tgt->ip_summed = CHECKSUM_PARTIAL; 2396 skb->ip_summed = CHECKSUM_PARTIAL; 2397 2398 /* Yak, is it really working this way? Some helper please? */ 2399 skb->len -= shiftlen; 2400 skb->data_len -= shiftlen; 2401 skb->truesize -= shiftlen; 2402 tgt->len += shiftlen; 2403 tgt->data_len += shiftlen; 2404 tgt->truesize += shiftlen; 2405 2406 return shiftlen; 2407 } 2408 2409 /** 2410 * skb_prepare_seq_read - Prepare a sequential read of skb data 2411 * @skb: the buffer to read 2412 * @from: lower offset of data to be read 2413 * @to: upper offset of data to be read 2414 * @st: state variable 2415 * 2416 * Initializes the specified state variable. Must be called before 2417 * invoking skb_seq_read() for the first time. 2418 */ 2419 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2420 unsigned int to, struct skb_seq_state *st) 2421 { 2422 st->lower_offset = from; 2423 st->upper_offset = to; 2424 st->root_skb = st->cur_skb = skb; 2425 st->frag_idx = st->stepped_offset = 0; 2426 st->frag_data = NULL; 2427 } 2428 EXPORT_SYMBOL(skb_prepare_seq_read); 2429 2430 /** 2431 * skb_seq_read - Sequentially read skb data 2432 * @consumed: number of bytes consumed by the caller so far 2433 * @data: destination pointer for data to be returned 2434 * @st: state variable 2435 * 2436 * Reads a block of skb data at &consumed relative to the 2437 * lower offset specified to skb_prepare_seq_read(). Assigns 2438 * the head of the data block to &data and returns the length 2439 * of the block or 0 if the end of the skb data or the upper 2440 * offset has been reached. 
2441 * 2442 * The caller is not required to consume all of the data 2443 * returned, i.e. &consumed is typically set to the number 2444 * of bytes already consumed and the next call to 2445 * skb_seq_read() will return the remaining part of the block. 2446 * 2447 * Note 1: The size of each block of data returned can be arbitrary, 2448 * this limitation is the cost for zerocopy seqeuental 2449 * reads of potentially non linear data. 2450 * 2451 * Note 2: Fragment lists within fragments are not implemented 2452 * at the moment, state->root_skb could be replaced with 2453 * a stack for this purpose. 2454 */ 2455 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2456 struct skb_seq_state *st) 2457 { 2458 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2459 skb_frag_t *frag; 2460 2461 if (unlikely(abs_offset >= st->upper_offset)) 2462 return 0; 2463 2464 next_skb: 2465 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2466 2467 if (abs_offset < block_limit && !st->frag_data) { 2468 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2469 return block_limit - abs_offset; 2470 } 2471 2472 if (st->frag_idx == 0 && !st->frag_data) 2473 st->stepped_offset += skb_headlen(st->cur_skb); 2474 2475 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2476 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2477 block_limit = skb_frag_size(frag) + st->stepped_offset; 2478 2479 if (abs_offset < block_limit) { 2480 if (!st->frag_data) 2481 st->frag_data = kmap_skb_frag(frag); 2482 2483 *data = (u8 *) st->frag_data + frag->page_offset + 2484 (abs_offset - st->stepped_offset); 2485 2486 return block_limit - abs_offset; 2487 } 2488 2489 if (st->frag_data) { 2490 kunmap_skb_frag(st->frag_data); 2491 st->frag_data = NULL; 2492 } 2493 2494 st->frag_idx++; 2495 st->stepped_offset += skb_frag_size(frag); 2496 } 2497 2498 if (st->frag_data) { 2499 kunmap_skb_frag(st->frag_data); 2500 st->frag_data = NULL; 2501 } 2502 2503 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2504 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2505 st->frag_idx = 0; 2506 goto next_skb; 2507 } else if (st->cur_skb->next) { 2508 st->cur_skb = st->cur_skb->next; 2509 st->frag_idx = 0; 2510 goto next_skb; 2511 } 2512 2513 return 0; 2514 } 2515 EXPORT_SYMBOL(skb_seq_read); 2516 2517 /** 2518 * skb_abort_seq_read - Abort a sequential read of skb data 2519 * @st: state variable 2520 * 2521 * Must be called if skb_seq_read() was not called until it 2522 * returned 0. 2523 */ 2524 void skb_abort_seq_read(struct skb_seq_state *st) 2525 { 2526 if (st->frag_data) 2527 kunmap_skb_frag(st->frag_data); 2528 } 2529 EXPORT_SYMBOL(skb_abort_seq_read); 2530 2531 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2532 2533 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2534 struct ts_config *conf, 2535 struct ts_state *state) 2536 { 2537 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2538 } 2539 2540 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2541 { 2542 skb_abort_seq_read(TS_SKB_CB(state)); 2543 } 2544 2545 /** 2546 * skb_find_text - Find a text pattern in skb data 2547 * @skb: the buffer to look in 2548 * @from: search offset 2549 * @to: search limit 2550 * @config: textsearch configuration 2551 * @state: uninitialized textsearch state variable 2552 * 2553 * Finds a pattern in the skb data according to the specified 2554 * textsearch configuration. 
Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data
 * @getfrag: callback function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		skb_frag_size_add(frag, copy);
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum.  It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
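 *
 * Illustrative sketch (the 4-byte tag length is an assumption): pull a
 * hypothetical encapsulation tag while keeping CHECKSUM_COMPLETE valid:
 *
 *	if (!pskb_may_pull(skb, 4))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);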
2653 */ 2654 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2655 { 2656 BUG_ON(len > skb->len); 2657 skb->len -= len; 2658 BUG_ON(skb->len < skb->data_len); 2659 skb_postpull_rcsum(skb, skb->data, len); 2660 return skb->data += len; 2661 } 2662 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2663 2664 /** 2665 * skb_segment - Perform protocol segmentation on skb. 2666 * @skb: buffer to segment 2667 * @features: features for the output path (see dev->features) 2668 * 2669 * This function performs segmentation on the given skb. It returns 2670 * a pointer to the first in a list of new skbs for the segments. 2671 * In case of error it returns ERR_PTR(err). 2672 */ 2673 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2674 { 2675 struct sk_buff *segs = NULL; 2676 struct sk_buff *tail = NULL; 2677 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2678 unsigned int mss = skb_shinfo(skb)->gso_size; 2679 unsigned int doffset = skb->data - skb_mac_header(skb); 2680 unsigned int offset = doffset; 2681 unsigned int headroom; 2682 unsigned int len; 2683 int sg = !!(features & NETIF_F_SG); 2684 int nfrags = skb_shinfo(skb)->nr_frags; 2685 int err = -ENOMEM; 2686 int i = 0; 2687 int pos; 2688 2689 __skb_push(skb, doffset); 2690 headroom = skb_headroom(skb); 2691 pos = skb_headlen(skb); 2692 2693 do { 2694 struct sk_buff *nskb; 2695 skb_frag_t *frag; 2696 int hsize; 2697 int size; 2698 2699 len = skb->len - offset; 2700 if (len > mss) 2701 len = mss; 2702 2703 hsize = skb_headlen(skb) - offset; 2704 if (hsize < 0) 2705 hsize = 0; 2706 if (hsize > len || !sg) 2707 hsize = len; 2708 2709 if (!hsize && i >= nfrags) { 2710 BUG_ON(fskb->len != len); 2711 2712 pos += len; 2713 nskb = skb_clone(fskb, GFP_ATOMIC); 2714 fskb = fskb->next; 2715 2716 if (unlikely(!nskb)) 2717 goto err; 2718 2719 hsize = skb_end_pointer(nskb) - nskb->head; 2720 if (skb_cow_head(nskb, doffset + headroom)) { 2721 kfree_skb(nskb); 2722 goto err; 2723 } 2724 2725 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2726 hsize; 2727 skb_release_head_state(nskb); 2728 __skb_push(nskb, doffset); 2729 } else { 2730 nskb = alloc_skb(hsize + doffset + headroom, 2731 GFP_ATOMIC); 2732 2733 if (unlikely(!nskb)) 2734 goto err; 2735 2736 skb_reserve(nskb, headroom); 2737 __skb_put(nskb, doffset); 2738 } 2739 2740 if (segs) 2741 tail->next = nskb; 2742 else 2743 segs = nskb; 2744 tail = nskb; 2745 2746 __copy_skb_header(nskb, skb); 2747 nskb->mac_len = skb->mac_len; 2748 2749 /* nskb and skb might have different headroom */ 2750 if (nskb->ip_summed == CHECKSUM_PARTIAL) 2751 nskb->csum_start += skb_headroom(nskb) - headroom; 2752 2753 skb_reset_mac_header(nskb); 2754 skb_set_network_header(nskb, skb->mac_len); 2755 nskb->transport_header = (nskb->network_header + 2756 skb_network_header_len(skb)); 2757 skb_copy_from_linear_data(skb, nskb->data, doffset); 2758 2759 if (fskb != skb_shinfo(skb)->frag_list) 2760 continue; 2761 2762 if (!sg) { 2763 nskb->ip_summed = CHECKSUM_NONE; 2764 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2765 skb_put(nskb, len), 2766 len, 0); 2767 continue; 2768 } 2769 2770 frag = skb_shinfo(nskb)->frags; 2771 2772 skb_copy_from_linear_data_offset(skb, offset, 2773 skb_put(nskb, hsize), hsize); 2774 2775 while (pos < offset + len && i < nfrags) { 2776 *frag = skb_shinfo(skb)->frags[i]; 2777 __skb_frag_ref(frag); 2778 size = skb_frag_size(frag); 2779 2780 if (pos < offset) { 2781 frag->page_offset += offset - pos; 2782 skb_frag_size_sub(frag, offset - pos); 2783 } 2784 2785 
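			/* The (possibly head-trimmed) frag now belongs to
			 * nskb, so account for it before moving on.
			 */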
skb_shinfo(nskb)->nr_frags++; 2786 2787 if (pos + size <= offset + len) { 2788 i++; 2789 pos += size; 2790 } else { 2791 skb_frag_size_sub(frag, pos + size - (offset + len)); 2792 goto skip_fraglist; 2793 } 2794 2795 frag++; 2796 } 2797 2798 if (pos < offset + len) { 2799 struct sk_buff *fskb2 = fskb; 2800 2801 BUG_ON(pos + fskb->len != offset + len); 2802 2803 pos += fskb->len; 2804 fskb = fskb->next; 2805 2806 if (fskb2->next) { 2807 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2808 if (!fskb2) 2809 goto err; 2810 } else 2811 skb_get(fskb2); 2812 2813 SKB_FRAG_ASSERT(nskb); 2814 skb_shinfo(nskb)->frag_list = fskb2; 2815 } 2816 2817 skip_fraglist: 2818 nskb->data_len = len - hsize; 2819 nskb->len += nskb->data_len; 2820 nskb->truesize += nskb->data_len; 2821 } while ((offset += len) < skb->len); 2822 2823 return segs; 2824 2825 err: 2826 while ((skb = segs)) { 2827 segs = skb->next; 2828 kfree_skb(skb); 2829 } 2830 return ERR_PTR(err); 2831 } 2832 EXPORT_SYMBOL_GPL(skb_segment); 2833 2834 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2835 { 2836 struct sk_buff *p = *head; 2837 struct sk_buff *nskb; 2838 struct skb_shared_info *skbinfo = skb_shinfo(skb); 2839 struct skb_shared_info *pinfo = skb_shinfo(p); 2840 unsigned int headroom; 2841 unsigned int len = skb_gro_len(skb); 2842 unsigned int offset = skb_gro_offset(skb); 2843 unsigned int headlen = skb_headlen(skb); 2844 2845 if (p->len + len >= 65536) 2846 return -E2BIG; 2847 2848 if (pinfo->frag_list) 2849 goto merge; 2850 else if (headlen <= offset) { 2851 skb_frag_t *frag; 2852 skb_frag_t *frag2; 2853 int i = skbinfo->nr_frags; 2854 int nr_frags = pinfo->nr_frags + i; 2855 2856 offset -= headlen; 2857 2858 if (nr_frags > MAX_SKB_FRAGS) 2859 return -E2BIG; 2860 2861 pinfo->nr_frags = nr_frags; 2862 skbinfo->nr_frags = 0; 2863 2864 frag = pinfo->frags + nr_frags; 2865 frag2 = skbinfo->frags + i; 2866 do { 2867 *--frag = *--frag2; 2868 } while (--i); 2869 2870 frag->page_offset += offset; 2871 skb_frag_size_sub(frag, offset); 2872 2873 skb->truesize -= skb->data_len; 2874 skb->len -= skb->data_len; 2875 skb->data_len = 0; 2876 2877 NAPI_GRO_CB(skb)->free = 1; 2878 goto done; 2879 } else if (skb_gro_len(p) != pinfo->gso_size) 2880 return -E2BIG; 2881 2882 headroom = skb_headroom(p); 2883 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 2884 if (unlikely(!nskb)) 2885 return -ENOMEM; 2886 2887 __copy_skb_header(nskb, p); 2888 nskb->mac_len = p->mac_len; 2889 2890 skb_reserve(nskb, headroom); 2891 __skb_put(nskb, skb_gro_offset(p)); 2892 2893 skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 2894 skb_set_network_header(nskb, skb_network_offset(p)); 2895 skb_set_transport_header(nskb, skb_transport_offset(p)); 2896 2897 __skb_pull(p, skb_gro_offset(p)); 2898 memcpy(skb_mac_header(nskb), skb_mac_header(p), 2899 p->data - skb_mac_header(p)); 2900 2901 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2902 skb_shinfo(nskb)->frag_list = p; 2903 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2904 pinfo->gso_size = 0; 2905 skb_header_release(p); 2906 nskb->prev = p; 2907 2908 nskb->data_len += p->len; 2909 nskb->truesize += p->truesize; 2910 nskb->len += p->len; 2911 2912 *head = nskb; 2913 nskb->next = p->next; 2914 p->next = NULL; 2915 2916 p = nskb; 2917 2918 merge: 2919 p->truesize += skb->truesize - len; 2920 if (offset > headlen) { 2921 unsigned int eat = offset - headlen; 2922 2923 skbinfo->frags[0].page_offset += eat; 2924 skb_frag_size_sub(&skbinfo->frags[0], eat); 2925 skb->data_len -= eat; 2926 skb->len -= eat; 2927 offset = 
headlen; 2928 } 2929 2930 __skb_pull(skb, offset); 2931 2932 p->prev->next = skb; 2933 p->prev = skb; 2934 skb_header_release(skb); 2935 2936 done: 2937 NAPI_GRO_CB(p)->count++; 2938 p->data_len += len; 2939 p->truesize += len; 2940 p->len += len; 2941 2942 NAPI_GRO_CB(skb)->same_flow = 1; 2943 return 0; 2944 } 2945 EXPORT_SYMBOL_GPL(skb_gro_receive); 2946 2947 void __init skb_init(void) 2948 { 2949 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 2950 sizeof(struct sk_buff), 2951 0, 2952 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2953 NULL); 2954 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2955 (2*sizeof(struct sk_buff)) + 2956 sizeof(atomic_t), 2957 0, 2958 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2959 NULL); 2960 } 2961 2962 /** 2963 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 2964 * @skb: Socket buffer containing the buffers to be mapped 2965 * @sg: The scatter-gather list to map into 2966 * @offset: The offset into the buffer's contents to start mapping 2967 * @len: Length of buffer space to be mapped 2968 * 2969 * Fill the specified scatter-gather list with mappings/pointers into a 2970 * region of the buffer space attached to a socket buffer. 2971 */ 2972 static int 2973 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2974 { 2975 int start = skb_headlen(skb); 2976 int i, copy = start - offset; 2977 struct sk_buff *frag_iter; 2978 int elt = 0; 2979 2980 if (copy > 0) { 2981 if (copy > len) 2982 copy = len; 2983 sg_set_buf(sg, skb->data + offset, copy); 2984 elt++; 2985 if ((len -= copy) == 0) 2986 return elt; 2987 offset += copy; 2988 } 2989 2990 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2991 int end; 2992 2993 WARN_ON(start > offset + len); 2994 2995 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2996 if ((copy = end - offset) > 0) { 2997 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2998 2999 if (copy > len) 3000 copy = len; 3001 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3002 frag->page_offset+offset-start); 3003 elt++; 3004 if (!(len -= copy)) 3005 return elt; 3006 offset += copy; 3007 } 3008 start = end; 3009 } 3010 3011 skb_walk_frags(skb, frag_iter) { 3012 int end; 3013 3014 WARN_ON(start > offset + len); 3015 3016 end = start + frag_iter->len; 3017 if ((copy = end - offset) > 0) { 3018 if (copy > len) 3019 copy = len; 3020 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3021 copy); 3022 if ((len -= copy) == 0) 3023 return elt; 3024 offset += copy; 3025 } 3026 start = end; 3027 } 3028 BUG_ON(len); 3029 return elt; 3030 } 3031 3032 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3033 { 3034 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3035 3036 sg_mark_end(&sg[nsg - 1]); 3037 3038 return nsg; 3039 } 3040 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3041 3042 /** 3043 * skb_cow_data - Check that a socket buffer's data buffers are writable 3044 * @skb: The socket buffer to check. 3045 * @tailbits: Amount of trailing space to be added 3046 * @trailer: Returned pointer to the skb where the @tailbits space begins 3047 * 3048 * Make sure that the data buffers attached to a socket buffer are 3049 * writable. If they are not, private copies are made of the data buffers 3050 * and the socket buffer is set to use these instead. 3051 * 3052 * If @tailbits is given, make sure that there is space to write @tailbits 3053 * bytes of data beyond current end of socket buffer. @trailer will be 3054 * set to point to the skb in which this space begins. 
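 *
 * As noted below, the return value is the number of scatterlist elements
 * needed to map the result, so a typical caller (sketch; variable names
 * are assumptions) sizes an sg table with it before calling skb_to_sgvec():
 *
 *	nsg = skb_cow_data(skb, trailer_len, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *	sg_init_table(sg, nsg);
 *	skb_to_sgvec(skb, sg, 0, skb->len);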
3055 * 3056 * The number of scatterlist elements required to completely map the 3057 * COW'd and extended socket buffer will be returned. 3058 */ 3059 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3060 { 3061 int copyflag; 3062 int elt; 3063 struct sk_buff *skb1, **skb_p; 3064 3065 /* If skb is cloned or its head is paged, reallocate 3066 * head pulling out all the pages (pages are considered not writable 3067 * at the moment even if they are anonymous). 3068 */ 3069 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3070 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3071 return -ENOMEM; 3072 3073 /* Easy case. Most of packets will go this way. */ 3074 if (!skb_has_frag_list(skb)) { 3075 /* A little of trouble, not enough of space for trailer. 3076 * This should not happen, when stack is tuned to generate 3077 * good frames. OK, on miss we reallocate and reserve even more 3078 * space, 128 bytes is fair. */ 3079 3080 if (skb_tailroom(skb) < tailbits && 3081 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3082 return -ENOMEM; 3083 3084 /* Voila! */ 3085 *trailer = skb; 3086 return 1; 3087 } 3088 3089 /* Misery. We are in troubles, going to mincer fragments... */ 3090 3091 elt = 1; 3092 skb_p = &skb_shinfo(skb)->frag_list; 3093 copyflag = 0; 3094 3095 while ((skb1 = *skb_p) != NULL) { 3096 int ntail = 0; 3097 3098 /* The fragment is partially pulled by someone, 3099 * this can happen on input. Copy it and everything 3100 * after it. */ 3101 3102 if (skb_shared(skb1)) 3103 copyflag = 1; 3104 3105 /* If the skb is the last, worry about trailer. */ 3106 3107 if (skb1->next == NULL && tailbits) { 3108 if (skb_shinfo(skb1)->nr_frags || 3109 skb_has_frag_list(skb1) || 3110 skb_tailroom(skb1) < tailbits) 3111 ntail = tailbits + 128; 3112 } 3113 3114 if (copyflag || 3115 skb_cloned(skb1) || 3116 ntail || 3117 skb_shinfo(skb1)->nr_frags || 3118 skb_has_frag_list(skb1)) { 3119 struct sk_buff *skb2; 3120 3121 /* Fuck, we are miserable poor guys... */ 3122 if (ntail == 0) 3123 skb2 = skb_copy(skb1, GFP_ATOMIC); 3124 else 3125 skb2 = skb_copy_expand(skb1, 3126 skb_headroom(skb1), 3127 ntail, 3128 GFP_ATOMIC); 3129 if (unlikely(skb2 == NULL)) 3130 return -ENOMEM; 3131 3132 if (skb1->sk) 3133 skb_set_owner_w(skb2, skb1->sk); 3134 3135 /* Looking around. Are we still alive? 
3136 * OK, link new skb, drop old one */ 3137 3138 skb2->next = skb1->next; 3139 *skb_p = skb2; 3140 kfree_skb(skb1); 3141 skb1 = skb2; 3142 } 3143 elt++; 3144 *trailer = skb1; 3145 skb_p = &skb1->next; 3146 } 3147 3148 return elt; 3149 } 3150 EXPORT_SYMBOL_GPL(skb_cow_data); 3151 3152 static void sock_rmem_free(struct sk_buff *skb) 3153 { 3154 struct sock *sk = skb->sk; 3155 3156 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3157 } 3158 3159 /* 3160 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3161 */ 3162 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3163 { 3164 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3165 (unsigned)sk->sk_rcvbuf) 3166 return -ENOMEM; 3167 3168 skb_orphan(skb); 3169 skb->sk = sk; 3170 skb->destructor = sock_rmem_free; 3171 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3172 3173 /* before exiting rcu section, make sure dst is refcounted */ 3174 skb_dst_force(skb); 3175 3176 skb_queue_tail(&sk->sk_error_queue, skb); 3177 if (!sock_flag(sk, SOCK_DEAD)) 3178 sk->sk_data_ready(sk, skb->len); 3179 return 0; 3180 } 3181 EXPORT_SYMBOL(sock_queue_err_skb); 3182 3183 void skb_tstamp_tx(struct sk_buff *orig_skb, 3184 struct skb_shared_hwtstamps *hwtstamps) 3185 { 3186 struct sock *sk = orig_skb->sk; 3187 struct sock_exterr_skb *serr; 3188 struct sk_buff *skb; 3189 int err; 3190 3191 if (!sk) 3192 return; 3193 3194 skb = skb_clone(orig_skb, GFP_ATOMIC); 3195 if (!skb) 3196 return; 3197 3198 if (hwtstamps) { 3199 *skb_hwtstamps(skb) = 3200 *hwtstamps; 3201 } else { 3202 /* 3203 * no hardware time stamps available, 3204 * so keep the shared tx_flags and only 3205 * store software time stamp 3206 */ 3207 skb->tstamp = ktime_get_real(); 3208 } 3209 3210 serr = SKB_EXT_ERR(skb); 3211 memset(serr, 0, sizeof(*serr)); 3212 serr->ee.ee_errno = ENOMSG; 3213 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3214 3215 err = sock_queue_err_skb(sk, skb); 3216 3217 if (err) 3218 kfree_skb(skb); 3219 } 3220 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3221 3222 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3223 { 3224 struct sock *sk = skb->sk; 3225 struct sock_exterr_skb *serr; 3226 int err; 3227 3228 skb->wifi_acked_valid = 1; 3229 skb->wifi_acked = acked; 3230 3231 serr = SKB_EXT_ERR(skb); 3232 memset(serr, 0, sizeof(*serr)); 3233 serr->ee.ee_errno = ENOMSG; 3234 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3235 3236 err = sock_queue_err_skb(sk, skb); 3237 if (err) 3238 kfree_skb(skb); 3239 } 3240 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3241 3242 3243 /** 3244 * skb_partial_csum_set - set up and verify partial csum values for packet 3245 * @skb: the skb to set 3246 * @start: the number of bytes after skb->data to start checksumming. 3247 * @off: the offset from start to place the checksum. 3248 * 3249 * For untrusted partially-checksummed packets, we need to make sure the values 3250 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3251 * 3252 * This function checks and sets those values and skb->ip_summed: if this 3253 * returns false you should drop the packet. 
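 *
 * Illustrative use (assuming the caller has already validated that the
 * packet carries a UDP header at the transport offset):
 *
 *	if (!skb_partial_csum_set(skb, skb_transport_offset(skb),
 *				  offsetof(struct udphdr, check)))
 *		goto drop;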
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
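
/*
 * Illustrative sketch (not part of the kernel API above): walk every byte
 * of an skb with the zero-copy sequential read helpers defined in this
 * file.  The function name and the __maybe_unused annotation are
 * assumptions made purely for this example.
 */
static unsigned int __maybe_unused skb_seq_read_example(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);

	/* Each call returns one contiguous block; 0 signals end of data. */
	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
		consumed += len;

	/*
	 * skb_seq_read() was called until it returned 0, so its state was
	 * torn down internally and skb_abort_seq_read() is not required.
	 */
	return consumed;
}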