/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	kmemcheck_annotate_bitfield(skb, flags1);
	kmemcheck_annotate_bitfield(skb, flags2);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, 0);
	return page;
}
EXPORT_SYMBOL(__netdev_alloc_page);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);
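
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * driver receive path built on the allocators above. Names such as
 * "my_dev", "rx_buf" and "frame_len" are hypothetical; eth_type_trans()
 * additionally needs <linux/etherdevice.h>.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __netdev_alloc_skb(my_dev, frame_len + NET_IP_ALIGN, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);			(align the IP header)
 *	memcpy(skb_put(skb, frame_len), rx_buf, frame_len);
 *	skb->protocol = eth_type_trans(skb, my_dev);
 *	netif_rx(skb);
 */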

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_has_frags(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
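
/*
 * Usage sketch (illustrative, not part of the original file): in a driver's
 * TX-completion handler, consume_skb() is used for buffers that were sent
 * successfully so they do not show up as drops in tracing, while kfree_skb()
 * is kept for genuine error paths. "tx_ok" is a hypothetical status flag.
 *
 *	if (tx_ok)
 *		consume_skb(skb);
 *	else
 *		kfree_skb(skb);
 */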

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	@skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (irqs_disabled())
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
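
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * recycles receive buffers might try to reuse a completed skb before
 * falling back to freeing it. "rx_buf_len", "priv" and "my_refill_rx_ring()"
 * are hypothetical.
 *
 *	if (skb_recycle_check(skb, rx_buf_len))
 *		my_refill_rx_ring(priv, skb);	(reuse as a fresh RX buffer)
 *	else
 *		dev_kfree_skb_any(skb);
 */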

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
	new->deliver_no_wcard = old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
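
/*
 * Usage sketch (illustrative, not part of the original file): keeping a
 * reference for possible retransmission while handing a clone to the
 * device. Both sk_buffs share the packet data, so neither side may write
 * to it without first making the data private (e.g. via pskb_expand_head()).
 * "drop" is a hypothetical error label.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!clone)
 *		goto drop;
 *	dev_queue_xmit(clone);		(original skb stays on our queue)
 */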

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frags(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);
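
/*
 * Usage sketch (illustrative, not part of the original file): choosing
 * between the copy variants. skb_copy() linearises and privatises
 * everything and is the safe choice when payload bytes will be rewritten;
 * pskb_copy() only privatises the linear header, so it is cheaper when
 * just the headers are going to change. "need_to_edit_payload" and "drop"
 * are hypothetical.
 *
 *	struct sk_buff *w;
 *
 *	if (need_to_edit_payload)
 *		w = skb_copy(skb, GFP_ATOMIC);
 *	else
 *		w = pskb_copy(skb, GFP_ATOMIC);
 *	if (!w)
 *		goto drop;
 */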

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or a
 *	negative error code if expansion failed. In the latter case the
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_has_frags(skb))
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
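
/*
 * Usage sketch (illustrative, not part of the original file): making room
 * to prepend an encapsulation header of "hlen" bytes (hypothetical) when
 * the existing headroom may be too small or the buffer may be cloned.
 * skb_realloc_headroom() returns a private copy, so the original is freed
 * once the copy has been taken over. "drop" is a hypothetical error label.
 *
 *	if (skb_headroom(skb) < hlen || skb_cloned(skb)) {
 *		struct sk_buff *nskb = skb_realloc_headroom(skb, hlen);
 *
 *		if (!nskb)
 *			goto drop;
 *		kfree_skb(skb);
 *		skb = nskb;
 *	}
 *	skb_push(skb, hlen);		(now guaranteed to fit)
 */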

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);
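
/*
 * Usage sketch (illustrative, not part of the original file): padding a
 * short Ethernet frame before handing it to hardware that cannot pad on
 * its own. skb_pad() only zeroes the tail room, so the driver still
 * transmits ETH_ZLEN bytes ("tx_len" is hypothetical); note the skb is
 * freed by skb_pad() itself on error.
 *
 *	if (skb->len < ETH_ZLEN) {
 *		if (skb_pad(skb, ETH_ZLEN - skb->len))
 *			return NETDEV_TX_OK;	(skb already freed)
 *		tx_len = ETH_ZLEN;
 *	}
 */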

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frags(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
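
/*
 * Usage sketch (illustrative, not part of the original file): building a
 * packet with the data-pointer helpers above. "hlen", "dlen", "hdr" and
 * "payload" are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);				(leave headroom)
 *	memcpy(skb_put(skb, dlen), payload, dlen);	(append payload)
 *	memcpy(skb_push(skb, hlen), hdr, hlen);		(prepend header)
 *
 * A receiver walks the other way: skb_pull(skb, hlen) strips the header
 * again, and pskb_trim(skb, new_len) removes trailing padding.
 */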

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frags(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in troubles.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
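
/*
 * Usage sketch (illustrative, not part of the original file): peeking at a
 * fixed-size header that may be spread over the linear area, the page frags
 * or the frag list. skb_copy_bits() hides that layout from the caller.
 * "struct my_proto_hdr", "offset" and "malformed" are hypothetical.
 *
 *	struct my_proto_hdr hdr;
 *
 *	if (skb_copy_bits(skb, offset, &hdr, sizeof(hdr)) < 0)
 *		goto malformed;
 */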

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages  */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);
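
/*
 * Usage sketch (illustrative, not part of the original file): checksumming
 * a whole packet in software, e.g. to verify a checksum the device could
 * not, for a protocol whose checksum covers exactly this range. csum_fold()
 * comes from <net/checksum.h>, which is already included above; "csum_error"
 * is a hypothetical label.
 *
 *	__wsum csum = skb_checksum(skb, 0, skb->len, 0);
 *
 *	if (csum_fold(csum))
 *		goto csum_error;	(non-zero fold means a bad checksum)
 */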

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
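
/*
 * Usage sketch (illustrative, not part of the original file): a private
 * holding queue protected by the list lock, as used by many drivers.
 * "my_queue" is hypothetical; the producer queues an skb it owns.
 *
 *	struct sk_buff_head my_queue;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&my_queue);
 *	skb_queue_tail(&my_queue, skb);		(producer)
 *	skb = skb_dequeue(&my_queue);		(consumer, NULL when empty)
 *	skb_queue_purge(&my_queue);		(drop anything left)
 */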

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split is accurately. We make this.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
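
/*
 * Usage sketch (illustrative, not part of the original file): splitting a
 * buffer at "split_len" bytes (hypothetical), roughly as the TCP output
 * path does when it fragments an over-sized segment. The second buffer
 * only needs tailroom for whatever part of the linear header ends up in it.
 *
 *	struct sk_buff *rest = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (!rest)
 *		goto nomem;
 *	skb_split(skb, rest, split_len);
 *	(skb now carries the first split_len bytes, rest the remainder)
 */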
2092  *
2093  * Caller cannot keep skb_shinfo related pointers past calling here!
2094  */
2095 static int skb_prepare_for_shift(struct sk_buff *skb)
2096 {
2097 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2098 }
2099 
2100 /**
2101  * skb_shift - Shifts paged data partially from skb to another
2102  * @tgt: buffer into which tail data gets added
2103  * @skb: buffer from which the paged data comes
2104  * @shiftlen: shift up to this many bytes
2105  *
2106  * Attempts to shift up to @shiftlen worth of bytes, which may be less than
2107  * the length of the skb, from @skb to @tgt. Returns the number of bytes shifted.
2108  * It's up to the caller to free @skb if everything was shifted.
2109  *
2110  * If @tgt runs out of frags, the whole operation is aborted.
2111  *
2112  * @skb cannot contain anything else but paged data, while @tgt is allowed
2113  * to have non-paged (linear) data as well.
2114  *
2115  * TODO: a full-sized shift could be optimized, but that would need a
2116  * specialized skb free routine to handle frags without an up-to-date nr_frags.
2117  */
2118 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2119 {
2120 	int from, to, merge, todo;
2121 	struct skb_frag_struct *fragfrom, *fragto;
2122 
2123 	BUG_ON(shiftlen > skb->len);
2124 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2125 
2126 	todo = shiftlen;
2127 	from = 0;
2128 	to = skb_shinfo(tgt)->nr_frags;
2129 	fragfrom = &skb_shinfo(skb)->frags[from];
2130 
2131 	/* Actual merge is delayed until the point when we know we can
2132 	 * commit all, so that we don't have to undo partial changes
2133 	 */
2134 	if (!to ||
2135 	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2136 		merge = -1;
2137 	} else {
2138 		merge = to - 1;
2139 
2140 		todo -= fragfrom->size;
2141 		if (todo < 0) {
2142 			if (skb_prepare_for_shift(skb) ||
2143 			    skb_prepare_for_shift(tgt))
2144 				return 0;
2145 
2146 			/* All previous frag pointers might be stale!
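			 * pskb_expand_head() may just have copied the data area and the
			 * shared info, so fragfrom/fragto are re-derived from the new
			 * shinfo right below.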
*/ 2147 fragfrom = &skb_shinfo(skb)->frags[from]; 2148 fragto = &skb_shinfo(tgt)->frags[merge]; 2149 2150 fragto->size += shiftlen; 2151 fragfrom->size -= shiftlen; 2152 fragfrom->page_offset += shiftlen; 2153 2154 goto onlymerged; 2155 } 2156 2157 from++; 2158 } 2159 2160 /* Skip full, not-fitting skb to avoid expensive operations */ 2161 if ((shiftlen == skb->len) && 2162 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2163 return 0; 2164 2165 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2166 return 0; 2167 2168 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2169 if (to == MAX_SKB_FRAGS) 2170 return 0; 2171 2172 fragfrom = &skb_shinfo(skb)->frags[from]; 2173 fragto = &skb_shinfo(tgt)->frags[to]; 2174 2175 if (todo >= fragfrom->size) { 2176 *fragto = *fragfrom; 2177 todo -= fragfrom->size; 2178 from++; 2179 to++; 2180 2181 } else { 2182 get_page(fragfrom->page); 2183 fragto->page = fragfrom->page; 2184 fragto->page_offset = fragfrom->page_offset; 2185 fragto->size = todo; 2186 2187 fragfrom->page_offset += todo; 2188 fragfrom->size -= todo; 2189 todo = 0; 2190 2191 to++; 2192 break; 2193 } 2194 } 2195 2196 /* Ready to "commit" this state change to tgt */ 2197 skb_shinfo(tgt)->nr_frags = to; 2198 2199 if (merge >= 0) { 2200 fragfrom = &skb_shinfo(skb)->frags[0]; 2201 fragto = &skb_shinfo(tgt)->frags[merge]; 2202 2203 fragto->size += fragfrom->size; 2204 put_page(fragfrom->page); 2205 } 2206 2207 /* Reposition in the original skb */ 2208 to = 0; 2209 while (from < skb_shinfo(skb)->nr_frags) 2210 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2211 skb_shinfo(skb)->nr_frags = to; 2212 2213 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2214 2215 onlymerged: 2216 /* Most likely the tgt won't ever need its checksum anymore, skb on 2217 * the other hand might need it if it needs to be resent 2218 */ 2219 tgt->ip_summed = CHECKSUM_PARTIAL; 2220 skb->ip_summed = CHECKSUM_PARTIAL; 2221 2222 /* Yak, is it really working this way? Some helper please? */ 2223 skb->len -= shiftlen; 2224 skb->data_len -= shiftlen; 2225 skb->truesize -= shiftlen; 2226 tgt->len += shiftlen; 2227 tgt->data_len += shiftlen; 2228 tgt->truesize += shiftlen; 2229 2230 return shiftlen; 2231 } 2232 2233 /** 2234 * skb_prepare_seq_read - Prepare a sequential read of skb data 2235 * @skb: the buffer to read 2236 * @from: lower offset of data to be read 2237 * @to: upper offset of data to be read 2238 * @st: state variable 2239 * 2240 * Initializes the specified state variable. Must be called before 2241 * invoking skb_seq_read() for the first time. 2242 */ 2243 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2244 unsigned int to, struct skb_seq_state *st) 2245 { 2246 st->lower_offset = from; 2247 st->upper_offset = to; 2248 st->root_skb = st->cur_skb = skb; 2249 st->frag_idx = st->stepped_offset = 0; 2250 st->frag_data = NULL; 2251 } 2252 EXPORT_SYMBOL(skb_prepare_seq_read); 2253 2254 /** 2255 * skb_seq_read - Sequentially read skb data 2256 * @consumed: number of bytes consumed by the caller so far 2257 * @data: destination pointer for data to be returned 2258 * @st: state variable 2259 * 2260 * Reads a block of skb data at &consumed relative to the 2261 * lower offset specified to skb_prepare_seq_read(). Assigns 2262 * the head of the data block to &data and returns the length 2263 * of the block or 0 if the end of the skb data or the upper 2264 * offset has been reached. 
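 *
 *	A rough usage sketch (illustrative only; process_block() is a
 *	hypothetical consumer, not part of this API):
 *
 *		struct skb_seq_state st;
 *		unsigned int consumed = 0, len;
 *		const u8 *data;
 *
 *		skb_prepare_seq_read(skb, 0, skb->len, &st);
 *		while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *			if (process_block(data, len) < 0) {
 *				skb_abort_seq_read(&st);
 *				break;
 *			}
 *			consumed += len;
 *		}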
2265  *
2266  * The caller is not required to consume all of the data
2267  * returned, i.e. &consumed is typically set to the number
2268  * of bytes already consumed and the next call to
2269  * skb_seq_read() will return the remaining part of the block.
2270  *
2271  * Note 1: The size of each block of data returned can be arbitrary;
2272  *         this limitation is the cost of zero-copy sequential
2273  *         reads of potentially non-linear data.
2274  *
2275  * Note 2: Fragment lists within fragments are not implemented
2276  *         at the moment, state->root_skb could be replaced with
2277  *         a stack for this purpose.
2278  */
2279 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2280 			  struct skb_seq_state *st)
2281 {
2282 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2283 	skb_frag_t *frag;
2284 
2285 	if (unlikely(abs_offset >= st->upper_offset))
2286 		return 0;
2287 
2288 next_skb:
2289 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2290 
2291 	if (abs_offset < block_limit && !st->frag_data) {
2292 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2293 		return block_limit - abs_offset;
2294 	}
2295 
2296 	if (st->frag_idx == 0 && !st->frag_data)
2297 		st->stepped_offset += skb_headlen(st->cur_skb);
2298 
2299 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2300 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2301 		block_limit = frag->size + st->stepped_offset;
2302 
2303 		if (abs_offset < block_limit) {
2304 			if (!st->frag_data)
2305 				st->frag_data = kmap_skb_frag(frag);
2306 
2307 			*data = (u8 *) st->frag_data + frag->page_offset +
2308 				(abs_offset - st->stepped_offset);
2309 
2310 			return block_limit - abs_offset;
2311 		}
2312 
2313 		if (st->frag_data) {
2314 			kunmap_skb_frag(st->frag_data);
2315 			st->frag_data = NULL;
2316 		}
2317 
2318 		st->frag_idx++;
2319 		st->stepped_offset += frag->size;
2320 	}
2321 
2322 	if (st->frag_data) {
2323 		kunmap_skb_frag(st->frag_data);
2324 		st->frag_data = NULL;
2325 	}
2326 
2327 	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
2328 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2329 		st->frag_idx = 0;
2330 		goto next_skb;
2331 	} else if (st->cur_skb->next) {
2332 		st->cur_skb = st->cur_skb->next;
2333 		st->frag_idx = 0;
2334 		goto next_skb;
2335 	}
2336 
2337 	return 0;
2338 }
2339 EXPORT_SYMBOL(skb_seq_read);
2340 
2341 /**
2342  * skb_abort_seq_read - Abort a sequential read of skb data
2343  * @st: state variable
2344  *
2345  * Must be called if the sequential read was aborted before skb_seq_read()
2346  * returned 0.
2347  */
2348 void skb_abort_seq_read(struct skb_seq_state *st)
2349 {
2350 	if (st->frag_data)
2351 		kunmap_skb_frag(st->frag_data);
2352 }
2353 EXPORT_SYMBOL(skb_abort_seq_read);
2354 
2355 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2356 
2357 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2358 					  struct ts_config *conf,
2359 					  struct ts_state *state)
2360 {
2361 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2362 }
2363 
2364 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2365 {
2366 	skb_abort_seq_read(TS_SKB_CB(state));
2367 }
2368 
2369 /**
2370  * skb_find_text - Find a text pattern in skb data
2371  * @skb: the buffer to look in
2372  * @from: search offset
2373  * @to: search limit
2374  * @config: textsearch configuration
2375  * @state: uninitialized textsearch state variable
2376  *
2377  * Finds a pattern in the skb data according to the specified
2378  * textsearch configuration. Use textsearch_next() to retrieve
2379  * subsequent occurrences of the pattern. Returns the offset
2380  * to the first occurrence or UINT_MAX if no match was found.
2381  */
2382 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2383 			   unsigned int to, struct ts_config *config,
2384 			   struct ts_state *state)
2385 {
2386 	unsigned int ret;
2387 
2388 	config->get_next_block = skb_ts_get_next_block;
2389 	config->finish = skb_ts_finish;
2390 
2391 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2392 
2393 	ret = textsearch_find(config, state);
2394 	return (ret <= to - from ? ret : UINT_MAX);
2395 }
2396 EXPORT_SYMBOL(skb_find_text);
2397 
2398 /**
2399  * skb_append_datato_frags - append the user data to a skb
2400  * @sk: sock structure
2401  * @skb: skb structure to be appended with user data.
2402  * @getfrag: callback function to be used for getting the user data
2403  * @from: pointer to user message iov
2404  * @length: length of the iov message
2405  *
2406  * Description: This function appends the user data to the fragment part
2407  * of the skb. If any page allocation fails, it returns -ENOMEM.
2408  */
2409 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2410 			    int (*getfrag)(void *from, char *to, int offset,
2411 					   int len, int odd, struct sk_buff *skb),
2412 			    void *from, int length)
2413 {
2414 	int frg_cnt = 0;
2415 	skb_frag_t *frag = NULL;
2416 	struct page *page = NULL;
2417 	int copy, left;
2418 	int offset = 0;
2419 	int ret;
2420 
2421 	do {
2422 		/* Return error if we don't have space for new frag */
2423 		frg_cnt = skb_shinfo(skb)->nr_frags;
2424 		if (frg_cnt >= MAX_SKB_FRAGS)
2425 			return -EFAULT;
2426 
2427 		/* allocate a new page for next frag */
2428 		page = alloc_pages(sk->sk_allocation, 0);
2429 
2430 		/* If alloc_pages() fails, just return failure and the caller
2431 		 * will free previously allocated pages by doing kfree_skb()
2432 		 */
2433 		if (page == NULL)
2434 			return -ENOMEM;
2435 
2436 		/* initialize the next frag */
2437 		sk->sk_sndmsg_page = page;
2438 		sk->sk_sndmsg_off = 0;
2439 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2440 		skb->truesize += PAGE_SIZE;
2441 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2442 
2443 		/* get the new initialized frag */
2444 		frg_cnt = skb_shinfo(skb)->nr_frags;
2445 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2446 
2447 		/* copy the user data to page */
2448 		left = PAGE_SIZE - frag->page_offset;
2449 		copy = (length > left)? left : length;
2450 
2451 		ret = getfrag(from, (page_address(frag->page) +
2452 			frag->page_offset + frag->size),
2453 			offset, copy, 0, skb);
2454 		if (ret < 0)
2455 			return -EFAULT;
2456 
2457 		/* copy was successful so update the size parameters */
2458 		sk->sk_sndmsg_off += copy;
2459 		frag->size += copy;
2460 		skb->len += copy;
2461 		skb->data_len += copy;
2462 		offset += copy;
2463 		length -= copy;
2464 
2465 	} while (length > 0);
2466 
2467 	return 0;
2468 }
2469 EXPORT_SYMBOL(skb_append_datato_frags);
2470 
2471 /**
2472  * skb_pull_rcsum - pull skb and update receive checksum
2473  * @skb: buffer to update
2474  * @len: length of data pulled
2475  *
2476  * This function performs an skb_pull on the packet and updates
2477  * the CHECKSUM_COMPLETE checksum. It should be used on
2478  * receive path processing instead of skb_pull unless you know
2479  * that the checksum difference is zero (e.g., a valid IP header)
2480  * or you are setting ip_summed to CHECKSUM_NONE.
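 *
 *	A minimal sketch of use on the receive path (HLEN is a hypothetical,
 *	already validated header length):
 *
 *		if (!pskb_may_pull(skb, HLEN))
 *			goto drop;
 *		skb_pull_rcsum(skb, HLEN);	// advances skb->data, adjusts skb->csum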
2481 */ 2482 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2483 { 2484 BUG_ON(len > skb->len); 2485 skb->len -= len; 2486 BUG_ON(skb->len < skb->data_len); 2487 skb_postpull_rcsum(skb, skb->data, len); 2488 return skb->data += len; 2489 } 2490 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2491 2492 /** 2493 * skb_segment - Perform protocol segmentation on skb. 2494 * @skb: buffer to segment 2495 * @features: features for the output path (see dev->features) 2496 * 2497 * This function performs segmentation on the given skb. It returns 2498 * a pointer to the first in a list of new skbs for the segments. 2499 * In case of error it returns ERR_PTR(err). 2500 */ 2501 struct sk_buff *skb_segment(struct sk_buff *skb, int features) 2502 { 2503 struct sk_buff *segs = NULL; 2504 struct sk_buff *tail = NULL; 2505 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2506 unsigned int mss = skb_shinfo(skb)->gso_size; 2507 unsigned int doffset = skb->data - skb_mac_header(skb); 2508 unsigned int offset = doffset; 2509 unsigned int headroom; 2510 unsigned int len; 2511 int sg = features & NETIF_F_SG; 2512 int nfrags = skb_shinfo(skb)->nr_frags; 2513 int err = -ENOMEM; 2514 int i = 0; 2515 int pos; 2516 2517 __skb_push(skb, doffset); 2518 headroom = skb_headroom(skb); 2519 pos = skb_headlen(skb); 2520 2521 do { 2522 struct sk_buff *nskb; 2523 skb_frag_t *frag; 2524 int hsize; 2525 int size; 2526 2527 len = skb->len - offset; 2528 if (len > mss) 2529 len = mss; 2530 2531 hsize = skb_headlen(skb) - offset; 2532 if (hsize < 0) 2533 hsize = 0; 2534 if (hsize > len || !sg) 2535 hsize = len; 2536 2537 if (!hsize && i >= nfrags) { 2538 BUG_ON(fskb->len != len); 2539 2540 pos += len; 2541 nskb = skb_clone(fskb, GFP_ATOMIC); 2542 fskb = fskb->next; 2543 2544 if (unlikely(!nskb)) 2545 goto err; 2546 2547 hsize = skb_end_pointer(nskb) - nskb->head; 2548 if (skb_cow_head(nskb, doffset + headroom)) { 2549 kfree_skb(nskb); 2550 goto err; 2551 } 2552 2553 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2554 hsize; 2555 skb_release_head_state(nskb); 2556 __skb_push(nskb, doffset); 2557 } else { 2558 nskb = alloc_skb(hsize + doffset + headroom, 2559 GFP_ATOMIC); 2560 2561 if (unlikely(!nskb)) 2562 goto err; 2563 2564 skb_reserve(nskb, headroom); 2565 __skb_put(nskb, doffset); 2566 } 2567 2568 if (segs) 2569 tail->next = nskb; 2570 else 2571 segs = nskb; 2572 tail = nskb; 2573 2574 __copy_skb_header(nskb, skb); 2575 nskb->mac_len = skb->mac_len; 2576 2577 /* nskb and skb might have different headroom */ 2578 if (nskb->ip_summed == CHECKSUM_PARTIAL) 2579 nskb->csum_start += skb_headroom(nskb) - headroom; 2580 2581 skb_reset_mac_header(nskb); 2582 skb_set_network_header(nskb, skb->mac_len); 2583 nskb->transport_header = (nskb->network_header + 2584 skb_network_header_len(skb)); 2585 skb_copy_from_linear_data(skb, nskb->data, doffset); 2586 2587 if (fskb != skb_shinfo(skb)->frag_list) 2588 continue; 2589 2590 if (!sg) { 2591 nskb->ip_summed = CHECKSUM_NONE; 2592 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2593 skb_put(nskb, len), 2594 len, 0); 2595 continue; 2596 } 2597 2598 frag = skb_shinfo(nskb)->frags; 2599 2600 skb_copy_from_linear_data_offset(skb, offset, 2601 skb_put(nskb, hsize), hsize); 2602 2603 while (pos < offset + len && i < nfrags) { 2604 *frag = skb_shinfo(skb)->frags[i]; 2605 get_page(frag->page); 2606 size = frag->size; 2607 2608 if (pos < offset) { 2609 frag->page_offset += offset - pos; 2610 frag->size -= offset - pos; 2611 } 2612 2613 skb_shinfo(nskb)->nr_frags++; 2614 2615 if (pos + 
size <= offset + len) { 2616 i++; 2617 pos += size; 2618 } else { 2619 frag->size -= pos + size - (offset + len); 2620 goto skip_fraglist; 2621 } 2622 2623 frag++; 2624 } 2625 2626 if (pos < offset + len) { 2627 struct sk_buff *fskb2 = fskb; 2628 2629 BUG_ON(pos + fskb->len != offset + len); 2630 2631 pos += fskb->len; 2632 fskb = fskb->next; 2633 2634 if (fskb2->next) { 2635 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2636 if (!fskb2) 2637 goto err; 2638 } else 2639 skb_get(fskb2); 2640 2641 SKB_FRAG_ASSERT(nskb); 2642 skb_shinfo(nskb)->frag_list = fskb2; 2643 } 2644 2645 skip_fraglist: 2646 nskb->data_len = len - hsize; 2647 nskb->len += nskb->data_len; 2648 nskb->truesize += nskb->data_len; 2649 } while ((offset += len) < skb->len); 2650 2651 return segs; 2652 2653 err: 2654 while ((skb = segs)) { 2655 segs = skb->next; 2656 kfree_skb(skb); 2657 } 2658 return ERR_PTR(err); 2659 } 2660 EXPORT_SYMBOL_GPL(skb_segment); 2661 2662 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2663 { 2664 struct sk_buff *p = *head; 2665 struct sk_buff *nskb; 2666 struct skb_shared_info *skbinfo = skb_shinfo(skb); 2667 struct skb_shared_info *pinfo = skb_shinfo(p); 2668 unsigned int headroom; 2669 unsigned int len = skb_gro_len(skb); 2670 unsigned int offset = skb_gro_offset(skb); 2671 unsigned int headlen = skb_headlen(skb); 2672 2673 if (p->len + len >= 65536) 2674 return -E2BIG; 2675 2676 if (pinfo->frag_list) 2677 goto merge; 2678 else if (headlen <= offset) { 2679 skb_frag_t *frag; 2680 skb_frag_t *frag2; 2681 int i = skbinfo->nr_frags; 2682 int nr_frags = pinfo->nr_frags + i; 2683 2684 offset -= headlen; 2685 2686 if (nr_frags > MAX_SKB_FRAGS) 2687 return -E2BIG; 2688 2689 pinfo->nr_frags = nr_frags; 2690 skbinfo->nr_frags = 0; 2691 2692 frag = pinfo->frags + nr_frags; 2693 frag2 = skbinfo->frags + i; 2694 do { 2695 *--frag = *--frag2; 2696 } while (--i); 2697 2698 frag->page_offset += offset; 2699 frag->size -= offset; 2700 2701 skb->truesize -= skb->data_len; 2702 skb->len -= skb->data_len; 2703 skb->data_len = 0; 2704 2705 NAPI_GRO_CB(skb)->free = 1; 2706 goto done; 2707 } else if (skb_gro_len(p) != pinfo->gso_size) 2708 return -E2BIG; 2709 2710 headroom = skb_headroom(p); 2711 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 2712 if (unlikely(!nskb)) 2713 return -ENOMEM; 2714 2715 __copy_skb_header(nskb, p); 2716 nskb->mac_len = p->mac_len; 2717 2718 skb_reserve(nskb, headroom); 2719 __skb_put(nskb, skb_gro_offset(p)); 2720 2721 skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 2722 skb_set_network_header(nskb, skb_network_offset(p)); 2723 skb_set_transport_header(nskb, skb_transport_offset(p)); 2724 2725 __skb_pull(p, skb_gro_offset(p)); 2726 memcpy(skb_mac_header(nskb), skb_mac_header(p), 2727 p->data - skb_mac_header(p)); 2728 2729 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2730 skb_shinfo(nskb)->frag_list = p; 2731 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2732 pinfo->gso_size = 0; 2733 skb_header_release(p); 2734 nskb->prev = p; 2735 2736 nskb->data_len += p->len; 2737 nskb->truesize += p->len; 2738 nskb->len += p->len; 2739 2740 *head = nskb; 2741 nskb->next = p->next; 2742 p->next = NULL; 2743 2744 p = nskb; 2745 2746 merge: 2747 if (offset > headlen) { 2748 skbinfo->frags[0].page_offset += offset - headlen; 2749 skbinfo->frags[0].size -= offset - headlen; 2750 offset = headlen; 2751 } 2752 2753 __skb_pull(skb, offset); 2754 2755 p->prev->next = skb; 2756 p->prev = skb; 2757 skb_header_release(skb); 2758 2759 done: 2760 NAPI_GRO_CB(p)->count++; 2761 p->data_len += 
len; 2762 p->truesize += len; 2763 p->len += len; 2764 2765 NAPI_GRO_CB(skb)->same_flow = 1; 2766 return 0; 2767 } 2768 EXPORT_SYMBOL_GPL(skb_gro_receive); 2769 2770 void __init skb_init(void) 2771 { 2772 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 2773 sizeof(struct sk_buff), 2774 0, 2775 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2776 NULL); 2777 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2778 (2*sizeof(struct sk_buff)) + 2779 sizeof(atomic_t), 2780 0, 2781 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2782 NULL); 2783 } 2784 2785 /** 2786 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 2787 * @skb: Socket buffer containing the buffers to be mapped 2788 * @sg: The scatter-gather list to map into 2789 * @offset: The offset into the buffer's contents to start mapping 2790 * @len: Length of buffer space to be mapped 2791 * 2792 * Fill the specified scatter-gather list with mappings/pointers into a 2793 * region of the buffer space attached to a socket buffer. 2794 */ 2795 static int 2796 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2797 { 2798 int start = skb_headlen(skb); 2799 int i, copy = start - offset; 2800 struct sk_buff *frag_iter; 2801 int elt = 0; 2802 2803 if (copy > 0) { 2804 if (copy > len) 2805 copy = len; 2806 sg_set_buf(sg, skb->data + offset, copy); 2807 elt++; 2808 if ((len -= copy) == 0) 2809 return elt; 2810 offset += copy; 2811 } 2812 2813 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2814 int end; 2815 2816 WARN_ON(start > offset + len); 2817 2818 end = start + skb_shinfo(skb)->frags[i].size; 2819 if ((copy = end - offset) > 0) { 2820 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2821 2822 if (copy > len) 2823 copy = len; 2824 sg_set_page(&sg[elt], frag->page, copy, 2825 frag->page_offset+offset-start); 2826 elt++; 2827 if (!(len -= copy)) 2828 return elt; 2829 offset += copy; 2830 } 2831 start = end; 2832 } 2833 2834 skb_walk_frags(skb, frag_iter) { 2835 int end; 2836 2837 WARN_ON(start > offset + len); 2838 2839 end = start + frag_iter->len; 2840 if ((copy = end - offset) > 0) { 2841 if (copy > len) 2842 copy = len; 2843 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 2844 copy); 2845 if ((len -= copy) == 0) 2846 return elt; 2847 offset += copy; 2848 } 2849 start = end; 2850 } 2851 BUG_ON(len); 2852 return elt; 2853 } 2854 2855 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2856 { 2857 int nsg = __skb_to_sgvec(skb, sg, offset, len); 2858 2859 sg_mark_end(&sg[nsg - 1]); 2860 2861 return nsg; 2862 } 2863 EXPORT_SYMBOL_GPL(skb_to_sgvec); 2864 2865 /** 2866 * skb_cow_data - Check that a socket buffer's data buffers are writable 2867 * @skb: The socket buffer to check. 2868 * @tailbits: Amount of trailing space to be added 2869 * @trailer: Returned pointer to the skb where the @tailbits space begins 2870 * 2871 * Make sure that the data buffers attached to a socket buffer are 2872 * writable. If they are not, private copies are made of the data buffers 2873 * and the socket buffer is set to use these instead. 2874 * 2875 * If @tailbits is given, make sure that there is space to write @tailbits 2876 * bytes of data beyond current end of socket buffer. @trailer will be 2877 * set to point to the skb in which this space begins. 2878 * 2879 * The number of scatterlist elements required to completely map the 2880 * COW'd and extended socket buffer will be returned. 
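 *
 *	A rough usage sketch, modelled on IPsec-style callers ('tailroom_needed'
 *	and the error label are illustrative only):
 *
 *		struct sk_buff *trailer;
 *		int nfrags = skb_cow_data(skb, tailroom_needed, &trailer);
 *
 *		if (nfrags < 0)
 *			goto error;
 *		// the chain is now writable; 'trailer' has the requested tailroom,
 *		// and skb_to_sgvec() needs a scatterlist of at least 'nfrags' entries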
2881  */
2882 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2883 {
2884 	int copyflag;
2885 	int elt;
2886 	struct sk_buff *skb1, **skb_p;
2887 
2888 	/* If skb is cloned or its head is paged, reallocate
2889 	 * the head, pulling out all the pages (pages are considered not
2890 	 * writable at the moment even if they are anonymous).
2891 	 */
2892 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2893 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2894 		return -ENOMEM;
2895 
2896 	/* Easy case. Most packets will go this way. */
2897 	if (!skb_has_frags(skb)) {
2898 		/* A bit of trouble: not enough space for the trailer.
2899 		 * This should not happen when the stack is tuned to generate
2900 		 * good frames. OK, on a miss we reallocate and reserve even more
2901 		 * space; 128 bytes is fair. */
2902 
2903 		if (skb_tailroom(skb) < tailbits &&
2904 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2905 			return -ENOMEM;
2906 
2907 		/* Voila! */
2908 		*trailer = skb;
2909 		return 1;
2910 	}
2911 
2912 	/* Misery. We are in trouble; we are going to have to mince the fragments... */
2913 
2914 	elt = 1;
2915 	skb_p = &skb_shinfo(skb)->frag_list;
2916 	copyflag = 0;
2917 
2918 	while ((skb1 = *skb_p) != NULL) {
2919 		int ntail = 0;
2920 
2921 		/* The fragment has been partially pulled by someone;
2922 		 * this can happen on input. Copy it and everything
2923 		 * after it. */
2924 
2925 		if (skb_shared(skb1))
2926 			copyflag = 1;
2927 
2928 		/* If the skb is the last, worry about trailer. */
2929 
2930 		if (skb1->next == NULL && tailbits) {
2931 			if (skb_shinfo(skb1)->nr_frags ||
2932 			    skb_has_frags(skb1) ||
2933 			    skb_tailroom(skb1) < tailbits)
2934 				ntail = tailbits + 128;
2935 		}
2936 
2937 		if (copyflag ||
2938 		    skb_cloned(skb1) ||
2939 		    ntail ||
2940 		    skb_shinfo(skb1)->nr_frags ||
2941 		    skb_has_frags(skb1)) {
2942 			struct sk_buff *skb2;
2943 
2944 			/* Fuck, we are miserable poor guys... */
2945 			if (ntail == 0)
2946 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2947 			else
2948 				skb2 = skb_copy_expand(skb1,
2949 						       skb_headroom(skb1),
2950 						       ntail,
2951 						       GFP_ATOMIC);
2952 			if (unlikely(skb2 == NULL))
2953 				return -ENOMEM;
2954 
2955 			if (skb1->sk)
2956 				skb_set_owner_w(skb2, skb1->sk);
2957 
2958 			/* Looking around. Are we still alive?
2959 			 * OK, link new skb, drop old one */
2960 
2961 			skb2->next = skb1->next;
2962 			*skb_p = skb2;
2963 			kfree_skb(skb1);
2964 			skb1 = skb2;
2965 		}
2966 		elt++;
2967 		*trailer = skb1;
2968 		skb_p = &skb1->next;
2969 	}
2970 
2971 	return elt;
2972 }
2973 EXPORT_SYMBOL_GPL(skb_cow_data);
2974 
2975 static void sock_rmem_free(struct sk_buff *skb)
2976 {
2977 	struct sock *sk = skb->sk;
2978 
2979 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2980 }
2981 
2982 /*
2983  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
2984  */
2985 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2986 {
2987 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
2988 	    (unsigned)sk->sk_rcvbuf)
2989 		return -ENOMEM;
2990 
2991 	skb_orphan(skb);
2992 	skb->sk = sk;
2993 	skb->destructor = sock_rmem_free;
2994 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2995 
2996 	skb_queue_tail(&sk->sk_error_queue, skb);
2997 	if (!sock_flag(sk, SOCK_DEAD))
2998 		sk->sk_data_ready(sk, skb->len);
2999 	return 0;
3000 }
3001 EXPORT_SYMBOL(sock_queue_err_skb);
3002 
3003 void skb_tstamp_tx(struct sk_buff *orig_skb,
3004 		   struct skb_shared_hwtstamps *hwtstamps)
3005 {
3006 	struct sock *sk = orig_skb->sk;
3007 	struct sock_exterr_skb *serr;
3008 	struct sk_buff *skb;
3009 	int err;
3010 
3011 	if (!sk)
3012 		return;
3013 
3014 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3015 	if (!skb)
3016 		return;
3017 
3018 	if (hwtstamps) {
3019 		*skb_hwtstamps(skb) =
3020 			*hwtstamps;
3021 	} else {
3022 		/*
3023 		 * no hardware timestamps are available,
3024 		 * so keep the skb_shared_tx and only
3025 		 * store a software timestamp
3026 		 */
3027 		skb->tstamp = ktime_get_real();
3028 	}
3029 
3030 	serr = SKB_EXT_ERR(skb);
3031 	memset(serr, 0, sizeof(*serr));
3032 	serr->ee.ee_errno = ENOMSG;
3033 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3034 
3035 	err = sock_queue_err_skb(sk, skb);
3036 
3037 	if (err)
3038 		kfree_skb(skb);
3039 }
3040 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3041 
3042 
3043 /**
3044  * skb_partial_csum_set - set up and verify partial csum values for packet
3045  * @skb: the skb to set
3046  * @start: the number of bytes after skb->data to start checksumming.
3047  * @off: the offset from start to place the checksum.
3048  *
3049  * For untrusted partially-checksummed packets, we need to make sure the values
3050  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3051  *
3052  * This function checks and sets those values and skb->ip_summed: if this
3053  * returns false you should drop the packet.
3054  */
3055 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3056 {
3057 	if (unlikely(start > skb_headlen(skb)) ||
3058 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3059 		if (net_ratelimit())
3060 			printk(KERN_WARNING
3061 			       "bad partial csum: csum=%u/%u len=%u\n",
3062 			       start, off, skb_headlen(skb));
3063 		return false;
3064 	}
3065 	skb->ip_summed = CHECKSUM_PARTIAL;
3066 	skb->csum_start = skb_headroom(skb) + start;
3067 	skb->csum_offset = off;
3068 	return true;
3069 }
3070 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
3071 
3072 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3073 {
3074 	if (net_ratelimit())
3075 		pr_warning("%s: received packets cannot be forwarded"
3076 			   " while LRO is enabled\n", skb->dev->name);
3077 }
3078 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3079 
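
/*
 * Illustrative example only (not part of this file): a driver accepting an
 * untrusted packet that advertises a partial checksum, e.g. via a
 * hypothetical 'hdr' descriptor, would validate it roughly like this:
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */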