/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 * skb_over_panic - private function
 * @skb: buffer
 * @sz: size
 * @here: address
 *
 * Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
EXPORT_SYMBOL(skb_over_panic);

/**
 * skb_under_panic - private function
 * @skb: buffer
 * @sz: size
 * @here: address
 *
 * Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
EXPORT_SYMBOL(skb_under_panic);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @fclone: allocate from fclone cache instead of head cache
 *	and allocate a cloned (child) skb
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of @size bytes. The object has a reference count of one.
 * On success the return value is the buffer; on failure it is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->tx_flags.flags = 0;
	skb_frag_list_init(skb);
	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
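/*
 * Illustrative sketch (not part of the original file): a typical way to
 * use the allocator above. The 32-byte headroom is a hypothetical stand-in
 * for a real protocol header stack; the block is compiled out deliberately.
 */
#if 0
static struct sk_buff *example_build_packet(const void *payload,
					    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(32 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 32);			 /* headroom for headers */
	memcpy(skb_put(skb, len), payload, len); /* payload in the tailroom */
	return skb;
}
#endif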
/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, 0);
	return page;
}
EXPORT_SYMBOL(__netdev_alloc_page);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
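/*
 * Illustrative sketch (not part of the original file): how a driver
 * receive path typically pairs __netdev_alloc_skb() (via its
 * netdev_alloc_skb() wrapper) with skb_reserve(). The frame length is
 * hypothetical; compiled out deliberately.
 */
#if 0
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int frame_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}
#endif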
/**
 * dev_alloc_skb - allocate an skbuff for receiving
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory. Although this function
 * allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_has_frags(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the sk_buff's memory without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero. Functions identically to kfree_skb(), but kfree_skb()
 * assumes that the frame is being dropped after a failure and notes
 * that (via the kfree_skb tracepoint).
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 * skb_recycle_check - check if skb can be reused for receive
 * @skb: buffer
 * @skb_size: minimum receive buffer size
 *
 * Checks that the skb passed in is not shared or cloned, and
 * that it is linear and its head portion is at least as large as
 * @skb_size so that it can be recycled as a receive buffer.
 * If these conditions are met, this function does any necessary
 * reference count dropping and cleans up the skbuff as if it
 * just came from __alloc_skb().
 */
int skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return 0;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return 0;

	if (skb_shared(skb) || skb_cloned(skb))
		return 0;

	skb_release_head_state(skb);
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->tx_flags.flags = 0;
	skb_frag_list_init(skb);
	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return 1;
}
EXPORT_SYMBOL(skb_recycle_check);
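/*
 * Illustrative sketch (not part of the original file): the intended split
 * between kfree_skb() and consume_skb() above. A hypothetical TX-completion
 * handler frees successfully sent frames with consume_skb(), so that the
 * kfree_skb tracepoint keeps pointing at real drops only. Compiled out
 * deliberately.
 */
#if 0
static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* a drop; visible to tracing */
}
#endif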
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_set(new, dst_clone(skb_dst(old)));
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->iif = old->iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
	new->do_not_encrypt = old->do_not_encrypt;
#endif

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 * skb_clone - duplicate an sk_buff
 * @skb: buffer to clone
 * @gfp_mask: allocation priority
 *
 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
 * copies share the same packet data but not structure. The new
 * buffer has a reference count of 1. If the allocation fails the
 * function returns %NULL otherwise the new buffer is returned.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
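/*
 * Illustrative sketch (not part of the original file): cloning for a
 * second consumer. Both sk_buffs share the packet data, so a clone is
 * only safe for readers; writers must take a private copy first (see
 * skb_copy() and pskb_copy() below). Compiled out deliberately.
 */
#if 0
static int example_tap_deliver(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	/* hand "clone" to a hypothetical read-only consumer here */
	kfree_skb(clone);
	return 0;
}
#endif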
/**
 * skb_copy - create private copy of an sk_buff
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data. This is used when the
 * caller wishes to modify the data and needs a private copy of the
 * data to alter. Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * As a by-product this function converts a non-linear &sk_buff into a
 * linear one, so the &sk_buff becomes completely private and the caller
 * may modify all of the returned buffer's data. This means the function
 * is not recommended when only the header is going to be modified; use
 * pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 * pskb_copy - create copy of an sk_buff with private head.
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and the part of its data located
 * in its header. Fragmented data remains shared. This is used when
 * the caller wishes to modify only the header of an &sk_buff and
 * needs a private copy of the header to alter. Returns %NULL on
 * failure or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frags(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);
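/*
 * Illustrative sketch (not part of the original file): choosing between
 * the two copies above. Editing only headers wants pskb_copy(); editing
 * payload that may live in fragments wants the full skb_copy(). Compiled
 * out deliberately.
 */
#if 0
static struct sk_buff *example_private_copy(struct sk_buff *skb,
					    bool headers_only)
{
	return headers_only ? pskb_copy(skb, GFP_ATOMIC)
			    : skb_copy(skb, GFP_ATOMIC);
}
#endif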
/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
 * the header of the skb. The &sk_buff itself is not changed. The &sk_buff
 * MUST have a reference count of 1. Returns zero on success or a negative
 * error code if expansion failed; in the latter case the &sk_buff is not
 * changed.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_has_frags(skb))
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	skb->mac_header += off;
	skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
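/*
 * Illustrative sketch (not part of the original file): making room to
 * push an encapsulation header. The 16-byte requirement is hypothetical.
 * Note that pskb_expand_head() may move the buffer, so any pointers into
 * the old header must be reloaded afterwards. Compiled out deliberately.
 */
#if 0
static int example_make_headroom(struct sk_buff *skb)
{
	if (skb_headroom(skb) >= 16 && !skb_cloned(skb))
		return 0;	/* already writable with enough room */
	return pskb_expand_head(skb, SKB_DATA_ALIGN(16), 0, GFP_ATOMIC);
}
#endif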
/**
 * skb_copy_expand - copy and expand sk_buff
 * @skb: buffer to copy
 * @newheadroom: new free bytes at head
 * @newtailroom: new free bytes at tail
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data, and while doing so
 * allocate additional space.
 *
 * This is used when the caller wishes to modify the data and needs a
 * private copy of the data to alter as well as more space for new fields.
 * Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * You must pass %GFP_ATOMIC as the allocation priority if this function
 * is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 * skb_pad - zero pad the tail of an skb
 * @skb: buffer to pad
 * @pad: space to pad
 *
 * Ensure that a buffer is followed by a padding area that is zero
 * filled. Used by network drivers which may DMA or transfer data
 * beyond the buffer end onto the wire.
 *
 * Returns a negative errno on failure (e.g. out of memory); the skb
 * is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 * skb_put - add data to a buffer
 * @skb: buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the buffer. If this would
 * exceed the total buffer size the kernel will panic. A pointer to the
 * first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
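/*
 * Illustrative sketch (not part of the original file): the usual pointer
 * dance with skb_put() above and skb_push() below. The 8-byte header is a
 * hypothetical placeholder, and the skb is assumed to have been allocated
 * with enough head- and tailroom. Compiled out deliberately.
 */
#if 0
static void example_fill(struct sk_buff *skb, const void *data,
			 unsigned int len)
{
	memcpy(skb_put(skb, len), data, len);	/* append payload at tail */
	memset(skb_push(skb, 8), 0, 8);		/* prepend an 8-byte header */
}
#endif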
/**
 * skb_push - add data to the start of a buffer
 * @skb: buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the buffer at the buffer
 * start. If this would exceed the total buffer headroom the kernel will
 * panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 * skb_pull - remove data from the start of a buffer
 * @skb: buffer to use
 * @len: amount of data to remove
 *
 * This function removes data from the start of a buffer, returning
 * the memory to the headroom. A pointer to the next data in the buffer
 * is returned. Once the data has been pulled future pushes will overwrite
 * the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 * skb_trim - remove end from a buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * Cut the length of a buffer down by removing data from the tail. If
 * the buffer is already under the length specified it is not modified.
 * The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frags(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
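/*
 * Illustrative sketch (not part of the original file): trimming trailing
 * padding, e.g. from a hypothetical link-layer pad. pskb_trim() handles
 * paged data via ___pskb_trim() above; skb_trim() is for linear skbs only.
 * Compiled out deliberately.
 */
#if 0
static int example_strip_pad(struct sk_buff *skb, unsigned int real_len)
{
	if (skb->len <= real_len)
		return 0;
	return pskb_trim(skb, real_len);
}
#endif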
/**
 * __pskb_pull_tail - advance tail of skb header
 * @skb: buffer to reallocate
 * @delta: number of bytes to advance tail
 *
 * This function only makes sense on a fragmented &sk_buff: it expands
 * the header, moving its tail forward and copying the necessary data
 * from the fragmented part.
 *
 * &sk_buff MUST have reference count of 1.
 *
 * Returns %NULL (and the &sk_buff does not change) if the pull failed,
 * or the value of the new tail of the skb on success.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

/* Moves the tail of the skb head forward, copying data from the
 * fragmented part when necessary.
 * 1. It may fail due to allocation failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a
	 * new one plus 128 bytes for future expansion. If we do have
	 * enough room at the tail, reallocate without expansion only if
	 * the skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate the size
	 * of the pulled pages. Superb.
	 */
	if (!skb_has_frags(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but given that pulling is expected to be a very rare operation,
	 * it is worth fighting against further bloat of the skb head and
	 * crucifying ourselves here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
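/*
 * Illustrative sketch (not part of the original file): skb_copy_bits()
 * flattens a possibly fragmented region into a caller-supplied buffer.
 * Here a hypothetical 20-byte header is gathered regardless of how the
 * skb is laid out. Compiled out deliberately.
 */
#if 0
static int example_peek_header(const struct sk_buff *skb, void *hdr)
{
	/* offset 0 = start of packet data; hdr must hold 20 bytes */
	return skb_copy_bits(skb, 0, hdr, 20);
}
#endif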
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
			     unsigned int *len, struct splice_pipe_desc *spd,
			     struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle the linear part, the
 * fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		int ret;

		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
		return ret;
	}

	return 0;
}
/**
 * skb_store_bits - store bits from kernel buffer to skb
 * @skb: destination buffer
 * @offset: offset in destination
 * @from: source buffer
 * @len: number of bytes to copy
 *
 * Copy the specified number of bytes from the source buffer to the
 * destination skb. This function handles all the messy bits of
 * traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);
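/*
 * Illustrative sketch (not part of the original file): folding the 32-bit
 * partial sum from skb_checksum() above into a 16-bit Internet checksum
 * over the whole packet. Compiled out deliberately.
 */
#if 0
static __sum16 example_full_csum(const struct sk_buff *skb)
{
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}
#endif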
/* Both of the above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 * skb_dequeue - remove from the head of the queue
 * @list: list to dequeue from
 *
 * Remove the head of the list. The list lock is taken so the function
 * may be used safely with other locking list functions. The head item is
 * returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);
/**
 * skb_dequeue_tail - remove from the tail of the queue
 * @list: list to dequeue from
 *
 * Remove the tail of the list. The list lock is taken so the function
 * may be used safely with other locking list functions. The tail item is
 * returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 * skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function takes the list
 * lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 * skb_queue_head - queue a buffer at the list head
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the start of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff
 * functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 * skb_queue_tail - queue a buffer at the list tail
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the tail of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff
 * functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 * skb_unlink - remove a buffer from a list
 * @skb: buffer to remove
 * @list: list to use
 *
 * Remove a packet from a list. The list locks are taken and this
 * function is atomic with respect to other list-locked calls.
 *
 * You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
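/*
 * Illustrative sketch (not part of the original file): a minimal
 * producer/consumer on an &sk_buff_head using the locked helpers above.
 * The queue itself is hypothetical and must be set up once with
 * skb_queue_head_init() before use. Compiled out deliberately.
 */
#if 0
static struct sk_buff_head example_queue;

static void example_produce(struct sk_buff *skb)
{
	skb_queue_tail(&example_queue, skb);	/* takes the list lock */
}

static void example_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_queue)) != NULL)
		kfree_skb(skb);
}
#endif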
/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other list locked calls.
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 * skb_insert - insert a buffer
 * @old: buffer to insert before
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet before a given packet in a list. The list locks are
 * taken and this function is atomic with respect to other list locked
 * calls.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move the whole frag to the second
				 *    part, if possible. E.g. this
				 *    approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
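/*
 * Illustrative sketch (not part of the original file): splitting a buffer
 * at a segment boundary, in the spirit of what TCP segmentation does.
 * @skb keeps the first @mss bytes; the hypothetical preallocated @rest
 * receives the remainder. Compiled out deliberately.
 */
#if 0
static void example_split(struct sk_buff *skb, struct sk_buff *rest, u32 mss)
{
	if (skb->len > mss)
		skb_split(skb, rest, mss);
}
#endif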
/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from @skb to @tgt. Returns the number of bytes
 * shifted. It's up to the caller to free @skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * @skb may contain nothing but paged data, while @tgt is also allowed
 * to hold non-paged data.
 *
 * TODO: full sized shift could be optimized but that would need
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= fragfrom->size;
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
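/*
 * Usage sketch (illustrative only): skb_shift() serves callers such as
 * the TCP SACK code, which collapses adjacent retransmit-queue buffers
 * by moving paged data into the previous skb. A hypothetical caller,
 * assuming both buffers hold only paged data (skb_headlen() == 0):
 *
 *	shifted = skb_shift(prev, skb, skb->len);
 *	if (shifted == skb->len) {
 *		all data moved and len/truesize bookkeeping is already
 *		updated; unlink and free the now-empty skb
 *	} else if (shifted) {
 *		only part of the data moved; skb keeps the rest
 *	}
 */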
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at &consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to &data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. &consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *         this limitation is the cost for zerocopy sequential
 *         reads of potentially non-linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *         at the moment, state->root_skb could be replaced with
 *         a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
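/*
 * Usage sketch (illustrative only): the canonical pattern is to walk
 * the data in whatever block sizes skb_seq_read() hands back, and to
 * call skb_abort_seq_read() on any early exit so a mapped fragment is
 * not left kmapped. consume() is a hypothetical callback.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (consume(data, len) < 0) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}
 */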
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
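/*
 * Usage sketch (illustrative only): searching a packet for a byte
 * pattern, much like the xt_string netfilter match does. The setup is
 * the ordinary textsearch API; the pattern below is hypothetical and
 * error handling is trimmed.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "GET ", 4, GFP_KERNEL,
 *				  TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			pattern found at offset pos
 *		textsearch_destroy(conf);
 *	}
 */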
/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, it returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			      frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
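/*
 * Usage sketch (illustrative only): stripping an encapsulation header
 * on the receive path, the way a VLAN or tunnel handler removes its
 * header while keeping a CHECKSUM_COMPLETE value consistent. HDR_LEN
 * is a hypothetical header length.
 *
 *	if (unlikely(!pskb_may_pull(skb, HDR_LEN)))
 *		goto drop;
 *	skb_pull_rcsum(skb, HDR_LEN);
 *	skb->csum now excludes the pulled header bytes when
 *	ip_summed is CHECKSUM_COMPLETE
 */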
/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
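/*
 * Usage sketch (illustrative only): callers normally reach this through
 * the GSO machinery rather than directly; a protocol's gso_segment
 * implementation typically ends up here after validating its headers,
 * doing roughly:
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;
 *	walk segs via ->next, fixing up per-segment headers such as
 *	sequence numbers and checksums, then hand the list back for
 *	transmission
 */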
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	struct skb_shared_info *skbinfo = skb_shinfo(skb);
	struct skb_shared_info *pinfo = skb_shinfo(p);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->len + len >= 65536)
		return -E2BIG;

	if (pinfo->frag_list)
		goto merge;
	else if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		offset -= headlen;

		if (nr_frags > MAX_SKB_FRAGS)
			return -E2BIG;

		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		frag->size -= offset;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	}

	headroom = skb_headroom(p);
	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	if (offset > headlen) {
		skbinfo->frags[0].page_offset += offset - headlen;
		skbinfo->frags[0].size -= offset - headlen;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	p->prev->next = skb;
	p->prev = skb;
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);
void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset+offset-start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
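/*
 * Usage sketch (illustrative only): mapping a packet for crypto, the
 * way IPsec-style transforms hand skb data to the crypto layer. The
 * scatterlist must be sized for the worst case beforehand, e.g. using
 * the element count returned by skb_cow_data() below; MAX_ELEMS is
 * hypothetical.
 *
 *	struct scatterlist sg[MAX_ELEMS];
 *
 *	sg_init_table(sg, MAX_ELEMS);
 *	nsg = skb_to_sgvec(skb, sg, offset, skb->len - offset);
 *	sg[0..nsg-1] now reference the skb data; pass them to crypto
 */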
/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frags(skb)) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to
		 * generate good frames. OK, on a miss we reallocate
		 * and reserve even more space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery: we are in trouble, going to mince fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frags(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frags(skb1)) {
			struct sk_buff *skb2;

			/* OK, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
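/*
 * Usage sketch (illustrative only): the classic caller pattern, as in
 * IPsec transforms that must append an authentication trailer. The
 * trailer_len value is hypothetical.
 *
 *	struct sk_buff *trailer;
 *	int nsg;
 *
 *	nsg = skb_cow_data(skb, trailer_len, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *	the skb chain is now writable end to end; extend @trailer by
 *	trailer_len bytes and map the whole buffer with skb_to_sgvec()
 *	into a scatterlist of nsg entries
 */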
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(skb) = *hwtstamps;
	} else {
		/*
		 * no hardware time stamps available,
		 * so keep the skb_shared_tx and only
		 * store software time stamp
		 */
		skb->tstamp = ktime_get_real();
	}

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	err = sock_queue_err_skb(sk, skb);
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);


/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);