/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	- private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
EXPORT_SYMBOL(skb_over_panic);

/**
 *	skb_under_panic	- private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
EXPORT_SYMBOL(skb_under_panic);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->tx_flags.flags = 0;
	shinfo->frag_list = NULL;
	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, 0);
	return page;
}
EXPORT_SYMBOL(__netdev_alloc_page);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
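/*
 * Illustrative sketch (not part of this file): a typical driver receive
 * path allocates its buffer with netdev_alloc_skb(), which wraps
 * __netdev_alloc_skb() above with %GFP_ATOMIC and already reserves
 * NET_SKB_PAD bytes of headroom. The names example_rx, hw_buf and
 * pkt_len below are hypothetical.
 *
 *	static void example_rx(struct net_device *dev, const void *hw_buf,
 *			       unsigned int pkt_len)
 *	{
 *		struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
 *
 *		if (!skb)
 *			return;
 *		memcpy(skb_put(skb, pkt_len), hw_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */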
/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the memory of an skbuff without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Functions identically to kfree_skb, but kfree_skb assumes
 *	that the frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
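/*
 * Illustrative sketch (not part of this file): the practical difference
 * between kfree_skb() and consume_skb() is intent, which matters for
 * drop tracing. A hypothetical TX-completion handler (example_tx_done)
 * might use consume_skb() for frames that were sent successfully and
 * kfree_skb() for frames it had to drop:
 *
 *	static void example_tx_done(struct sk_buff *skb, bool sent_ok)
 *	{
 *		if (sent_ok)
 *			consume_skb(skb);
 *		else
 *			kfree_skb(skb);
 *	}
 *
 * Both paths free the buffer once the last reference is gone; only the
 * dropped case is reported via trace_kfree_skb() above.
 */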
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
int skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return 0;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return 0;

	if (skb_shared(skb) || skb_cloned(skb))
		return 0;

	skb_release_head_state(skb);
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return 1;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->dst = dst_clone(old->dst);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum_start = old->csum_start;
	new->csum_offset = old->csum_offset;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(iif);
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
	C(do_not_encrypt);
	C(requeue);
#endif
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
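/*
 * Illustrative sketch (not part of this file): skb_clone() is the cheap
 * way to get a second sk_buff header that shares the same packet data,
 * e.g. for handing one frame to two consumers. The function example_tee
 * and its queue argument are hypothetical.
 *
 *	static void example_tee(struct sk_buff *skb,
 *				struct sk_buff_head *copy_q)
 *	{
 *		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *		if (clone)
 *			skb_queue_tail(copy_q, clone);
 *		netif_rx(skb);
 *	}
 *
 * Because the data is shared, neither copy may be modified without first
 * being made private; see skb_copy() and pskb_copy() below.
 */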
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and needs
 *	a private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or a
 *	negative error code if the expansion failed. In the latter case,
 *	the &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	skb->mac_header += off;
	skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
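/*
 * Illustrative sketch (not part of this file): a tunnel-style sender that
 * must prepend its own header can use skb_realloc_headroom() to obtain a
 * private skb with enough headroom before calling skb_push(). The names
 * example_prepare and EXAMPLE_HLEN are hypothetical; the caller must check
 * for a NULL return.
 *
 *	static struct sk_buff *example_prepare(struct sk_buff *skb)
 *	{
 *		if (skb_headroom(skb) < EXAMPLE_HLEN || skb_cloned(skb)) {
 *			struct sk_buff *nskb;
 *
 *			nskb = skb_realloc_headroom(skb, EXAMPLE_HLEN);
 *			kfree_skb(skb);
 *			skb = nskb;
 *		}
 *		return skb;
 *	}
 *
 * After this, skb_push(skb, EXAMPLE_HLEN) cannot underrun the buffer head
 * (assuming the returned skb is non-NULL).
 */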
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
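/*
 * Illustrative sketch (not part of this file): a minimal sender that
 * builds a frame from scratch with the primitives above. It reserves
 * headroom first, fills the payload with skb_put(), then prepends a
 * header with skb_push(). The names example_build and struct example_hdr
 * are hypothetical.
 *
 *	static struct sk_buff *example_build(const void *payload, int plen)
 *	{
 *		struct example_hdr *hdr;
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(sizeof(*hdr) + plen, GFP_ATOMIC);
 *		if (!skb)
 *			return NULL;
 *		skb_reserve(skb, sizeof(*hdr));
 *		memcpy(skb_put(skb, plen), payload, plen);
 *		hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr));
 *		hdr->len = plen;
 *		return skb;
 *	}
 *
 * Trimming works in the opposite direction: pskb_trim(skb, new_len)
 * shortens the buffer from the tail (ending up in ___pskb_trim() above
 * for non-linear buffers) and releases fragment pages that are no
 * longer needed.
 */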
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at the tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucify ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data.
	 */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
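/*
 * Illustrative sketch (not part of this file): skb_copy_bits() is the
 * safe way to read packet bytes into a flat kernel buffer when the skb
 * may be non-linear (paged frags and/or a frag list). A hypothetical
 * helper that snapshots the first bytes of a packet might look like:
 *
 *	static int example_peek(const struct sk_buff *skb, void *buf, int len)
 *	{
 *		if (len > skb->len)
 *			len = skb->len;
 *		return skb_copy_bits(skb, 0, buf, len);
 *	}
 *
 * A negative @offset (up to the headroom) may be used to include bytes
 * in front of skb->data, as skb_copy() itself does above.
 */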
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear)
{
	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, page, &flen, poff, skb, linear))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
			     unsigned int *len,
			     struct splice_pipe_desc *spd)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, &offset, &tlen, &spd))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list && tlen; list = list->next) {
			if (__skb_splice_bits(list, &offset, &tlen, &spd))
				break;
		}
	}

done:
	if (spd.nr_pages) {
		struct sock *sk = skb->sk;
		int ret;

		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
		return ret;
	}

	return 0;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);
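/*
 * Illustrative sketch (not part of this file): skb_checksum() walks the
 * linear part, the page frags and the frag list, so a caller can fold a
 * checksum over an arbitrary range without linearizing the skb. A
 * hypothetical helper that folds the whole payload might look like:
 *
 *	static __sum16 example_fold_all(const struct sk_buff *skb)
 *	{
 *		__wsum csum = skb_checksum(skb, 0, skb->len, 0);
 *
 *		return csum_fold(csum);
 *	}
 *
 * Real protocol code (e.g. UDP verification) also folds in the
 * pseudo-header as the initial @csum argument; this sketch only shows
 * how the fragment-walking helper is invoked.
 */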
/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
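/*
 * Illustrative sketch (not part of this file): the locked queue helpers
 * above let a producer and a consumer share an &sk_buff_head without any
 * extra locking of their own. The names example_q, example_enqueue and
 * example_drain are hypothetical.
 *
 *	static struct sk_buff_head example_q;
 *
 *	static void example_init(void)
 *	{
 *		skb_queue_head_init(&example_q);
 *	}
 *
 *	static void example_enqueue(struct sk_buff *skb)
 *	{
 *		skb_queue_tail(&example_q, skb);
 *	}
 *
 *	static void example_drain(void)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&example_q)) != NULL)
 *			kfree_skb(skb);
 *	}
 *
 * example_drain() is essentially what skb_queue_purge() above does.
 */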
/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split it accurately. This is what we do.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
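/*
 * Illustrative sketch (not part of this file): skb_split() moves
 * everything past @len from @skb into a second, caller-supplied skb,
 * roughly the way TCP's tso_fragment() breaks up an over-sized segment.
 * The helper example_split_at below is hypothetical; the second skb is
 * allocated with skb_headlen() bytes of space so that it has enough
 * tailroom even when the split point falls inside the linear header
 * (see skb_split_inside_header() above).
 *
 *	static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
 *	{
 *		struct sk_buff *tail;
 *
 *		if (len >= skb->len)
 *			return NULL;
 *		tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *		if (!tail)
 *			return NULL;
 *		skb_split(skb, tail, len);
 *		return tail;
 *	}
 */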
2089 * 2090 * Caller cannot keep skb_shinfo related pointers past calling here! 2091 */ 2092 static int skb_prepare_for_shift(struct sk_buff *skb) 2093 { 2094 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2095 } 2096
2097 /** 2098 * skb_shift - Shifts paged data partially from skb to another 2099 * @tgt: buffer into which tail data gets added 2100 * @skb: buffer from which the paged data comes 2101 * @shiftlen: shift up to this many bytes 2102 * 2103 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2104 * the length of the skb, from @skb to @tgt. Returns the number of bytes shifted. 2105 * It's up to the caller to free @skb if everything was shifted. 2106 * 2107 * If @tgt runs out of frags, the whole operation is aborted. 2108 * 2109 * @skb cannot contain anything other than paged data, while @tgt is allowed 2110 * to have non-paged data as well. 2111 * 2112 * TODO: a full-sized shift could be optimized, but that would need a 2113 * specialized skb freer able to handle frags without an up-to-date nr_frags. 2114 */ 2115 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2116 { 2117 int from, to, merge, todo; 2118 struct skb_frag_struct *fragfrom, *fragto; 2119 2120 BUG_ON(shiftlen > skb->len); 2121 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2122 2123 todo = shiftlen; 2124 from = 0; 2125 to = skb_shinfo(tgt)->nr_frags; 2126 fragfrom = &skb_shinfo(skb)->frags[from]; 2127 2128 /* Actual merge is delayed until the point when we know we can 2129 * commit all, so that we don't have to undo partial changes 2130 */ 2131 if (!to || 2132 !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) { 2133 merge = -1; 2134 } else { 2135 merge = to - 1; 2136 2137 todo -= fragfrom->size; 2138 if (todo < 0) { 2139 if (skb_prepare_for_shift(skb) || 2140 skb_prepare_for_shift(tgt)) 2141 return 0; 2142 2143 /* All previous frag pointers might be stale!
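 * (pskb_expand_head() may have reallocated the head and its
 * skb_shared_info, so fragfrom/fragto are re-read below)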
*/ 2144 fragfrom = &skb_shinfo(skb)->frags[from]; 2145 fragto = &skb_shinfo(tgt)->frags[merge]; 2146 2147 fragto->size += shiftlen; 2148 fragfrom->size -= shiftlen; 2149 fragfrom->page_offset += shiftlen; 2150 2151 goto onlymerged; 2152 } 2153 2154 from++; 2155 } 2156 2157 /* Skip full, not-fitting skb to avoid expensive operations */ 2158 if ((shiftlen == skb->len) && 2159 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2160 return 0; 2161 2162 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2163 return 0; 2164 2165 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2166 if (to == MAX_SKB_FRAGS) 2167 return 0; 2168 2169 fragfrom = &skb_shinfo(skb)->frags[from]; 2170 fragto = &skb_shinfo(tgt)->frags[to]; 2171 2172 if (todo >= fragfrom->size) { 2173 *fragto = *fragfrom; 2174 todo -= fragfrom->size; 2175 from++; 2176 to++; 2177 2178 } else { 2179 get_page(fragfrom->page); 2180 fragto->page = fragfrom->page; 2181 fragto->page_offset = fragfrom->page_offset; 2182 fragto->size = todo; 2183 2184 fragfrom->page_offset += todo; 2185 fragfrom->size -= todo; 2186 todo = 0; 2187 2188 to++; 2189 break; 2190 } 2191 } 2192 2193 /* Ready to "commit" this state change to tgt */ 2194 skb_shinfo(tgt)->nr_frags = to; 2195 2196 if (merge >= 0) { 2197 fragfrom = &skb_shinfo(skb)->frags[0]; 2198 fragto = &skb_shinfo(tgt)->frags[merge]; 2199 2200 fragto->size += fragfrom->size; 2201 put_page(fragfrom->page); 2202 } 2203 2204 /* Reposition in the original skb */ 2205 to = 0; 2206 while (from < skb_shinfo(skb)->nr_frags) 2207 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2208 skb_shinfo(skb)->nr_frags = to; 2209 2210 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2211 2212 onlymerged: 2213 /* Most likely the tgt won't ever need its checksum anymore, skb on 2214 * the other hand might need it if it needs to be resent 2215 */ 2216 tgt->ip_summed = CHECKSUM_PARTIAL; 2217 skb->ip_summed = CHECKSUM_PARTIAL; 2218 2219 /* Yak, is it really working this way? Some helper please? */ 2220 skb->len -= shiftlen; 2221 skb->data_len -= shiftlen; 2222 skb->truesize -= shiftlen; 2223 tgt->len += shiftlen; 2224 tgt->data_len += shiftlen; 2225 tgt->truesize += shiftlen; 2226 2227 return shiftlen; 2228 } 2229 2230 /** 2231 * skb_prepare_seq_read - Prepare a sequential read of skb data 2232 * @skb: the buffer to read 2233 * @from: lower offset of data to be read 2234 * @to: upper offset of data to be read 2235 * @st: state variable 2236 * 2237 * Initializes the specified state variable. Must be called before 2238 * invoking skb_seq_read() for the first time. 2239 */ 2240 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2241 unsigned int to, struct skb_seq_state *st) 2242 { 2243 st->lower_offset = from; 2244 st->upper_offset = to; 2245 st->root_skb = st->cur_skb = skb; 2246 st->frag_idx = st->stepped_offset = 0; 2247 st->frag_data = NULL; 2248 } 2249 EXPORT_SYMBOL(skb_prepare_seq_read); 2250 2251 /** 2252 * skb_seq_read - Sequentially read skb data 2253 * @consumed: number of bytes consumed by the caller so far 2254 * @data: destination pointer for data to be returned 2255 * @st: state variable 2256 * 2257 * Reads a block of skb data at &consumed relative to the 2258 * lower offset specified to skb_prepare_seq_read(). Assigns 2259 * the head of the data block to &data and returns the length 2260 * of the block or 0 if the end of the skb data or the upper 2261 * offset has been reached. 
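 *
 * A minimal read loop might look like the following sketch (illustrative
 * only; error handling is omitted and "process()" is a hypothetical
 * consumer of each returned block):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int len, consumed = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, len);
 *		consumed += len;
 *	}
 *
 * skb_abort_seq_read() is only needed if the loop is abandoned before
 * skb_seq_read() has returned 0.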
2262 * 2263 * The caller is not required to consume all of the data 2264 * returned, i.e. &consumed is typically set to the number 2265 * of bytes already consumed and the next call to 2266 * skb_seq_read() will return the remaining part of the block. 2267 * 2268 * Note 1: The size of each block of data returned can be arbitrary; 2269 * this limitation is the cost of zerocopy sequential 2270 * reads of potentially non-linear data. 2271 * 2272 * Note 2: Fragment lists within fragments are not implemented 2273 * at the moment; state->root_skb could be replaced with 2274 * a stack for this purpose. 2275 */ 2276 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2277 struct skb_seq_state *st) 2278 { 2279 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2280 skb_frag_t *frag; 2281 2282 if (unlikely(abs_offset >= st->upper_offset)) 2283 return 0; 2284 2285 next_skb: 2286 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2287 2288 if (abs_offset < block_limit) { 2289 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2290 return block_limit - abs_offset; 2291 } 2292 2293 if (st->frag_idx == 0 && !st->frag_data) 2294 st->stepped_offset += skb_headlen(st->cur_skb); 2295 2296 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2297 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2298 block_limit = frag->size + st->stepped_offset; 2299 2300 if (abs_offset < block_limit) { 2301 if (!st->frag_data) 2302 st->frag_data = kmap_skb_frag(frag); 2303 2304 *data = (u8 *) st->frag_data + frag->page_offset + 2305 (abs_offset - st->stepped_offset); 2306 2307 return block_limit - abs_offset; 2308 } 2309 2310 if (st->frag_data) { 2311 kunmap_skb_frag(st->frag_data); 2312 st->frag_data = NULL; 2313 } 2314 2315 st->frag_idx++; 2316 st->stepped_offset += frag->size; 2317 } 2318 2319 if (st->frag_data) { 2320 kunmap_skb_frag(st->frag_data); 2321 st->frag_data = NULL; 2322 } 2323 2324 if (st->root_skb == st->cur_skb && 2325 skb_shinfo(st->root_skb)->frag_list) { 2326 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2327 st->frag_idx = 0; 2328 goto next_skb; 2329 } else if (st->cur_skb->next) { 2330 st->cur_skb = st->cur_skb->next; 2331 st->frag_idx = 0; 2332 goto next_skb; 2333 } 2334 2335 return 0; 2336 } 2337 EXPORT_SYMBOL(skb_seq_read); 2338
2339 /** 2340 * skb_abort_seq_read - Abort a sequential read of skb data 2341 * @st: state variable 2342 * 2343 * Must be called if the sequential read was abandoned before 2344 * skb_seq_read() returned 0. 2345 */ 2346 void skb_abort_seq_read(struct skb_seq_state *st) 2347 { 2348 if (st->frag_data) 2349 kunmap_skb_frag(st->frag_data); 2350 } 2351 EXPORT_SYMBOL(skb_abort_seq_read); 2352
2353 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2354
2355 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2356 struct ts_config *conf, 2357 struct ts_state *state) 2358 { 2359 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2360 } 2361
2362 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2363 { 2364 skb_abort_seq_read(TS_SKB_CB(state)); 2365 } 2366
2367 /** 2368 * skb_find_text - Find a text pattern in skb data 2369 * @skb: the buffer to look in 2370 * @from: search offset 2371 * @to: search limit 2372 * @config: textsearch configuration 2373 * @state: uninitialized textsearch state variable 2374 * 2375 * Finds a pattern in the skb data according to the specified 2376 * textsearch configuration.
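 * A suitable &ts_config is normally obtained beforehand with
 * textsearch_prepare().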
Use textsearch_next() to retrieve 2377 * subsequent occurrences of the pattern. Returns the offset 2378 * to the first occurrence or UINT_MAX if no match was found. 2379 */ 2380 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2381 unsigned int to, struct ts_config *config, 2382 struct ts_state *state) 2383 { 2384 unsigned int ret; 2385 2386 config->get_next_block = skb_ts_get_next_block; 2387 config->finish = skb_ts_finish; 2388 2389 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 2390 2391 ret = textsearch_find(config, state); 2392 return (ret <= to - from ? ret : UINT_MAX); 2393 } 2394 EXPORT_SYMBOL(skb_find_text); 2395
2396 /** 2397 * skb_append_datato_frags - append the user data to a skb 2398 * @sk: sock structure 2399 * @skb: skb structure to which the user data is appended 2400 * @getfrag: callback function to be used for getting the user data 2401 * @from: pointer to user message iov 2402 * @length: length of the iov message 2403 * 2404 * Description: This procedure appends the user data to the fragment part 2405 * of the skb. If any page allocation fails, this procedure returns -ENOMEM. 2406 */ 2407 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2408 int (*getfrag)(void *from, char *to, int offset, 2409 int len, int odd, struct sk_buff *skb), 2410 void *from, int length) 2411 { 2412 int frg_cnt = 0; 2413 skb_frag_t *frag = NULL; 2414 struct page *page = NULL; 2415 int copy, left; 2416 int offset = 0; 2417 int ret; 2418 2419 do { 2420 /* Return error if we don't have space for new frag */ 2421 frg_cnt = skb_shinfo(skb)->nr_frags; 2422 if (frg_cnt >= MAX_SKB_FRAGS) 2423 return -EFAULT; 2424 2425 /* allocate a new page for next frag */ 2426 page = alloc_pages(sk->sk_allocation, 0); 2427 2428 /* If alloc_pages() fails, just return failure; the caller will 2429 * free the previously allocated pages by doing kfree_skb() 2430 */ 2431 if (page == NULL) 2432 return -ENOMEM; 2433 2434 /* initialize the next frag */ 2435 sk->sk_sndmsg_page = page; 2436 sk->sk_sndmsg_off = 0; 2437 skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 2438 skb->truesize += PAGE_SIZE; 2439 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 2440 2441 /* get the new initialized frag */ 2442 frg_cnt = skb_shinfo(skb)->nr_frags; 2443 frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 2444 2445 /* copy the user data to page */ 2446 left = PAGE_SIZE - frag->page_offset; 2447 copy = (length > left)? left : length; 2448 2449 ret = getfrag(from, (page_address(frag->page) + 2450 frag->page_offset + frag->size), 2451 offset, copy, 0, skb); 2452 if (ret < 0) 2453 return -EFAULT; 2454 2455 /* copy was successful so update the size parameters */ 2456 sk->sk_sndmsg_off += copy; 2457 frag->size += copy; 2458 skb->len += copy; 2459 skb->data_len += copy; 2460 offset += copy; 2461 length -= copy; 2462 2463 } while (length > 0); 2464 2465 return 0; 2466 } 2467 EXPORT_SYMBOL(skb_append_datato_frags); 2468
2469 /** 2470 * skb_pull_rcsum - pull skb and update receive checksum 2471 * @skb: buffer to update 2472 * @len: length of data pulled 2473 * 2474 * This function performs an skb_pull on the packet and updates 2475 * the CHECKSUM_COMPLETE checksum. It should be used on 2476 * receive path processing instead of skb_pull unless you know 2477 * that the checksum difference is zero (e.g., a valid IP header) 2478 * or you are setting ip_summed to CHECKSUM_NONE.
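 *
 * A typical receive-path pattern (an illustrative sketch; "hdr_len" is a
 * hypothetical, already validated header length):
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdr_len);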
2479 */ 2480 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2481 { 2482 BUG_ON(len > skb->len); 2483 skb->len -= len; 2484 BUG_ON(skb->len < skb->data_len); 2485 skb_postpull_rcsum(skb, skb->data, len); 2486 return skb->data += len; 2487 } 2488 2489 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2490 2491 /** 2492 * skb_segment - Perform protocol segmentation on skb. 2493 * @skb: buffer to segment 2494 * @features: features for the output path (see dev->features) 2495 * 2496 * This function performs segmentation on the given skb. It returns 2497 * a pointer to the first in a list of new skbs for the segments. 2498 * In case of error it returns ERR_PTR(err). 2499 */ 2500 struct sk_buff *skb_segment(struct sk_buff *skb, int features) 2501 { 2502 struct sk_buff *segs = NULL; 2503 struct sk_buff *tail = NULL; 2504 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2505 unsigned int mss = skb_shinfo(skb)->gso_size; 2506 unsigned int doffset = skb->data - skb_mac_header(skb); 2507 unsigned int offset = doffset; 2508 unsigned int headroom; 2509 unsigned int len; 2510 int sg = features & NETIF_F_SG; 2511 int nfrags = skb_shinfo(skb)->nr_frags; 2512 int err = -ENOMEM; 2513 int i = 0; 2514 int pos; 2515 2516 __skb_push(skb, doffset); 2517 headroom = skb_headroom(skb); 2518 pos = skb_headlen(skb); 2519 2520 do { 2521 struct sk_buff *nskb; 2522 skb_frag_t *frag; 2523 int hsize; 2524 int size; 2525 2526 len = skb->len - offset; 2527 if (len > mss) 2528 len = mss; 2529 2530 hsize = skb_headlen(skb) - offset; 2531 if (hsize < 0) 2532 hsize = 0; 2533 if (hsize > len || !sg) 2534 hsize = len; 2535 2536 if (!hsize && i >= nfrags) { 2537 BUG_ON(fskb->len != len); 2538 2539 pos += len; 2540 nskb = skb_clone(fskb, GFP_ATOMIC); 2541 fskb = fskb->next; 2542 2543 if (unlikely(!nskb)) 2544 goto err; 2545 2546 hsize = skb_end_pointer(nskb) - nskb->head; 2547 if (skb_cow_head(nskb, doffset + headroom)) { 2548 kfree_skb(nskb); 2549 goto err; 2550 } 2551 2552 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2553 hsize; 2554 skb_release_head_state(nskb); 2555 __skb_push(nskb, doffset); 2556 } else { 2557 nskb = alloc_skb(hsize + doffset + headroom, 2558 GFP_ATOMIC); 2559 2560 if (unlikely(!nskb)) 2561 goto err; 2562 2563 skb_reserve(nskb, headroom); 2564 __skb_put(nskb, doffset); 2565 } 2566 2567 if (segs) 2568 tail->next = nskb; 2569 else 2570 segs = nskb; 2571 tail = nskb; 2572 2573 __copy_skb_header(nskb, skb); 2574 nskb->mac_len = skb->mac_len; 2575 2576 skb_reset_mac_header(nskb); 2577 skb_set_network_header(nskb, skb->mac_len); 2578 nskb->transport_header = (nskb->network_header + 2579 skb_network_header_len(skb)); 2580 skb_copy_from_linear_data(skb, nskb->data, doffset); 2581 2582 if (fskb != skb_shinfo(skb)->frag_list) 2583 continue; 2584 2585 if (!sg) { 2586 nskb->ip_summed = CHECKSUM_NONE; 2587 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2588 skb_put(nskb, len), 2589 len, 0); 2590 continue; 2591 } 2592 2593 frag = skb_shinfo(nskb)->frags; 2594 2595 skb_copy_from_linear_data_offset(skb, offset, 2596 skb_put(nskb, hsize), hsize); 2597 2598 while (pos < offset + len && i < nfrags) { 2599 *frag = skb_shinfo(skb)->frags[i]; 2600 get_page(frag->page); 2601 size = frag->size; 2602 2603 if (pos < offset) { 2604 frag->page_offset += offset - pos; 2605 frag->size -= offset - pos; 2606 } 2607 2608 skb_shinfo(nskb)->nr_frags++; 2609 2610 if (pos + size <= offset + len) { 2611 i++; 2612 pos += size; 2613 } else { 2614 frag->size -= pos + size - (offset + len); 2615 goto skip_fraglist; 2616 } 2617 2618 
frag++; 2619 } 2620 2621 if (pos < offset + len) { 2622 struct sk_buff *fskb2 = fskb; 2623 2624 BUG_ON(pos + fskb->len != offset + len); 2625 2626 pos += fskb->len; 2627 fskb = fskb->next; 2628 2629 if (fskb2->next) { 2630 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2631 if (!fskb2) 2632 goto err; 2633 } else 2634 skb_get(fskb2); 2635 2636 BUG_ON(skb_shinfo(nskb)->frag_list); 2637 skb_shinfo(nskb)->frag_list = fskb2; 2638 } 2639 2640 skip_fraglist: 2641 nskb->data_len = len - hsize; 2642 nskb->len += nskb->data_len; 2643 nskb->truesize += nskb->data_len; 2644 } while ((offset += len) < skb->len); 2645 2646 return segs; 2647 2648 err: 2649 while ((skb = segs)) { 2650 segs = skb->next; 2651 kfree_skb(skb); 2652 } 2653 return ERR_PTR(err); 2654 } 2655 EXPORT_SYMBOL_GPL(skb_segment); 2656 2657 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2658 { 2659 struct sk_buff *p = *head; 2660 struct sk_buff *nskb; 2661 unsigned int headroom; 2662 unsigned int len = skb_gro_len(skb); 2663 2664 if (p->len + len >= 65536) 2665 return -E2BIG; 2666 2667 if (skb_shinfo(p)->frag_list) 2668 goto merge; 2669 else if (skb_headlen(skb) <= skb_gro_offset(skb)) { 2670 if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags > 2671 MAX_SKB_FRAGS) 2672 return -E2BIG; 2673 2674 skb_shinfo(skb)->frags[0].page_offset += 2675 skb_gro_offset(skb) - skb_headlen(skb); 2676 skb_shinfo(skb)->frags[0].size -= 2677 skb_gro_offset(skb) - skb_headlen(skb); 2678 2679 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, 2680 skb_shinfo(skb)->frags, 2681 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 2682 2683 skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags; 2684 skb_shinfo(skb)->nr_frags = 0; 2685 2686 skb->truesize -= skb->data_len; 2687 skb->len -= skb->data_len; 2688 skb->data_len = 0; 2689 2690 NAPI_GRO_CB(skb)->free = 1; 2691 goto done; 2692 } 2693 2694 headroom = skb_headroom(p); 2695 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); 2696 if (unlikely(!nskb)) 2697 return -ENOMEM; 2698 2699 __copy_skb_header(nskb, p); 2700 nskb->mac_len = p->mac_len; 2701 2702 skb_reserve(nskb, headroom); 2703 __skb_put(nskb, skb_gro_offset(p)); 2704 2705 skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 2706 skb_set_network_header(nskb, skb_network_offset(p)); 2707 skb_set_transport_header(nskb, skb_transport_offset(p)); 2708 2709 __skb_pull(p, skb_gro_offset(p)); 2710 memcpy(skb_mac_header(nskb), skb_mac_header(p), 2711 p->data - skb_mac_header(p)); 2712 2713 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2714 skb_shinfo(nskb)->frag_list = p; 2715 skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size; 2716 skb_header_release(p); 2717 nskb->prev = p; 2718 2719 nskb->data_len += p->len; 2720 nskb->truesize += p->len; 2721 nskb->len += p->len; 2722 2723 *head = nskb; 2724 nskb->next = p->next; 2725 p->next = NULL; 2726 2727 p = nskb; 2728 2729 merge: 2730 if (skb_gro_offset(skb) > skb_headlen(skb)) { 2731 skb_shinfo(skb)->frags[0].page_offset += 2732 skb_gro_offset(skb) - skb_headlen(skb); 2733 skb_shinfo(skb)->frags[0].size -= 2734 skb_gro_offset(skb) - skb_headlen(skb); 2735 skb_gro_reset_offset(skb); 2736 skb_gro_pull(skb, skb_headlen(skb)); 2737 } 2738 2739 __skb_pull(skb, skb_gro_offset(skb)); 2740 2741 p->prev->next = skb; 2742 p->prev = skb; 2743 skb_header_release(skb); 2744 2745 done: 2746 NAPI_GRO_CB(p)->count++; 2747 p->data_len += len; 2748 p->truesize += len; 2749 p->len += len; 2750 2751 NAPI_GRO_CB(skb)->same_flow = 1; 2752 return 0; 2753 } 2754 EXPORT_SYMBOL_GPL(skb_gro_receive); 2755 2756 void __init 
skb_init(void) 2757 { 2758 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 2759 sizeof(struct sk_buff), 2760 0, 2761 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2762 NULL); 2763 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2764 (2*sizeof(struct sk_buff)) + 2765 sizeof(atomic_t), 2766 0, 2767 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2768 NULL); 2769 } 2770 2771 /** 2772 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 2773 * @skb: Socket buffer containing the buffers to be mapped 2774 * @sg: The scatter-gather list to map into 2775 * @offset: The offset into the buffer's contents to start mapping 2776 * @len: Length of buffer space to be mapped 2777 * 2778 * Fill the specified scatter-gather list with mappings/pointers into a 2779 * region of the buffer space attached to a socket buffer. 2780 */ 2781 static int 2782 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2783 { 2784 int start = skb_headlen(skb); 2785 int i, copy = start - offset; 2786 int elt = 0; 2787 2788 if (copy > 0) { 2789 if (copy > len) 2790 copy = len; 2791 sg_set_buf(sg, skb->data + offset, copy); 2792 elt++; 2793 if ((len -= copy) == 0) 2794 return elt; 2795 offset += copy; 2796 } 2797 2798 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2799 int end; 2800 2801 WARN_ON(start > offset + len); 2802 2803 end = start + skb_shinfo(skb)->frags[i].size; 2804 if ((copy = end - offset) > 0) { 2805 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2806 2807 if (copy > len) 2808 copy = len; 2809 sg_set_page(&sg[elt], frag->page, copy, 2810 frag->page_offset+offset-start); 2811 elt++; 2812 if (!(len -= copy)) 2813 return elt; 2814 offset += copy; 2815 } 2816 start = end; 2817 } 2818 2819 if (skb_shinfo(skb)->frag_list) { 2820 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2821 2822 for (; list; list = list->next) { 2823 int end; 2824 2825 WARN_ON(start > offset + len); 2826 2827 end = start + list->len; 2828 if ((copy = end - offset) > 0) { 2829 if (copy > len) 2830 copy = len; 2831 elt += __skb_to_sgvec(list, sg+elt, offset - start, 2832 copy); 2833 if ((len -= copy) == 0) 2834 return elt; 2835 offset += copy; 2836 } 2837 start = end; 2838 } 2839 } 2840 BUG_ON(len); 2841 return elt; 2842 } 2843 2844 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2845 { 2846 int nsg = __skb_to_sgvec(skb, sg, offset, len); 2847 2848 sg_mark_end(&sg[nsg - 1]); 2849 2850 return nsg; 2851 } 2852 EXPORT_SYMBOL_GPL(skb_to_sgvec); 2853 2854 /** 2855 * skb_cow_data - Check that a socket buffer's data buffers are writable 2856 * @skb: The socket buffer to check. 2857 * @tailbits: Amount of trailing space to be added 2858 * @trailer: Returned pointer to the skb where the @tailbits space begins 2859 * 2860 * Make sure that the data buffers attached to a socket buffer are 2861 * writable. If they are not, private copies are made of the data buffers 2862 * and the socket buffer is set to use these instead. 2863 * 2864 * If @tailbits is given, make sure that there is space to write @tailbits 2865 * bytes of data beyond current end of socket buffer. @trailer will be 2866 * set to point to the skb in which this space begins. 2867 * 2868 * The number of scatterlist elements required to completely map the 2869 * COW'd and extended socket buffer will be returned. 
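 *
 * A sketch of the usual calling pattern (illustrative only; the caller
 * picks the number of trailer bytes it needs):
 *
 *	struct sk_buff *trailer;
 *	int nsg;
 *
 *	nsg = skb_cow_data(skb, tailbits, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *
 * after which nsg scatterlist entries are enough for skb_to_sgvec() and
 * trailer points at the skb where the requested tail space begins.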
2870 */ 2871 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 2872 { 2873 int copyflag; 2874 int elt; 2875 struct sk_buff *skb1, **skb_p; 2876 2877 /* If skb is cloned or its head is paged, reallocate 2878 * head pulling out all the pages (pages are considered not writable 2879 * at the moment even if they are anonymous). 2880 */ 2881 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 2882 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 2883 return -ENOMEM; 2884 2885 /* Easy case. Most of packets will go this way. */ 2886 if (!skb_shinfo(skb)->frag_list) { 2887 /* A little of trouble, not enough of space for trailer. 2888 * This should not happen, when stack is tuned to generate 2889 * good frames. OK, on miss we reallocate and reserve even more 2890 * space, 128 bytes is fair. */ 2891 2892 if (skb_tailroom(skb) < tailbits && 2893 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 2894 return -ENOMEM; 2895 2896 /* Voila! */ 2897 *trailer = skb; 2898 return 1; 2899 } 2900 2901 /* Misery. We are in troubles, going to mincer fragments... */ 2902 2903 elt = 1; 2904 skb_p = &skb_shinfo(skb)->frag_list; 2905 copyflag = 0; 2906 2907 while ((skb1 = *skb_p) != NULL) { 2908 int ntail = 0; 2909 2910 /* The fragment is partially pulled by someone, 2911 * this can happen on input. Copy it and everything 2912 * after it. */ 2913 2914 if (skb_shared(skb1)) 2915 copyflag = 1; 2916 2917 /* If the skb is the last, worry about trailer. */ 2918 2919 if (skb1->next == NULL && tailbits) { 2920 if (skb_shinfo(skb1)->nr_frags || 2921 skb_shinfo(skb1)->frag_list || 2922 skb_tailroom(skb1) < tailbits) 2923 ntail = tailbits + 128; 2924 } 2925 2926 if (copyflag || 2927 skb_cloned(skb1) || 2928 ntail || 2929 skb_shinfo(skb1)->nr_frags || 2930 skb_shinfo(skb1)->frag_list) { 2931 struct sk_buff *skb2; 2932 2933 /* Fuck, we are miserable poor guys... */ 2934 if (ntail == 0) 2935 skb2 = skb_copy(skb1, GFP_ATOMIC); 2936 else 2937 skb2 = skb_copy_expand(skb1, 2938 skb_headroom(skb1), 2939 ntail, 2940 GFP_ATOMIC); 2941 if (unlikely(skb2 == NULL)) 2942 return -ENOMEM; 2943 2944 if (skb1->sk) 2945 skb_set_owner_w(skb2, skb1->sk); 2946 2947 /* Looking around. Are we still alive? 
2948 * OK, link new skb, drop old one */ 2949 2950 skb2->next = skb1->next; 2951 *skb_p = skb2; 2952 kfree_skb(skb1); 2953 skb1 = skb2; 2954 } 2955 elt++; 2956 *trailer = skb1; 2957 skb_p = &skb1->next; 2958 } 2959 2960 return elt; 2961 } 2962 EXPORT_SYMBOL_GPL(skb_cow_data); 2963 2964 void skb_tstamp_tx(struct sk_buff *orig_skb, 2965 struct skb_shared_hwtstamps *hwtstamps) 2966 { 2967 struct sock *sk = orig_skb->sk; 2968 struct sock_exterr_skb *serr; 2969 struct sk_buff *skb; 2970 int err; 2971 2972 if (!sk) 2973 return; 2974 2975 skb = skb_clone(orig_skb, GFP_ATOMIC); 2976 if (!skb) 2977 return; 2978 2979 if (hwtstamps) { 2980 *skb_hwtstamps(skb) = 2981 *hwtstamps; 2982 } else { 2983 /* 2984 * no hardware time stamps available, 2985 * so keep the skb_shared_tx and only 2986 * store software time stamp 2987 */ 2988 skb->tstamp = ktime_get_real(); 2989 } 2990 2991 serr = SKB_EXT_ERR(skb); 2992 memset(serr, 0, sizeof(*serr)); 2993 serr->ee.ee_errno = ENOMSG; 2994 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 2995 err = sock_queue_err_skb(sk, skb); 2996 if (err) 2997 kfree_skb(skb); 2998 } 2999 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3000 3001 3002 /** 3003 * skb_partial_csum_set - set up and verify partial csum values for packet 3004 * @skb: the skb to set 3005 * @start: the number of bytes after skb->data to start checksumming. 3006 * @off: the offset from start to place the checksum. 3007 * 3008 * For untrusted partially-checksummed packets, we need to make sure the values 3009 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3010 * 3011 * This function checks and sets those values and skb->ip_summed: if this 3012 * returns false you should drop the packet. 3013 */ 3014 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3015 { 3016 if (unlikely(start > skb->len - 2) || 3017 unlikely((int)start + off > skb->len - 2)) { 3018 if (net_ratelimit()) 3019 printk(KERN_WARNING 3020 "bad partial csum: csum=%u/%u len=%u\n", 3021 start, off, skb->len); 3022 return false; 3023 } 3024 skb->ip_summed = CHECKSUM_PARTIAL; 3025 skb->csum_start = skb_headroom(skb) + start; 3026 skb->csum_offset = off; 3027 return true; 3028 } 3029 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3030 3031 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 3032 { 3033 if (net_ratelimit()) 3034 pr_warning("%s: received packets cannot be forwarded" 3035 " while LRO is enabled\n", skb->dev->name); 3036 } 3037 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3038