1 /* 2 * Routines having to do with the 'struct sk_buff' memory handlers. 3 * 4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 5 * Florian La Roche <rzsfl@rz.uni-sb.de> 6 * 7 * Fixes: 8 * Alan Cox : Fixed the worst of the load 9 * balancer bugs. 10 * Dave Platt : Interrupt stacking fix. 11 * Richard Kooijman : Timestamp fixes. 12 * Alan Cox : Changed buffer format. 13 * Alan Cox : destructor hook for AF_UNIX etc. 14 * Linus Torvalds : Better skb_clone. 15 * Alan Cox : Added skb_copy. 16 * Alan Cox : Added all the changed routines Linus 17 * only put in the headers 18 * Ray VanTassle : Fixed --skb->lock in free 19 * Alan Cox : skb_copy copy arp field 20 * Andi Kleen : slabified it. 21 * Robert Olsson : Removed skb_head_pool 22 * 23 * NOTE: 24 * The __skb_ routines should be called with interrupts 25 * disabled, or you better be *real* sure that the operation is atomic 26 * with respect to whatever list is being frobbed (e.g. via lock_sock() 27 * or via disabling bottom half handlers, etc). 28 * 29 * This program is free software; you can redistribute it and/or 30 * modify it under the terms of the GNU General Public License 31 * as published by the Free Software Foundation; either version 32 * 2 of the License, or (at your option) any later version. 33 */ 34 35 /* 36 * The functions in this file will not compile correctly with gcc 2.4.x 37 */ 38 39 #include <linux/module.h> 40 #include <linux/types.h> 41 #include <linux/kernel.h> 42 #include <linux/mm.h> 43 #include <linux/interrupt.h> 44 #include <linux/in.h> 45 #include <linux/inet.h> 46 #include <linux/slab.h> 47 #include <linux/netdevice.h> 48 #ifdef CONFIG_NET_CLS_ACT 49 #include <net/pkt_sched.h> 50 #endif 51 #include <linux/string.h> 52 #include <linux/skbuff.h> 53 #include <linux/splice.h> 54 #include <linux/cache.h> 55 #include <linux/rtnetlink.h> 56 #include <linux/init.h> 57 #include <linux/scatterlist.h> 58 59 #include <net/protocol.h> 60 #include <net/dst.h> 61 #include <net/sock.h> 62 #include <net/checksum.h> 63 #include <net/xfrm.h> 64 65 #include <asm/uaccess.h> 66 #include <asm/system.h> 67 68 #include "kmap_skb.h" 69 70 static struct kmem_cache *skbuff_head_cache __read_mostly; 71 static struct kmem_cache *skbuff_fclone_cache __read_mostly; 72 73 static void sock_pipe_buf_release(struct pipe_inode_info *pipe, 74 struct pipe_buffer *buf) 75 { 76 struct sk_buff *skb = (struct sk_buff *) buf->private; 77 78 kfree_skb(skb); 79 } 80 81 static void sock_pipe_buf_get(struct pipe_inode_info *pipe, 82 struct pipe_buffer *buf) 83 { 84 struct sk_buff *skb = (struct sk_buff *) buf->private; 85 86 skb_get(skb); 87 } 88 89 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe, 90 struct pipe_buffer *buf) 91 { 92 return 1; 93 } 94 95 96 /* Pipe buffer operations for a socket. */ 97 static struct pipe_buf_operations sock_pipe_buf_ops = { 98 .can_merge = 0, 99 .map = generic_pipe_buf_map, 100 .unmap = generic_pipe_buf_unmap, 101 .confirm = generic_pipe_buf_confirm, 102 .release = sock_pipe_buf_release, 103 .steal = sock_pipe_buf_steal, 104 .get = sock_pipe_buf_get, 105 }; 106 107 /* 108 * Keep out-of-line to prevent kernel bloat. 109 * __builtin_return_address is not used because it is not always 110 * reliable. 111 */ 112 113 /** 114 * skb_over_panic - private function 115 * @skb: buffer 116 * @sz: size 117 * @here: address 118 * 119 * Out of line support code for skb_put(). Not user callable. 
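 *	(Typically reached when skb_put() is asked to extend the data area
 *	past the end of the allocated buffer, i.e. skb_put(skb, len) with
 *	len greater than skb_tailroom(skb).)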
120 */ 121 void skb_over_panic(struct sk_buff *skb, int sz, void *here) 122 { 123 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " 124 "data:%p tail:%#lx end:%#lx dev:%s\n", 125 here, skb->len, sz, skb->head, skb->data, 126 (unsigned long)skb->tail, (unsigned long)skb->end, 127 skb->dev ? skb->dev->name : "<NULL>"); 128 BUG(); 129 } 130 131 /** 132 * skb_under_panic - private function 133 * @skb: buffer 134 * @sz: size 135 * @here: address 136 * 137 * Out of line support code for skb_push(). Not user callable. 138 */ 139 140 void skb_under_panic(struct sk_buff *skb, int sz, void *here) 141 { 142 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " 143 "data:%p tail:%#lx end:%#lx dev:%s\n", 144 here, skb->len, sz, skb->head, skb->data, 145 (unsigned long)skb->tail, (unsigned long)skb->end, 146 skb->dev ? skb->dev->name : "<NULL>"); 147 BUG(); 148 } 149 150 void skb_truesize_bug(struct sk_buff *skb) 151 { 152 WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) " 153 "len=%u, sizeof(sk_buff)=%Zd\n", 154 skb->truesize, skb->len, sizeof(struct sk_buff)); 155 } 156 EXPORT_SYMBOL(skb_truesize_bug); 157 158 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 159 * 'private' fields and also do memory statistics to find all the 160 * [BEEP] leaks. 161 * 162 */ 163 164 /** 165 * __alloc_skb - allocate a network buffer 166 * @size: size to allocate 167 * @gfp_mask: allocation mask 168 * @fclone: allocate from fclone cache instead of head cache 169 * and allocate a cloned (child) skb 170 * @node: numa node to allocate memory on 171 * 172 * Allocate a new &sk_buff. The returned buffer has no headroom and a 173 * tail room of size bytes. The object has a reference count of one. 174 * The return is the buffer. On a failure the return is %NULL. 175 * 176 * Buffers may only be allocated from interrupts using a @gfp_mask of 177 * %GFP_ATOMIC. 178 */ 179 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 180 int fclone, int node) 181 { 182 struct kmem_cache *cache; 183 struct skb_shared_info *shinfo; 184 struct sk_buff *skb; 185 u8 *data; 186 187 cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; 188 189 /* Get the HEAD */ 190 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); 191 if (!skb) 192 goto out; 193 194 size = SKB_DATA_ALIGN(size); 195 data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), 196 gfp_mask, node); 197 if (!data) 198 goto nodata; 199 200 /* 201 * Only clear those fields we need to clear, not those that we will 202 * actually initialise below. Hence, don't put any more fields after 203 * the tail pointer in struct sk_buff! 
204 */ 205 memset(skb, 0, offsetof(struct sk_buff, tail)); 206 skb->truesize = size + sizeof(struct sk_buff); 207 atomic_set(&skb->users, 1); 208 skb->head = data; 209 skb->data = data; 210 skb_reset_tail_pointer(skb); 211 skb->end = skb->tail + size; 212 /* make sure we initialize shinfo sequentially */ 213 shinfo = skb_shinfo(skb); 214 atomic_set(&shinfo->dataref, 1); 215 shinfo->nr_frags = 0; 216 shinfo->gso_size = 0; 217 shinfo->gso_segs = 0; 218 shinfo->gso_type = 0; 219 shinfo->ip6_frag_id = 0; 220 shinfo->frag_list = NULL; 221 222 if (fclone) { 223 struct sk_buff *child = skb + 1; 224 atomic_t *fclone_ref = (atomic_t *) (child + 1); 225 226 skb->fclone = SKB_FCLONE_ORIG; 227 atomic_set(fclone_ref, 1); 228 229 child->fclone = SKB_FCLONE_UNAVAILABLE; 230 } 231 out: 232 return skb; 233 nodata: 234 kmem_cache_free(cache, skb); 235 skb = NULL; 236 goto out; 237 } 238 239 /** 240 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 241 * @dev: network device to receive on 242 * @length: length to allocate 243 * @gfp_mask: get_free_pages mask, passed to alloc_skb 244 * 245 * Allocate a new &sk_buff and assign it a usage count of one. The 246 * buffer has unspecified headroom built in. Users should allocate 247 * the headroom they think they need without accounting for the 248 * built in space. The built in space is used for optimisations. 249 * 250 * %NULL is returned if there is no free memory. 251 */ 252 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 253 unsigned int length, gfp_t gfp_mask) 254 { 255 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; 256 struct sk_buff *skb; 257 258 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); 259 if (likely(skb)) { 260 skb_reserve(skb, NET_SKB_PAD); 261 skb->dev = dev; 262 } 263 return skb; 264 } 265 266 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) 267 { 268 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; 269 struct page *page; 270 271 page = alloc_pages_node(node, gfp_mask, 0); 272 return page; 273 } 274 EXPORT_SYMBOL(__netdev_alloc_page); 275 276 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 277 int size) 278 { 279 skb_fill_page_desc(skb, i, page, off, size); 280 skb->len += size; 281 skb->data_len += size; 282 skb->truesize += size; 283 } 284 EXPORT_SYMBOL(skb_add_rx_frag); 285 286 /** 287 * dev_alloc_skb - allocate an skbuff for receiving 288 * @length: length to allocate 289 * 290 * Allocate a new &sk_buff and assign it a usage count of one. The 291 * buffer has unspecified headroom built in. Users should allocate 292 * the headroom they think they need without accounting for the 293 * built in space. The built in space is used for optimisations. 294 * 295 * %NULL is returned if there is no free memory. Although this function 296 * allocates memory it can be called from an interrupt. 
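 *
 *	Illustrative RX-path sketch (not part of the original file; pkt_len,
 *	rx_buf and the 2-byte alignment pad are hypothetical, error handling
 *	abbreviated):
 *
 *		skb = dev_alloc_skb(pkt_len + 2);
 *		if (!skb)
 *			return;				// drop the frame
 *		skb_reserve(skb, 2);			// align the IP header
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);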
297 */ 298 struct sk_buff *dev_alloc_skb(unsigned int length) 299 { 300 /* 301 * There is more code here than it seems: 302 * __dev_alloc_skb is an inline 303 */ 304 return __dev_alloc_skb(length, GFP_ATOMIC); 305 } 306 EXPORT_SYMBOL(dev_alloc_skb); 307 308 static void skb_drop_list(struct sk_buff **listp) 309 { 310 struct sk_buff *list = *listp; 311 312 *listp = NULL; 313 314 do { 315 struct sk_buff *this = list; 316 list = list->next; 317 kfree_skb(this); 318 } while (list); 319 } 320 321 static inline void skb_drop_fraglist(struct sk_buff *skb) 322 { 323 skb_drop_list(&skb_shinfo(skb)->frag_list); 324 } 325 326 static void skb_clone_fraglist(struct sk_buff *skb) 327 { 328 struct sk_buff *list; 329 330 for (list = skb_shinfo(skb)->frag_list; list; list = list->next) 331 skb_get(list); 332 } 333 334 static void skb_release_data(struct sk_buff *skb) 335 { 336 if (!skb->cloned || 337 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 338 &skb_shinfo(skb)->dataref)) { 339 if (skb_shinfo(skb)->nr_frags) { 340 int i; 341 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 342 put_page(skb_shinfo(skb)->frags[i].page); 343 } 344 345 if (skb_shinfo(skb)->frag_list) 346 skb_drop_fraglist(skb); 347 348 kfree(skb->head); 349 } 350 } 351 352 /* 353 * Free an skbuff by memory without cleaning the state. 354 */ 355 static void kfree_skbmem(struct sk_buff *skb) 356 { 357 struct sk_buff *other; 358 atomic_t *fclone_ref; 359 360 switch (skb->fclone) { 361 case SKB_FCLONE_UNAVAILABLE: 362 kmem_cache_free(skbuff_head_cache, skb); 363 break; 364 365 case SKB_FCLONE_ORIG: 366 fclone_ref = (atomic_t *) (skb + 2); 367 if (atomic_dec_and_test(fclone_ref)) 368 kmem_cache_free(skbuff_fclone_cache, skb); 369 break; 370 371 case SKB_FCLONE_CLONE: 372 fclone_ref = (atomic_t *) (skb + 1); 373 other = skb - 1; 374 375 /* The clone portion is available for 376 * fast-cloning again. 377 */ 378 skb->fclone = SKB_FCLONE_UNAVAILABLE; 379 380 if (atomic_dec_and_test(fclone_ref)) 381 kmem_cache_free(skbuff_fclone_cache, other); 382 break; 383 } 384 } 385 386 static void skb_release_head_state(struct sk_buff *skb) 387 { 388 dst_release(skb->dst); 389 #ifdef CONFIG_XFRM 390 secpath_put(skb->sp); 391 #endif 392 if (skb->destructor) { 393 WARN_ON(in_irq()); 394 skb->destructor(skb); 395 } 396 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 397 nf_conntrack_put(skb->nfct); 398 nf_conntrack_put_reasm(skb->nfct_reasm); 399 #endif 400 #ifdef CONFIG_BRIDGE_NETFILTER 401 nf_bridge_put(skb->nf_bridge); 402 #endif 403 /* XXX: IS this still necessary? - JHS */ 404 #ifdef CONFIG_NET_SCHED 405 skb->tc_index = 0; 406 #ifdef CONFIG_NET_CLS_ACT 407 skb->tc_verd = 0; 408 #endif 409 #endif 410 } 411 412 /* Free everything but the sk_buff shell. */ 413 static void skb_release_all(struct sk_buff *skb) 414 { 415 skb_release_head_state(skb); 416 skb_release_data(skb); 417 } 418 419 /** 420 * __kfree_skb - private function 421 * @skb: buffer 422 * 423 * Free an sk_buff. Release anything attached to the buffer. 424 * Clean the state. This is an internal helper function. Users should 425 * always call kfree_skb 426 */ 427 428 void __kfree_skb(struct sk_buff *skb) 429 { 430 skb_release_all(skb); 431 kfree_skbmem(skb); 432 } 433 434 /** 435 * kfree_skb - free an sk_buff 436 * @skb: buffer to free 437 * 438 * Drop a reference to the buffer and free it if the usage count has 439 * hit zero. 
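 *
 *	Minimal sketch of the reference counting contract (illustrative,
 *	not part of the original file):
 *
 *		struct sk_buff *hold = skb_get(skb);	// users: 2
 *		...
 *		kfree_skb(skb);				// users: 1, buffer survives
 *		kfree_skb(hold);			// users: 0, buffer is freed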
440 */ 441 void kfree_skb(struct sk_buff *skb) 442 { 443 if (unlikely(!skb)) 444 return; 445 if (likely(atomic_read(&skb->users) == 1)) 446 smp_rmb(); 447 else if (likely(!atomic_dec_and_test(&skb->users))) 448 return; 449 __kfree_skb(skb); 450 } 451 452 /** 453 * skb_recycle_check - check if skb can be reused for receive 454 * @skb: buffer 455 * @skb_size: minimum receive buffer size 456 * 457 * Checks that the skb passed in is not shared or cloned, and 458 * that it is linear and its head portion at least as large as 459 * skb_size so that it can be recycled as a receive buffer. 460 * If these conditions are met, this function does any necessary 461 * reference count dropping and cleans up the skbuff as if it 462 * just came from __alloc_skb(). 463 */ 464 int skb_recycle_check(struct sk_buff *skb, int skb_size) 465 { 466 struct skb_shared_info *shinfo; 467 468 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 469 return 0; 470 471 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 472 if (skb_end_pointer(skb) - skb->head < skb_size) 473 return 0; 474 475 if (skb_shared(skb) || skb_cloned(skb)) 476 return 0; 477 478 skb_release_head_state(skb); 479 shinfo = skb_shinfo(skb); 480 atomic_set(&shinfo->dataref, 1); 481 shinfo->nr_frags = 0; 482 shinfo->gso_size = 0; 483 shinfo->gso_segs = 0; 484 shinfo->gso_type = 0; 485 shinfo->ip6_frag_id = 0; 486 shinfo->frag_list = NULL; 487 488 memset(skb, 0, offsetof(struct sk_buff, tail)); 489 skb->data = skb->head + NET_SKB_PAD; 490 skb_reset_tail_pointer(skb); 491 492 return 1; 493 } 494 EXPORT_SYMBOL(skb_recycle_check); 495 496 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 497 { 498 new->tstamp = old->tstamp; 499 new->dev = old->dev; 500 new->transport_header = old->transport_header; 501 new->network_header = old->network_header; 502 new->mac_header = old->mac_header; 503 new->dst = dst_clone(old->dst); 504 #ifdef CONFIG_XFRM 505 new->sp = secpath_get(old->sp); 506 #endif 507 memcpy(new->cb, old->cb, sizeof(old->cb)); 508 new->csum_start = old->csum_start; 509 new->csum_offset = old->csum_offset; 510 new->local_df = old->local_df; 511 new->pkt_type = old->pkt_type; 512 new->ip_summed = old->ip_summed; 513 skb_copy_queue_mapping(new, old); 514 new->priority = old->priority; 515 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 516 new->ipvs_property = old->ipvs_property; 517 #endif 518 new->protocol = old->protocol; 519 new->mark = old->mark; 520 __nf_copy(new, old); 521 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 522 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 523 new->nf_trace = old->nf_trace; 524 #endif 525 #ifdef CONFIG_NET_SCHED 526 new->tc_index = old->tc_index; 527 #ifdef CONFIG_NET_CLS_ACT 528 new->tc_verd = old->tc_verd; 529 #endif 530 #endif 531 new->vlan_tci = old->vlan_tci; 532 533 skb_copy_secmark(new, old); 534 } 535 536 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 537 { 538 #define C(x) n->x = skb->x 539 540 n->next = n->prev = NULL; 541 n->sk = NULL; 542 __copy_skb_header(n, skb); 543 544 C(len); 545 C(data_len); 546 C(mac_len); 547 n->hdr_len = skb->nohdr ? 
skb_headroom(skb) : skb->hdr_len; 548 n->cloned = 1; 549 n->nohdr = 0; 550 n->destructor = NULL; 551 C(iif); 552 C(tail); 553 C(end); 554 C(head); 555 C(data); 556 C(truesize); 557 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) 558 C(do_not_encrypt); 559 C(requeue); 560 #endif 561 atomic_set(&n->users, 1); 562 563 atomic_inc(&(skb_shinfo(skb)->dataref)); 564 skb->cloned = 1; 565 566 return n; 567 #undef C 568 } 569 570 /** 571 * skb_morph - morph one skb into another 572 * @dst: the skb to receive the contents 573 * @src: the skb to supply the contents 574 * 575 * This is identical to skb_clone except that the target skb is 576 * supplied by the user. 577 * 578 * The target skb is returned upon exit. 579 */ 580 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 581 { 582 skb_release_all(dst); 583 return __skb_clone(dst, src); 584 } 585 EXPORT_SYMBOL_GPL(skb_morph); 586 587 /** 588 * skb_clone - duplicate an sk_buff 589 * @skb: buffer to clone 590 * @gfp_mask: allocation priority 591 * 592 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 593 * copies share the same packet data but not structure. The new 594 * buffer has a reference count of 1. If the allocation fails the 595 * function returns %NULL otherwise the new buffer is returned. 596 * 597 * If this function is called from an interrupt gfp_mask() must be 598 * %GFP_ATOMIC. 599 */ 600 601 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 602 { 603 struct sk_buff *n; 604 605 n = skb + 1; 606 if (skb->fclone == SKB_FCLONE_ORIG && 607 n->fclone == SKB_FCLONE_UNAVAILABLE) { 608 atomic_t *fclone_ref = (atomic_t *) (n + 1); 609 n->fclone = SKB_FCLONE_CLONE; 610 atomic_inc(fclone_ref); 611 } else { 612 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 613 if (!n) 614 return NULL; 615 n->fclone = SKB_FCLONE_UNAVAILABLE; 616 } 617 618 return __skb_clone(n, skb); 619 } 620 621 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 622 { 623 #ifndef NET_SKBUFF_DATA_USES_OFFSET 624 /* 625 * Shift between the two data areas in bytes 626 */ 627 unsigned long offset = new->data - old->data; 628 #endif 629 630 __copy_skb_header(new, old); 631 632 #ifndef NET_SKBUFF_DATA_USES_OFFSET 633 /* {transport,network,mac}_header are relative to skb->head */ 634 new->transport_header += offset; 635 new->network_header += offset; 636 new->mac_header += offset; 637 #endif 638 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 639 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 640 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 641 } 642 643 /** 644 * skb_copy - create private copy of an sk_buff 645 * @skb: buffer to copy 646 * @gfp_mask: allocation priority 647 * 648 * Make a copy of both an &sk_buff and its data. This is used when the 649 * caller wishes to modify the data and needs a private copy of the 650 * data to alter. Returns %NULL on failure or the pointer to the buffer 651 * on success. The returned buffer has a reference count of 1. 652 * 653 * As by-product this function converts non-linear &sk_buff to linear 654 * one, so that &sk_buff becomes completely private and caller is allowed 655 * to modify all the data of returned buffer. This means that this 656 * function is not recommended for use in circumstances when only 657 * header is going to be modified. Use pskb_copy() instead. 
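 *
 * As a rule of thumb (editorial note, not part of the original file):
 * skb_clone() when nothing will be written, pskb_copy() when only the
 * headers will be edited, skb_copy() when payload bytes must change too:
 *
 *	nskb = skb_copy(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	// nskb is linear and fully private; any byte may be modified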
658 */ 659 660 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 661 { 662 int headerlen = skb->data - skb->head; 663 /* 664 * Allocate the copy buffer 665 */ 666 struct sk_buff *n; 667 #ifdef NET_SKBUFF_DATA_USES_OFFSET 668 n = alloc_skb(skb->end + skb->data_len, gfp_mask); 669 #else 670 n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask); 671 #endif 672 if (!n) 673 return NULL; 674 675 /* Set the data pointer */ 676 skb_reserve(n, headerlen); 677 /* Set the tail pointer and length */ 678 skb_put(n, skb->len); 679 680 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) 681 BUG(); 682 683 copy_skb_header(n, skb); 684 return n; 685 } 686 687 688 /** 689 * pskb_copy - create copy of an sk_buff with private head. 690 * @skb: buffer to copy 691 * @gfp_mask: allocation priority 692 * 693 * Make a copy of both an &sk_buff and part of its data, located 694 * in header. Fragmented data remain shared. This is used when 695 * the caller wishes to modify only header of &sk_buff and needs 696 * private copy of the header to alter. Returns %NULL on failure 697 * or the pointer to the buffer on success. 698 * The returned buffer has a reference count of 1. 699 */ 700 701 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 702 { 703 /* 704 * Allocate the copy buffer 705 */ 706 struct sk_buff *n; 707 #ifdef NET_SKBUFF_DATA_USES_OFFSET 708 n = alloc_skb(skb->end, gfp_mask); 709 #else 710 n = alloc_skb(skb->end - skb->head, gfp_mask); 711 #endif 712 if (!n) 713 goto out; 714 715 /* Set the data pointer */ 716 skb_reserve(n, skb->data - skb->head); 717 /* Set the tail pointer and length */ 718 skb_put(n, skb_headlen(skb)); 719 /* Copy the bytes */ 720 skb_copy_from_linear_data(skb, n->data, n->len); 721 722 n->truesize += skb->data_len; 723 n->data_len = skb->data_len; 724 n->len = skb->len; 725 726 if (skb_shinfo(skb)->nr_frags) { 727 int i; 728 729 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 730 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 731 get_page(skb_shinfo(n)->frags[i].page); 732 } 733 skb_shinfo(n)->nr_frags = i; 734 } 735 736 if (skb_shinfo(skb)->frag_list) { 737 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 738 skb_clone_fraglist(n); 739 } 740 741 copy_skb_header(n, skb); 742 out: 743 return n; 744 } 745 746 /** 747 * pskb_expand_head - reallocate header of &sk_buff 748 * @skb: buffer to reallocate 749 * @nhead: room to add at head 750 * @ntail: room to add at tail 751 * @gfp_mask: allocation priority 752 * 753 * Expands (or creates identical copy, if &nhead and &ntail are zero) 754 * header of skb. &sk_buff itself is not changed. &sk_buff MUST have 755 * reference count of 1. Returns zero in the case of success or error, 756 * if expansion failed. In the last case, &sk_buff is not changed. 757 * 758 * All the pointers pointing into skb header may change and must be 759 * reloaded after call to this function. 760 */ 761 762 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 763 gfp_t gfp_mask) 764 { 765 int i; 766 u8 *data; 767 #ifdef NET_SKBUFF_DATA_USES_OFFSET 768 int size = nhead + skb->end + ntail; 769 #else 770 int size = nhead + (skb->end - skb->head) + ntail; 771 #endif 772 long off; 773 774 BUG_ON(nhead < 0); 775 776 if (skb_shared(skb)) 777 BUG(); 778 779 size = SKB_DATA_ALIGN(size); 780 781 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 782 if (!data) 783 goto nodata; 784 785 /* Copy only real data... and, alas, header. 
This should be 786 * optimized for the cases when header is void. */ 787 #ifdef NET_SKBUFF_DATA_USES_OFFSET 788 memcpy(data + nhead, skb->head, skb->tail); 789 #else 790 memcpy(data + nhead, skb->head, skb->tail - skb->head); 791 #endif 792 memcpy(data + size, skb_end_pointer(skb), 793 sizeof(struct skb_shared_info)); 794 795 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 796 get_page(skb_shinfo(skb)->frags[i].page); 797 798 if (skb_shinfo(skb)->frag_list) 799 skb_clone_fraglist(skb); 800 801 skb_release_data(skb); 802 803 off = (data + nhead) - skb->head; 804 805 skb->head = data; 806 skb->data += off; 807 #ifdef NET_SKBUFF_DATA_USES_OFFSET 808 skb->end = size; 809 off = nhead; 810 #else 811 skb->end = skb->head + size; 812 #endif 813 /* {transport,network,mac}_header and tail are relative to skb->head */ 814 skb->tail += off; 815 skb->transport_header += off; 816 skb->network_header += off; 817 skb->mac_header += off; 818 skb->csum_start += nhead; 819 skb->cloned = 0; 820 skb->hdr_len = 0; 821 skb->nohdr = 0; 822 atomic_set(&skb_shinfo(skb)->dataref, 1); 823 return 0; 824 825 nodata: 826 return -ENOMEM; 827 } 828 829 /* Make private copy of skb with writable head and some headroom */ 830 831 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 832 { 833 struct sk_buff *skb2; 834 int delta = headroom - skb_headroom(skb); 835 836 if (delta <= 0) 837 skb2 = pskb_copy(skb, GFP_ATOMIC); 838 else { 839 skb2 = skb_clone(skb, GFP_ATOMIC); 840 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 841 GFP_ATOMIC)) { 842 kfree_skb(skb2); 843 skb2 = NULL; 844 } 845 } 846 return skb2; 847 } 848 849 850 /** 851 * skb_copy_expand - copy and expand sk_buff 852 * @skb: buffer to copy 853 * @newheadroom: new free bytes at head 854 * @newtailroom: new free bytes at tail 855 * @gfp_mask: allocation priority 856 * 857 * Make a copy of both an &sk_buff and its data and while doing so 858 * allocate additional space. 859 * 860 * This is used when the caller wishes to modify the data and needs a 861 * private copy of the data to alter as well as more space for new fields. 862 * Returns %NULL on failure or the pointer to the buffer 863 * on success. The returned buffer has a reference count of 1. 864 * 865 * You must pass %GFP_ATOMIC as the allocation priority if this function 866 * is called from an interrupt. 867 */ 868 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 869 int newheadroom, int newtailroom, 870 gfp_t gfp_mask) 871 { 872 /* 873 * Allocate the copy buffer 874 */ 875 struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, 876 gfp_mask); 877 int oldheadroom = skb_headroom(skb); 878 int head_copy_len, head_copy_off; 879 int off; 880 881 if (!n) 882 return NULL; 883 884 skb_reserve(n, newheadroom); 885 886 /* Set the tail pointer and length */ 887 skb_put(n, skb->len); 888 889 head_copy_len = oldheadroom; 890 head_copy_off = 0; 891 if (newheadroom <= head_copy_len) 892 head_copy_len = newheadroom; 893 else 894 head_copy_off = newheadroom - head_copy_len; 895 896 /* Copy the linear header and data. 
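	 * The negative source offset passed to skb_copy_bits() below makes it
	 * start inside the old headroom, so head_copy_len bytes of header are
	 * copied along with the skb->len bytes of data.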
*/ 897 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 898 skb->len + head_copy_len)) 899 BUG(); 900 901 copy_skb_header(n, skb); 902 903 off = newheadroom - oldheadroom; 904 n->csum_start += off; 905 #ifdef NET_SKBUFF_DATA_USES_OFFSET 906 n->transport_header += off; 907 n->network_header += off; 908 n->mac_header += off; 909 #endif 910 911 return n; 912 } 913 914 /** 915 * skb_pad - zero pad the tail of an skb 916 * @skb: buffer to pad 917 * @pad: space to pad 918 * 919 * Ensure that a buffer is followed by a padding area that is zero 920 * filled. Used by network drivers which may DMA or transfer data 921 * beyond the buffer end onto the wire. 922 * 923 * May return error in out of memory cases. The skb is freed on error. 924 */ 925 926 int skb_pad(struct sk_buff *skb, int pad) 927 { 928 int err; 929 int ntail; 930 931 /* If the skbuff is non linear tailroom is always zero.. */ 932 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 933 memset(skb->data+skb->len, 0, pad); 934 return 0; 935 } 936 937 ntail = skb->data_len + pad - (skb->end - skb->tail); 938 if (likely(skb_cloned(skb) || ntail > 0)) { 939 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 940 if (unlikely(err)) 941 goto free_skb; 942 } 943 944 /* FIXME: The use of this function with non-linear skb's really needs 945 * to be audited. 946 */ 947 err = skb_linearize(skb); 948 if (unlikely(err)) 949 goto free_skb; 950 951 memset(skb->data + skb->len, 0, pad); 952 return 0; 953 954 free_skb: 955 kfree_skb(skb); 956 return err; 957 } 958 959 /** 960 * skb_put - add data to a buffer 961 * @skb: buffer to use 962 * @len: amount of data to add 963 * 964 * This function extends the used data area of the buffer. If this would 965 * exceed the total buffer size the kernel will panic. A pointer to the 966 * first byte of the extra data is returned. 967 */ 968 unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 969 { 970 unsigned char *tmp = skb_tail_pointer(skb); 971 SKB_LINEAR_ASSERT(skb); 972 skb->tail += len; 973 skb->len += len; 974 if (unlikely(skb->tail > skb->end)) 975 skb_over_panic(skb, len, __builtin_return_address(0)); 976 return tmp; 977 } 978 EXPORT_SYMBOL(skb_put); 979 980 /** 981 * skb_push - add data to the start of a buffer 982 * @skb: buffer to use 983 * @len: amount of data to add 984 * 985 * This function extends the used data area of the buffer at the buffer 986 * start. If this would exceed the total buffer headroom the kernel will 987 * panic. A pointer to the first byte of the extra data is returned. 988 */ 989 unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 990 { 991 skb->data -= len; 992 skb->len += len; 993 if (unlikely(skb->data<skb->head)) 994 skb_under_panic(skb, len, __builtin_return_address(0)); 995 return skb->data; 996 } 997 EXPORT_SYMBOL(skb_push); 998 999 /** 1000 * skb_pull - remove data from the start of a buffer 1001 * @skb: buffer to use 1002 * @len: amount of data to remove 1003 * 1004 * This function removes data from the start of a buffer, returning 1005 * the memory to the headroom. A pointer to the next data in the buffer 1006 * is returned. Once the data has been pulled future pushes will overwrite 1007 * the old data. 1008 */ 1009 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 1010 { 1011 return unlikely(len > skb->len) ? 
NULL : __skb_pull(skb, len); 1012 } 1013 EXPORT_SYMBOL(skb_pull); 1014 1015 /** 1016 * skb_trim - remove end from a buffer 1017 * @skb: buffer to alter 1018 * @len: new length 1019 * 1020 * Cut the length of a buffer down by removing data from the tail. If 1021 * the buffer is already under the length specified it is not modified. 1022 * The skb must be linear. 1023 */ 1024 void skb_trim(struct sk_buff *skb, unsigned int len) 1025 { 1026 if (skb->len > len) 1027 __skb_trim(skb, len); 1028 } 1029 EXPORT_SYMBOL(skb_trim); 1030 1031 /* Trims skb to length len. It can change skb pointers. 1032 */ 1033 1034 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 1035 { 1036 struct sk_buff **fragp; 1037 struct sk_buff *frag; 1038 int offset = skb_headlen(skb); 1039 int nfrags = skb_shinfo(skb)->nr_frags; 1040 int i; 1041 int err; 1042 1043 if (skb_cloned(skb) && 1044 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 1045 return err; 1046 1047 i = 0; 1048 if (offset >= len) 1049 goto drop_pages; 1050 1051 for (; i < nfrags; i++) { 1052 int end = offset + skb_shinfo(skb)->frags[i].size; 1053 1054 if (end < len) { 1055 offset = end; 1056 continue; 1057 } 1058 1059 skb_shinfo(skb)->frags[i++].size = len - offset; 1060 1061 drop_pages: 1062 skb_shinfo(skb)->nr_frags = i; 1063 1064 for (; i < nfrags; i++) 1065 put_page(skb_shinfo(skb)->frags[i].page); 1066 1067 if (skb_shinfo(skb)->frag_list) 1068 skb_drop_fraglist(skb); 1069 goto done; 1070 } 1071 1072 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 1073 fragp = &frag->next) { 1074 int end = offset + frag->len; 1075 1076 if (skb_shared(frag)) { 1077 struct sk_buff *nfrag; 1078 1079 nfrag = skb_clone(frag, GFP_ATOMIC); 1080 if (unlikely(!nfrag)) 1081 return -ENOMEM; 1082 1083 nfrag->next = frag->next; 1084 kfree_skb(frag); 1085 frag = nfrag; 1086 *fragp = frag; 1087 } 1088 1089 if (end < len) { 1090 offset = end; 1091 continue; 1092 } 1093 1094 if (end > len && 1095 unlikely((err = pskb_trim(frag, len - offset)))) 1096 return err; 1097 1098 if (frag->next) 1099 skb_drop_list(&frag->next); 1100 break; 1101 } 1102 1103 done: 1104 if (len > skb_headlen(skb)) { 1105 skb->data_len -= skb->len - len; 1106 skb->len = len; 1107 } else { 1108 skb->len = len; 1109 skb->data_len = 0; 1110 skb_set_tail_pointer(skb, len); 1111 } 1112 1113 return 0; 1114 } 1115 1116 /** 1117 * __pskb_pull_tail - advance tail of skb header 1118 * @skb: buffer to reallocate 1119 * @delta: number of bytes to advance tail 1120 * 1121 * The function makes a sense only on a fragmented &sk_buff, 1122 * it expands header moving its tail forward and copying necessary 1123 * data from fragmented part. 1124 * 1125 * &sk_buff MUST have reference count of 1. 1126 * 1127 * Returns %NULL (and &sk_buff does not change) if pull failed 1128 * or value of new tail of skb in the case of success. 1129 * 1130 * All the pointers pointing into skb header may change and must be 1131 * reloaded after call to this function. 1132 */ 1133 1134 /* Moves tail of skb head forward, copying data from fragmented part, 1135 * when it is necessary. 1136 * 1. It may fail due to malloc failure. 1137 * 2. It may change skb pointers. 1138 * 1139 * It is pretty complicated. Luckily, it is called only in exceptional cases. 1140 */ 1141 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 1142 { 1143 /* If skb has not enough free space at tail, get new one 1144 * plus 128 bytes for future expansions. If we have enough 1145 * room at tail, reallocate without expansion only if skb is cloned. 
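	 * ("eat" below is the number of bytes by which the pull overruns the
	 * currently allocated tail room; when it is positive the head must be
	 * regrown via pskb_expand_head().)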
1146 */ 1147 int i, k, eat = (skb->tail + delta) - skb->end; 1148 1149 if (eat > 0 || skb_cloned(skb)) { 1150 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 1151 GFP_ATOMIC)) 1152 return NULL; 1153 } 1154 1155 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 1156 BUG(); 1157 1158 /* Optimization: no fragments, no reasons to preestimate 1159 * size of pulled pages. Superb. 1160 */ 1161 if (!skb_shinfo(skb)->frag_list) 1162 goto pull_pages; 1163 1164 /* Estimate size of pulled pages. */ 1165 eat = delta; 1166 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1167 if (skb_shinfo(skb)->frags[i].size >= eat) 1168 goto pull_pages; 1169 eat -= skb_shinfo(skb)->frags[i].size; 1170 } 1171 1172 /* If we need update frag list, we are in troubles. 1173 * Certainly, it possible to add an offset to skb data, 1174 * but taking into account that pulling is expected to 1175 * be very rare operation, it is worth to fight against 1176 * further bloating skb head and crucify ourselves here instead. 1177 * Pure masohism, indeed. 8)8) 1178 */ 1179 if (eat) { 1180 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1181 struct sk_buff *clone = NULL; 1182 struct sk_buff *insp = NULL; 1183 1184 do { 1185 BUG_ON(!list); 1186 1187 if (list->len <= eat) { 1188 /* Eaten as whole. */ 1189 eat -= list->len; 1190 list = list->next; 1191 insp = list; 1192 } else { 1193 /* Eaten partially. */ 1194 1195 if (skb_shared(list)) { 1196 /* Sucks! We need to fork list. :-( */ 1197 clone = skb_clone(list, GFP_ATOMIC); 1198 if (!clone) 1199 return NULL; 1200 insp = list->next; 1201 list = clone; 1202 } else { 1203 /* This may be pulled without 1204 * problems. */ 1205 insp = list; 1206 } 1207 if (!pskb_pull(list, eat)) { 1208 if (clone) 1209 kfree_skb(clone); 1210 return NULL; 1211 } 1212 break; 1213 } 1214 } while (eat); 1215 1216 /* Free pulled out fragments. */ 1217 while ((list = skb_shinfo(skb)->frag_list) != insp) { 1218 skb_shinfo(skb)->frag_list = list->next; 1219 kfree_skb(list); 1220 } 1221 /* And insert new clone at head. */ 1222 if (clone) { 1223 clone->next = list; 1224 skb_shinfo(skb)->frag_list = clone; 1225 } 1226 } 1227 /* Success! Now we may commit changes to skb data. */ 1228 1229 pull_pages: 1230 eat = delta; 1231 k = 0; 1232 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1233 if (skb_shinfo(skb)->frags[i].size <= eat) { 1234 put_page(skb_shinfo(skb)->frags[i].page); 1235 eat -= skb_shinfo(skb)->frags[i].size; 1236 } else { 1237 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1238 if (eat) { 1239 skb_shinfo(skb)->frags[k].page_offset += eat; 1240 skb_shinfo(skb)->frags[k].size -= eat; 1241 eat = 0; 1242 } 1243 k++; 1244 } 1245 } 1246 skb_shinfo(skb)->nr_frags = k; 1247 1248 skb->tail += delta; 1249 skb->data_len -= delta; 1250 1251 return skb_tail_pointer(skb); 1252 } 1253 1254 /* Copy some data bits from skb to kernel buffer. */ 1255 1256 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 1257 { 1258 int i, copy; 1259 int start = skb_headlen(skb); 1260 1261 if (offset > (int)skb->len - len) 1262 goto fault; 1263 1264 /* Copy header. 
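	 * (A negative @offset is legal here and means "start copying inside
	 * the headroom"; skb_copy() and skb_copy_expand() rely on this.)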
*/ 1265 if ((copy = start - offset) > 0) { 1266 if (copy > len) 1267 copy = len; 1268 skb_copy_from_linear_data_offset(skb, offset, to, copy); 1269 if ((len -= copy) == 0) 1270 return 0; 1271 offset += copy; 1272 to += copy; 1273 } 1274 1275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1276 int end; 1277 1278 WARN_ON(start > offset + len); 1279 1280 end = start + skb_shinfo(skb)->frags[i].size; 1281 if ((copy = end - offset) > 0) { 1282 u8 *vaddr; 1283 1284 if (copy > len) 1285 copy = len; 1286 1287 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 1288 memcpy(to, 1289 vaddr + skb_shinfo(skb)->frags[i].page_offset+ 1290 offset - start, copy); 1291 kunmap_skb_frag(vaddr); 1292 1293 if ((len -= copy) == 0) 1294 return 0; 1295 offset += copy; 1296 to += copy; 1297 } 1298 start = end; 1299 } 1300 1301 if (skb_shinfo(skb)->frag_list) { 1302 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1303 1304 for (; list; list = list->next) { 1305 int end; 1306 1307 WARN_ON(start > offset + len); 1308 1309 end = start + list->len; 1310 if ((copy = end - offset) > 0) { 1311 if (copy > len) 1312 copy = len; 1313 if (skb_copy_bits(list, offset - start, 1314 to, copy)) 1315 goto fault; 1316 if ((len -= copy) == 0) 1317 return 0; 1318 offset += copy; 1319 to += copy; 1320 } 1321 start = end; 1322 } 1323 } 1324 if (!len) 1325 return 0; 1326 1327 fault: 1328 return -EFAULT; 1329 } 1330 1331 /* 1332 * Callback from splice_to_pipe(), if we need to release some pages 1333 * at the end of the spd in case we error'ed out in filling the pipe. 1334 */ 1335 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 1336 { 1337 struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private; 1338 1339 kfree_skb(skb); 1340 } 1341 1342 /* 1343 * Fill page/offset/length into spd, if it can hold more pages. 1344 */ 1345 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1346 unsigned int len, unsigned int offset, 1347 struct sk_buff *skb) 1348 { 1349 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1350 return 1; 1351 1352 spd->pages[spd->nr_pages] = page; 1353 spd->partial[spd->nr_pages].len = len; 1354 spd->partial[spd->nr_pages].offset = offset; 1355 spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb); 1356 spd->nr_pages++; 1357 return 0; 1358 } 1359 1360 static inline void __segment_seek(struct page **page, unsigned int *poff, 1361 unsigned int *plen, unsigned int off) 1362 { 1363 *poff += off; 1364 *page += *poff / PAGE_SIZE; 1365 *poff = *poff % PAGE_SIZE; 1366 *plen -= off; 1367 } 1368 1369 static inline int __splice_segment(struct page *page, unsigned int poff, 1370 unsigned int plen, unsigned int *off, 1371 unsigned int *len, struct sk_buff *skb, 1372 struct splice_pipe_desc *spd) 1373 { 1374 if (!*len) 1375 return 1; 1376 1377 /* skip this segment if already processed */ 1378 if (*off >= plen) { 1379 *off -= plen; 1380 return 0; 1381 } 1382 1383 /* ignore any bits we already processed */ 1384 if (*off) { 1385 __segment_seek(&page, &poff, &plen, *off); 1386 *off = 0; 1387 } 1388 1389 do { 1390 unsigned int flen = min(*len, plen); 1391 1392 /* the linear region may spread across several pages */ 1393 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1394 1395 if (spd_fill_page(spd, page, flen, poff, skb)) 1396 return 1; 1397 1398 __segment_seek(&page, &poff, &plen, flen); 1399 *len -= flen; 1400 1401 } while (*len && plen); 1402 1403 return 0; 1404 } 1405 1406 /* 1407 * Map linear and fragment data from the skb to spd. 
It reports failure if the 1408 * pipe is full or if we already spliced the requested length. 1409 */ 1410 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, 1411 unsigned int *len, 1412 struct splice_pipe_desc *spd) 1413 { 1414 int seg; 1415 1416 /* 1417 * map the linear part 1418 */ 1419 if (__splice_segment(virt_to_page(skb->data), 1420 (unsigned long) skb->data & (PAGE_SIZE - 1), 1421 skb_headlen(skb), 1422 offset, len, skb, spd)) 1423 return 1; 1424 1425 /* 1426 * then map the fragments 1427 */ 1428 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 1429 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1430 1431 if (__splice_segment(f->page, f->page_offset, f->size, 1432 offset, len, skb, spd)) 1433 return 1; 1434 } 1435 1436 return 0; 1437 } 1438 1439 /* 1440 * Map data from the skb to a pipe. Should handle both the linear part, 1441 * the fragments, and the frag list. It does NOT handle frag lists within 1442 * the frag list, if such a thing exists. We'd probably need to recurse to 1443 * handle that cleanly. 1444 */ 1445 int skb_splice_bits(struct sk_buff *__skb, unsigned int offset, 1446 struct pipe_inode_info *pipe, unsigned int tlen, 1447 unsigned int flags) 1448 { 1449 struct partial_page partial[PIPE_BUFFERS]; 1450 struct page *pages[PIPE_BUFFERS]; 1451 struct splice_pipe_desc spd = { 1452 .pages = pages, 1453 .partial = partial, 1454 .flags = flags, 1455 .ops = &sock_pipe_buf_ops, 1456 .spd_release = sock_spd_release, 1457 }; 1458 struct sk_buff *skb; 1459 1460 /* 1461 * I'd love to avoid the clone here, but tcp_read_sock() 1462 * ignores reference counts and unconditonally kills the sk_buff 1463 * on return from the actor. 1464 */ 1465 skb = skb_clone(__skb, GFP_KERNEL); 1466 if (unlikely(!skb)) 1467 return -ENOMEM; 1468 1469 /* 1470 * __skb_splice_bits() only fails if the output has no room left, 1471 * so no point in going over the frag_list for the error case. 1472 */ 1473 if (__skb_splice_bits(skb, &offset, &tlen, &spd)) 1474 goto done; 1475 else if (!tlen) 1476 goto done; 1477 1478 /* 1479 * now see if we have a frag_list to map 1480 */ 1481 if (skb_shinfo(skb)->frag_list) { 1482 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1483 1484 for (; list && tlen; list = list->next) { 1485 if (__skb_splice_bits(list, &offset, &tlen, &spd)) 1486 break; 1487 } 1488 } 1489 1490 done: 1491 /* 1492 * drop our reference to the clone, the pipe consumption will 1493 * drop the rest. 1494 */ 1495 kfree_skb(skb); 1496 1497 if (spd.nr_pages) { 1498 int ret; 1499 struct sock *sk = __skb->sk; 1500 1501 /* 1502 * Drop the socket lock, otherwise we have reverse 1503 * locking dependencies between sk_lock and i_mutex 1504 * here as compared to sendfile(). We enter here 1505 * with the socket lock held, and splice_to_pipe() will 1506 * grab the pipe inode lock. For sendfile() emulation, 1507 * we call into ->sendpage() with the i_mutex lock held 1508 * and networking will grab the socket lock. 1509 */ 1510 release_sock(sk); 1511 ret = splice_to_pipe(pipe, &spd); 1512 lock_sock(sk); 1513 return ret; 1514 } 1515 1516 return 0; 1517 } 1518 1519 /** 1520 * skb_store_bits - store bits from kernel buffer to skb 1521 * @skb: destination buffer 1522 * @offset: offset in destination 1523 * @from: source buffer 1524 * @len: number of bytes to copy 1525 * 1526 * Copy the specified number of bytes from the source buffer to the 1527 * destination skb. This function handles all the messy bits of 1528 * traversing fragment lists and such. 
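 *
 *	Illustrative sketch (not part of the original file; new_hdr and
 *	hdr_len are hypothetical, and the skb is assumed to be private and
 *	writable):
 *
 *		if (skb_store_bits(skb, offset, new_hdr, hdr_len))
 *			return -EFAULT;		// offset/len beyond skb->len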
1529 */ 1530 1531 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1532 { 1533 int i, copy; 1534 int start = skb_headlen(skb); 1535 1536 if (offset > (int)skb->len - len) 1537 goto fault; 1538 1539 if ((copy = start - offset) > 0) { 1540 if (copy > len) 1541 copy = len; 1542 skb_copy_to_linear_data_offset(skb, offset, from, copy); 1543 if ((len -= copy) == 0) 1544 return 0; 1545 offset += copy; 1546 from += copy; 1547 } 1548 1549 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1550 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1551 int end; 1552 1553 WARN_ON(start > offset + len); 1554 1555 end = start + frag->size; 1556 if ((copy = end - offset) > 0) { 1557 u8 *vaddr; 1558 1559 if (copy > len) 1560 copy = len; 1561 1562 vaddr = kmap_skb_frag(frag); 1563 memcpy(vaddr + frag->page_offset + offset - start, 1564 from, copy); 1565 kunmap_skb_frag(vaddr); 1566 1567 if ((len -= copy) == 0) 1568 return 0; 1569 offset += copy; 1570 from += copy; 1571 } 1572 start = end; 1573 } 1574 1575 if (skb_shinfo(skb)->frag_list) { 1576 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1577 1578 for (; list; list = list->next) { 1579 int end; 1580 1581 WARN_ON(start > offset + len); 1582 1583 end = start + list->len; 1584 if ((copy = end - offset) > 0) { 1585 if (copy > len) 1586 copy = len; 1587 if (skb_store_bits(list, offset - start, 1588 from, copy)) 1589 goto fault; 1590 if ((len -= copy) == 0) 1591 return 0; 1592 offset += copy; 1593 from += copy; 1594 } 1595 start = end; 1596 } 1597 } 1598 if (!len) 1599 return 0; 1600 1601 fault: 1602 return -EFAULT; 1603 } 1604 1605 EXPORT_SYMBOL(skb_store_bits); 1606 1607 /* Checksum skb data. */ 1608 1609 __wsum skb_checksum(const struct sk_buff *skb, int offset, 1610 int len, __wsum csum) 1611 { 1612 int start = skb_headlen(skb); 1613 int i, copy = start - offset; 1614 int pos = 0; 1615 1616 /* Checksum header. */ 1617 if (copy > 0) { 1618 if (copy > len) 1619 copy = len; 1620 csum = csum_partial(skb->data + offset, copy, csum); 1621 if ((len -= copy) == 0) 1622 return csum; 1623 offset += copy; 1624 pos = copy; 1625 } 1626 1627 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1628 int end; 1629 1630 WARN_ON(start > offset + len); 1631 1632 end = start + skb_shinfo(skb)->frags[i].size; 1633 if ((copy = end - offset) > 0) { 1634 __wsum csum2; 1635 u8 *vaddr; 1636 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1637 1638 if (copy > len) 1639 copy = len; 1640 vaddr = kmap_skb_frag(frag); 1641 csum2 = csum_partial(vaddr + frag->page_offset + 1642 offset - start, copy, 0); 1643 kunmap_skb_frag(vaddr); 1644 csum = csum_block_add(csum, csum2, pos); 1645 if (!(len -= copy)) 1646 return csum; 1647 offset += copy; 1648 pos += copy; 1649 } 1650 start = end; 1651 } 1652 1653 if (skb_shinfo(skb)->frag_list) { 1654 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1655 1656 for (; list; list = list->next) { 1657 int end; 1658 1659 WARN_ON(start > offset + len); 1660 1661 end = start + list->len; 1662 if ((copy = end - offset) > 0) { 1663 __wsum csum2; 1664 if (copy > len) 1665 copy = len; 1666 csum2 = skb_checksum(list, offset - start, 1667 copy, 0); 1668 csum = csum_block_add(csum, csum2, pos); 1669 if ((len -= copy) == 0) 1670 return csum; 1671 offset += copy; 1672 pos += copy; 1673 } 1674 start = end; 1675 } 1676 } 1677 BUG_ON(len); 1678 1679 return csum; 1680 } 1681 1682 /* Both of above in one bottle. 
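 * That is, skb_copy_bits() and skb_checksum() folded into one pass:
 * copy the requested bytes out and accumulate their checksum as we go.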
*/ 1683 1684 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 1685 u8 *to, int len, __wsum csum) 1686 { 1687 int start = skb_headlen(skb); 1688 int i, copy = start - offset; 1689 int pos = 0; 1690 1691 /* Copy header. */ 1692 if (copy > 0) { 1693 if (copy > len) 1694 copy = len; 1695 csum = csum_partial_copy_nocheck(skb->data + offset, to, 1696 copy, csum); 1697 if ((len -= copy) == 0) 1698 return csum; 1699 offset += copy; 1700 to += copy; 1701 pos = copy; 1702 } 1703 1704 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1705 int end; 1706 1707 WARN_ON(start > offset + len); 1708 1709 end = start + skb_shinfo(skb)->frags[i].size; 1710 if ((copy = end - offset) > 0) { 1711 __wsum csum2; 1712 u8 *vaddr; 1713 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1714 1715 if (copy > len) 1716 copy = len; 1717 vaddr = kmap_skb_frag(frag); 1718 csum2 = csum_partial_copy_nocheck(vaddr + 1719 frag->page_offset + 1720 offset - start, to, 1721 copy, 0); 1722 kunmap_skb_frag(vaddr); 1723 csum = csum_block_add(csum, csum2, pos); 1724 if (!(len -= copy)) 1725 return csum; 1726 offset += copy; 1727 to += copy; 1728 pos += copy; 1729 } 1730 start = end; 1731 } 1732 1733 if (skb_shinfo(skb)->frag_list) { 1734 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1735 1736 for (; list; list = list->next) { 1737 __wsum csum2; 1738 int end; 1739 1740 WARN_ON(start > offset + len); 1741 1742 end = start + list->len; 1743 if ((copy = end - offset) > 0) { 1744 if (copy > len) 1745 copy = len; 1746 csum2 = skb_copy_and_csum_bits(list, 1747 offset - start, 1748 to, copy, 0); 1749 csum = csum_block_add(csum, csum2, pos); 1750 if ((len -= copy) == 0) 1751 return csum; 1752 offset += copy; 1753 to += copy; 1754 pos += copy; 1755 } 1756 start = end; 1757 } 1758 } 1759 BUG_ON(len); 1760 return csum; 1761 } 1762 1763 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 1764 { 1765 __wsum csum; 1766 long csstart; 1767 1768 if (skb->ip_summed == CHECKSUM_PARTIAL) 1769 csstart = skb->csum_start - skb_headroom(skb); 1770 else 1771 csstart = skb_headlen(skb); 1772 1773 BUG_ON(csstart > skb_headlen(skb)); 1774 1775 skb_copy_from_linear_data(skb, to, csstart); 1776 1777 csum = 0; 1778 if (csstart != skb->len) 1779 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 1780 skb->len - csstart, 0); 1781 1782 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1783 long csstuff = csstart + skb->csum_offset; 1784 1785 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 1786 } 1787 } 1788 1789 /** 1790 * skb_dequeue - remove from the head of the queue 1791 * @list: list to dequeue from 1792 * 1793 * Remove the head of the list. The list lock is taken so the function 1794 * may be used safely with other locking list functions. The head item is 1795 * returned or %NULL if the list is empty. 1796 */ 1797 1798 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 1799 { 1800 unsigned long flags; 1801 struct sk_buff *result; 1802 1803 spin_lock_irqsave(&list->lock, flags); 1804 result = __skb_dequeue(list); 1805 spin_unlock_irqrestore(&list->lock, flags); 1806 return result; 1807 } 1808 1809 /** 1810 * skb_dequeue_tail - remove from the tail of the queue 1811 * @list: list to dequeue from 1812 * 1813 * Remove the tail of the list. The list lock is taken so the function 1814 * may be used safely with other locking list functions. The tail item is 1815 * returned or %NULL if the list is empty. 
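 *
 * Illustrative sketch of the locked queue API these helpers belong to
 * (not part of the original file; rxq and process() are hypothetical):
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);		// producer side
 *	...
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		process(skb);			// consumer side
 *	skb_queue_purge(&rxq);			// drop whatever is left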
1816 */ 1817 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 1818 { 1819 unsigned long flags; 1820 struct sk_buff *result; 1821 1822 spin_lock_irqsave(&list->lock, flags); 1823 result = __skb_dequeue_tail(list); 1824 spin_unlock_irqrestore(&list->lock, flags); 1825 return result; 1826 } 1827 1828 /** 1829 * skb_queue_purge - empty a list 1830 * @list: list to empty 1831 * 1832 * Delete all buffers on an &sk_buff list. Each buffer is removed from 1833 * the list and one reference dropped. This function takes the list 1834 * lock and is atomic with respect to other list locking functions. 1835 */ 1836 void skb_queue_purge(struct sk_buff_head *list) 1837 { 1838 struct sk_buff *skb; 1839 while ((skb = skb_dequeue(list)) != NULL) 1840 kfree_skb(skb); 1841 } 1842 1843 /** 1844 * skb_queue_head - queue a buffer at the list head 1845 * @list: list to use 1846 * @newsk: buffer to queue 1847 * 1848 * Queue a buffer at the start of the list. This function takes the 1849 * list lock and can be used safely with other locking &sk_buff functions 1850 * safely. 1851 * 1852 * A buffer cannot be placed on two lists at the same time. 1853 */ 1854 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 1855 { 1856 unsigned long flags; 1857 1858 spin_lock_irqsave(&list->lock, flags); 1859 __skb_queue_head(list, newsk); 1860 spin_unlock_irqrestore(&list->lock, flags); 1861 } 1862 1863 /** 1864 * skb_queue_tail - queue a buffer at the list tail 1865 * @list: list to use 1866 * @newsk: buffer to queue 1867 * 1868 * Queue a buffer at the tail of the list. This function takes the 1869 * list lock and can be used safely with other locking &sk_buff functions 1870 * safely. 1871 * 1872 * A buffer cannot be placed on two lists at the same time. 1873 */ 1874 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 1875 { 1876 unsigned long flags; 1877 1878 spin_lock_irqsave(&list->lock, flags); 1879 __skb_queue_tail(list, newsk); 1880 spin_unlock_irqrestore(&list->lock, flags); 1881 } 1882 1883 /** 1884 * skb_unlink - remove a buffer from a list 1885 * @skb: buffer to remove 1886 * @list: list to use 1887 * 1888 * Remove a packet from a list. The list locks are taken and this 1889 * function is atomic with respect to other list locked calls 1890 * 1891 * You must know what list the SKB is on. 1892 */ 1893 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 1894 { 1895 unsigned long flags; 1896 1897 spin_lock_irqsave(&list->lock, flags); 1898 __skb_unlink(skb, list); 1899 spin_unlock_irqrestore(&list->lock, flags); 1900 } 1901 1902 /** 1903 * skb_append - append a buffer 1904 * @old: buffer to insert after 1905 * @newsk: buffer to insert 1906 * @list: list to use 1907 * 1908 * Place a packet after a given packet in a list. The list locks are taken 1909 * and this function is atomic with respect to other list locked calls. 1910 * A buffer cannot be placed on two lists at the same time. 1911 */ 1912 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 1913 { 1914 unsigned long flags; 1915 1916 spin_lock_irqsave(&list->lock, flags); 1917 __skb_queue_after(list, old, newsk); 1918 spin_unlock_irqrestore(&list->lock, flags); 1919 } 1920 1921 1922 /** 1923 * skb_insert - insert a buffer 1924 * @old: buffer to insert before 1925 * @newsk: buffer to insert 1926 * @list: list to use 1927 * 1928 * Place a packet before a given packet in a list. 
The list locks are 1929 * taken and this function is atomic with respect to other list locked 1930 * calls. 1931 * 1932 * A buffer cannot be placed on two lists at the same time. 1933 */ 1934 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 1935 { 1936 unsigned long flags; 1937 1938 spin_lock_irqsave(&list->lock, flags); 1939 __skb_insert(newsk, old->prev, old, list); 1940 spin_unlock_irqrestore(&list->lock, flags); 1941 } 1942 1943 static inline void skb_split_inside_header(struct sk_buff *skb, 1944 struct sk_buff* skb1, 1945 const u32 len, const int pos) 1946 { 1947 int i; 1948 1949 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 1950 pos - len); 1951 /* And move data appendix as is. */ 1952 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1953 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 1954 1955 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 1956 skb_shinfo(skb)->nr_frags = 0; 1957 skb1->data_len = skb->data_len; 1958 skb1->len += skb1->data_len; 1959 skb->data_len = 0; 1960 skb->len = len; 1961 skb_set_tail_pointer(skb, len); 1962 } 1963 1964 static inline void skb_split_no_header(struct sk_buff *skb, 1965 struct sk_buff* skb1, 1966 const u32 len, int pos) 1967 { 1968 int i, k = 0; 1969 const int nfrags = skb_shinfo(skb)->nr_frags; 1970 1971 skb_shinfo(skb)->nr_frags = 0; 1972 skb1->len = skb1->data_len = skb->len - len; 1973 skb->len = len; 1974 skb->data_len = len - pos; 1975 1976 for (i = 0; i < nfrags; i++) { 1977 int size = skb_shinfo(skb)->frags[i].size; 1978 1979 if (pos + size > len) { 1980 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 1981 1982 if (pos < len) { 1983 /* Split frag. 1984 * We have two variants in this case: 1985 * 1. Move all the frag to the second 1986 * part, if it is possible. F.e. 1987 * this approach is mandatory for TUX, 1988 * where splitting is expensive. 1989 * 2. Split is accurately. We make this. 1990 */ 1991 get_page(skb_shinfo(skb)->frags[i].page); 1992 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 1993 skb_shinfo(skb1)->frags[0].size -= len - pos; 1994 skb_shinfo(skb)->frags[i].size = len - pos; 1995 skb_shinfo(skb)->nr_frags++; 1996 } 1997 k++; 1998 } else 1999 skb_shinfo(skb)->nr_frags++; 2000 pos += size; 2001 } 2002 skb_shinfo(skb1)->nr_frags = k; 2003 } 2004 2005 /** 2006 * skb_split - Split fragmented skb to two parts at length len. 2007 * @skb: the buffer to split 2008 * @skb1: the buffer to receive the second part 2009 * @len: new length for skb 2010 */ 2011 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2012 { 2013 int pos = skb_headlen(skb); 2014 2015 if (len < pos) /* Split line is inside header. */ 2016 skb_split_inside_header(skb, skb1, len, pos); 2017 else /* Second chunk has no header, nothing to copy. */ 2018 skb_split_no_header(skb, skb1, len, pos); 2019 } 2020 2021 /* Shifting from/to a cloned skb is a no-go. 2022 * 2023 * Caller cannot keep skb_shinfo related pointers past calling here! 2024 */ 2025 static int skb_prepare_for_shift(struct sk_buff *skb) 2026 { 2027 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2028 } 2029 2030 /** 2031 * skb_shift - Shifts paged data partially from skb to another 2032 * @tgt: buffer into which tail data gets added 2033 * @skb: buffer from which the paged data comes from 2034 * @shiftlen: shift up to this many bytes 2035 * 2036 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2037 * the length of the skb, from tgt to skb. 
Returns number bytes shifted. 2038 * It's up to caller to free skb if everything was shifted. 2039 * 2040 * If @tgt runs out of frags, the whole operation is aborted. 2041 * 2042 * Skb cannot include anything else but paged data while tgt is allowed 2043 * to have non-paged data as well. 2044 * 2045 * TODO: full sized shift could be optimized but that would need 2046 * specialized skb free'er to handle frags without up-to-date nr_frags. 2047 */ 2048 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2049 { 2050 int from, to, merge, todo; 2051 struct skb_frag_struct *fragfrom, *fragto; 2052 2053 BUG_ON(shiftlen > skb->len); 2054 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2055 2056 todo = shiftlen; 2057 from = 0; 2058 to = skb_shinfo(tgt)->nr_frags; 2059 fragfrom = &skb_shinfo(skb)->frags[from]; 2060 2061 /* Actual merge is delayed until the point when we know we can 2062 * commit all, so that we don't have to undo partial changes 2063 */ 2064 if (!to || 2065 !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) { 2066 merge = -1; 2067 } else { 2068 merge = to - 1; 2069 2070 todo -= fragfrom->size; 2071 if (todo < 0) { 2072 if (skb_prepare_for_shift(skb) || 2073 skb_prepare_for_shift(tgt)) 2074 return 0; 2075 2076 /* All previous frag pointers might be stale! */ 2077 fragfrom = &skb_shinfo(skb)->frags[from]; 2078 fragto = &skb_shinfo(tgt)->frags[merge]; 2079 2080 fragto->size += shiftlen; 2081 fragfrom->size -= shiftlen; 2082 fragfrom->page_offset += shiftlen; 2083 2084 goto onlymerged; 2085 } 2086 2087 from++; 2088 } 2089 2090 /* Skip full, not-fitting skb to avoid expensive operations */ 2091 if ((shiftlen == skb->len) && 2092 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2093 return 0; 2094 2095 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2096 return 0; 2097 2098 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2099 if (to == MAX_SKB_FRAGS) 2100 return 0; 2101 2102 fragfrom = &skb_shinfo(skb)->frags[from]; 2103 fragto = &skb_shinfo(tgt)->frags[to]; 2104 2105 if (todo >= fragfrom->size) { 2106 *fragto = *fragfrom; 2107 todo -= fragfrom->size; 2108 from++; 2109 to++; 2110 2111 } else { 2112 get_page(fragfrom->page); 2113 fragto->page = fragfrom->page; 2114 fragto->page_offset = fragfrom->page_offset; 2115 fragto->size = todo; 2116 2117 fragfrom->page_offset += todo; 2118 fragfrom->size -= todo; 2119 todo = 0; 2120 2121 to++; 2122 break; 2123 } 2124 } 2125 2126 /* Ready to "commit" this state change to tgt */ 2127 skb_shinfo(tgt)->nr_frags = to; 2128 2129 if (merge >= 0) { 2130 fragfrom = &skb_shinfo(skb)->frags[0]; 2131 fragto = &skb_shinfo(tgt)->frags[merge]; 2132 2133 fragto->size += fragfrom->size; 2134 put_page(fragfrom->page); 2135 } 2136 2137 /* Reposition in the original skb */ 2138 to = 0; 2139 while (from < skb_shinfo(skb)->nr_frags) 2140 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2141 skb_shinfo(skb)->nr_frags = to; 2142 2143 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2144 2145 onlymerged: 2146 /* Most likely the tgt won't ever need its checksum anymore, skb on 2147 * the other hand might need it if it needs to be resent 2148 */ 2149 tgt->ip_summed = CHECKSUM_PARTIAL; 2150 skb->ip_summed = CHECKSUM_PARTIAL; 2151 2152 /* Yak, is it really working this way? Some helper please? 
	 */
2153 	skb->len -= shiftlen;
2154 	skb->data_len -= shiftlen;
2155 	skb->truesize -= shiftlen;
2156 	tgt->len += shiftlen;
2157 	tgt->data_len += shiftlen;
2158 	tgt->truesize += shiftlen;
2159 
2160 	return shiftlen;
2161 }
2162 
2163 /**
2164  * skb_prepare_seq_read - Prepare a sequential read of skb data
2165  * @skb: the buffer to read
2166  * @from: lower offset of data to be read
2167  * @to: upper offset of data to be read
2168  * @st: state variable
2169  *
2170  * Initializes the specified state variable. Must be called before
2171  * invoking skb_seq_read() for the first time.
2172  */
2173 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2174 			  unsigned int to, struct skb_seq_state *st)
2175 {
2176 	st->lower_offset = from;
2177 	st->upper_offset = to;
2178 	st->root_skb = st->cur_skb = skb;
2179 	st->frag_idx = st->stepped_offset = 0;
2180 	st->frag_data = NULL;
2181 }
2182 
2183 /**
2184  * skb_seq_read - Sequentially read skb data
2185  * @consumed: number of bytes consumed by the caller so far
2186  * @data: destination pointer for data to be returned
2187  * @st: state variable
2188  *
2189  * Reads a block of skb data at &consumed relative to the
2190  * lower offset specified to skb_prepare_seq_read(). Assigns
2191  * the head of the data block to &data and returns the length
2192  * of the block or 0 if the end of the skb data or the upper
2193  * offset has been reached.
2194  *
2195  * The caller is not required to consume all of the data
2196  * returned, i.e. &consumed is typically set to the number
2197  * of bytes already consumed and the next call to
2198  * skb_seq_read() will return the remaining part of the block.
2199  *
2200  * Note 1: The size of each block of data returned can be arbitrary;
2201  *	   this limitation is the cost of zero-copy sequential
2202  *	   reads of potentially non-linear data.
2203  *
2204  * Note 2: Fragment lists within fragments are not implemented
2205  *	   at the moment, state->root_skb could be replaced with
2206  *	   a stack for this purpose.
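 *
 * Note 3: A minimal usage sketch (illustrative only; the skb and the
 *	   process() callback stand in for whatever the real caller has):
 *
 *		struct skb_seq_state st;
 *		unsigned int consumed = 0, len;
 *		const u8 *data;
 *
 *		skb_prepare_seq_read(skb, 0, skb->len, &st);
 *		while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *			process(data, len);
 *			consumed += len;
 *		}
 *
 *	   If the loop is left before skb_seq_read() has returned 0,
 *	   skb_abort_seq_read() must be called so that any fragment
 *	   mapping still held by the state is released.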
2207 */ 2208 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2209 struct skb_seq_state *st) 2210 { 2211 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2212 skb_frag_t *frag; 2213 2214 if (unlikely(abs_offset >= st->upper_offset)) 2215 return 0; 2216 2217 next_skb: 2218 block_limit = skb_headlen(st->cur_skb); 2219 2220 if (abs_offset < block_limit) { 2221 *data = st->cur_skb->data + abs_offset; 2222 return block_limit - abs_offset; 2223 } 2224 2225 if (st->frag_idx == 0 && !st->frag_data) 2226 st->stepped_offset += skb_headlen(st->cur_skb); 2227 2228 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2229 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2230 block_limit = frag->size + st->stepped_offset; 2231 2232 if (abs_offset < block_limit) { 2233 if (!st->frag_data) 2234 st->frag_data = kmap_skb_frag(frag); 2235 2236 *data = (u8 *) st->frag_data + frag->page_offset + 2237 (abs_offset - st->stepped_offset); 2238 2239 return block_limit - abs_offset; 2240 } 2241 2242 if (st->frag_data) { 2243 kunmap_skb_frag(st->frag_data); 2244 st->frag_data = NULL; 2245 } 2246 2247 st->frag_idx++; 2248 st->stepped_offset += frag->size; 2249 } 2250 2251 if (st->frag_data) { 2252 kunmap_skb_frag(st->frag_data); 2253 st->frag_data = NULL; 2254 } 2255 2256 if (st->cur_skb->next) { 2257 st->cur_skb = st->cur_skb->next; 2258 st->frag_idx = 0; 2259 goto next_skb; 2260 } else if (st->root_skb == st->cur_skb && 2261 skb_shinfo(st->root_skb)->frag_list) { 2262 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2263 goto next_skb; 2264 } 2265 2266 return 0; 2267 } 2268 2269 /** 2270 * skb_abort_seq_read - Abort a sequential read of skb data 2271 * @st: state variable 2272 * 2273 * Must be called if skb_seq_read() was not called until it 2274 * returned 0. 2275 */ 2276 void skb_abort_seq_read(struct skb_seq_state *st) 2277 { 2278 if (st->frag_data) 2279 kunmap_skb_frag(st->frag_data); 2280 } 2281 2282 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2283 2284 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2285 struct ts_config *conf, 2286 struct ts_state *state) 2287 { 2288 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2289 } 2290 2291 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2292 { 2293 skb_abort_seq_read(TS_SKB_CB(state)); 2294 } 2295 2296 /** 2297 * skb_find_text - Find a text pattern in skb data 2298 * @skb: the buffer to look in 2299 * @from: search offset 2300 * @to: search limit 2301 * @config: textsearch configuration 2302 * @state: uninitialized textsearch state variable 2303 * 2304 * Finds a pattern in the skb data according to the specified 2305 * textsearch configuration. Use textsearch_next() to retrieve 2306 * subsequent occurrences of the pattern. Returns the offset 2307 * to the first occurrence or UINT_MAX if no match was found. 2308 */ 2309 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2310 unsigned int to, struct ts_config *config, 2311 struct ts_state *state) 2312 { 2313 unsigned int ret; 2314 2315 config->get_next_block = skb_ts_get_next_block; 2316 config->finish = skb_ts_finish; 2317 2318 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 2319 2320 ret = textsearch_find(config, state); 2321 return (ret <= to - from ? ret : UINT_MAX); 2322 } 2323 2324 /** 2325 * skb_append_datato_frags: - append the user data to a skb 2326 * @sk: sock structure 2327 * @skb: skb structure to be appened with user data. 
2328  * @getfrag: callback function to be used for getting the user data
2329  * @from: pointer to user message iov
2330  * @length: length of the iov message
2331  *
2332  * Description: This procedure appends the user data to the fragment part
2333  * of the skb. If any page allocation fails, -ENOMEM is returned.
2334  */
2335 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2336 			    int (*getfrag)(void *from, char *to, int offset,
2337 					   int len, int odd, struct sk_buff *skb),
2338 			    void *from, int length)
2339 {
2340 	int frg_cnt = 0;
2341 	skb_frag_t *frag = NULL;
2342 	struct page *page = NULL;
2343 	int copy, left;
2344 	int offset = 0;
2345 	int ret;
2346 
2347 	do {
2348 		/* Return error if we don't have space for new frag */
2349 		frg_cnt = skb_shinfo(skb)->nr_frags;
2350 		if (frg_cnt >= MAX_SKB_FRAGS)
2351 			return -EFAULT;
2352 
2353 		/* allocate a new page for next frag */
2354 		page = alloc_pages(sk->sk_allocation, 0);
2355 
2356 		/* If alloc_pages() fails, just return failure; the caller will
2357 		 * free the previously allocated pages by doing kfree_skb()
2358 		 */
2359 		if (page == NULL)
2360 			return -ENOMEM;
2361 
2362 		/* initialize the next frag */
2363 		sk->sk_sndmsg_page = page;
2364 		sk->sk_sndmsg_off = 0;
2365 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2366 		skb->truesize += PAGE_SIZE;
2367 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2368 
2369 		/* get the new initialized frag */
2370 		frg_cnt = skb_shinfo(skb)->nr_frags;
2371 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2372 
2373 		/* copy the user data to page */
2374 		left = PAGE_SIZE - frag->page_offset;
2375 		copy = (length > left) ? left : length;
2376 
2377 		ret = getfrag(from, (page_address(frag->page) +
2378 			    frag->page_offset + frag->size),
2379 			    offset, copy, 0, skb);
2380 		if (ret < 0)
2381 			return -EFAULT;
2382 
2383 		/* copy was successful so update the size parameters */
2384 		sk->sk_sndmsg_off += copy;
2385 		frag->size += copy;
2386 		skb->len += copy;
2387 		skb->data_len += copy;
2388 		offset += copy;
2389 		length -= copy;
2390 
2391 	} while (length > 0);
2392 
2393 	return 0;
2394 }
2395 
2396 /**
2397  * skb_pull_rcsum - pull skb and update receive checksum
2398  * @skb: buffer to update
2399  * @len: length of data pulled
2400  *
2401  * This function performs an skb_pull on the packet and updates
2402  * the CHECKSUM_COMPLETE checksum. It should be used on
2403  * receive path processing instead of skb_pull unless you know
2404  * that the checksum difference is zero (e.g., a valid IP header)
2405  * or you are setting ip_summed to CHECKSUM_NONE.
2406  */
2407 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2408 {
2409 	BUG_ON(len > skb->len);
2410 	skb->len -= len;
2411 	BUG_ON(skb->len < skb->data_len);
2412 	skb_postpull_rcsum(skb, skb->data, len);
2413 	return skb->data += len;
2414 }
2415 
2416 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2417 
2418 /**
2419  * skb_segment - Perform protocol segmentation on skb.
2420  * @skb: buffer to segment
2421  * @features: features for the output path (see dev->features)
2422  *
2423  * This function performs segmentation on the given skb. It returns
2424  * a pointer to the first in a list of new skbs for the segments.
2425  * In case of error it returns ERR_PTR(err).
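 *
 * A rough calling sketch (illustrative only; the features mask and the
 * consume() step stand in for whatever the real caller does with each
 * segment):
 *
 *	struct sk_buff *segs, *nskb;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *
 *	while (segs) {
 *		nskb = segs;
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		consume(nskb);
 *	}
 *
 * The original skb is not freed by skb_segment(); it remains the
 * caller's responsibility.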
2426 */ 2427 struct sk_buff *skb_segment(struct sk_buff *skb, int features) 2428 { 2429 struct sk_buff *segs = NULL; 2430 struct sk_buff *tail = NULL; 2431 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2432 unsigned int mss = skb_shinfo(skb)->gso_size; 2433 unsigned int doffset = skb->data - skb_mac_header(skb); 2434 unsigned int offset = doffset; 2435 unsigned int headroom; 2436 unsigned int len; 2437 int sg = features & NETIF_F_SG; 2438 int nfrags = skb_shinfo(skb)->nr_frags; 2439 int err = -ENOMEM; 2440 int i = 0; 2441 int pos; 2442 2443 __skb_push(skb, doffset); 2444 headroom = skb_headroom(skb); 2445 pos = skb_headlen(skb); 2446 2447 do { 2448 struct sk_buff *nskb; 2449 skb_frag_t *frag; 2450 int hsize; 2451 int size; 2452 2453 len = skb->len - offset; 2454 if (len > mss) 2455 len = mss; 2456 2457 hsize = skb_headlen(skb) - offset; 2458 if (hsize < 0) 2459 hsize = 0; 2460 if (hsize > len || !sg) 2461 hsize = len; 2462 2463 if (!hsize && i >= nfrags) { 2464 BUG_ON(fskb->len != len); 2465 2466 pos += len; 2467 nskb = skb_clone(fskb, GFP_ATOMIC); 2468 fskb = fskb->next; 2469 2470 if (unlikely(!nskb)) 2471 goto err; 2472 2473 hsize = skb_end_pointer(nskb) - nskb->head; 2474 if (skb_cow_head(nskb, doffset + headroom)) { 2475 kfree_skb(nskb); 2476 goto err; 2477 } 2478 2479 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2480 hsize; 2481 skb_release_head_state(nskb); 2482 __skb_push(nskb, doffset); 2483 } else { 2484 nskb = alloc_skb(hsize + doffset + headroom, 2485 GFP_ATOMIC); 2486 2487 if (unlikely(!nskb)) 2488 goto err; 2489 2490 skb_reserve(nskb, headroom); 2491 __skb_put(nskb, doffset); 2492 } 2493 2494 if (segs) 2495 tail->next = nskb; 2496 else 2497 segs = nskb; 2498 tail = nskb; 2499 2500 __copy_skb_header(nskb, skb); 2501 nskb->mac_len = skb->mac_len; 2502 2503 skb_reset_mac_header(nskb); 2504 skb_set_network_header(nskb, skb->mac_len); 2505 nskb->transport_header = (nskb->network_header + 2506 skb_network_header_len(skb)); 2507 skb_copy_from_linear_data(skb, nskb->data, doffset); 2508 2509 if (pos >= offset + len) 2510 continue; 2511 2512 if (!sg) { 2513 nskb->ip_summed = CHECKSUM_NONE; 2514 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2515 skb_put(nskb, len), 2516 len, 0); 2517 continue; 2518 } 2519 2520 frag = skb_shinfo(nskb)->frags; 2521 2522 skb_copy_from_linear_data_offset(skb, offset, 2523 skb_put(nskb, hsize), hsize); 2524 2525 while (pos < offset + len && i < nfrags) { 2526 *frag = skb_shinfo(skb)->frags[i]; 2527 get_page(frag->page); 2528 size = frag->size; 2529 2530 if (pos < offset) { 2531 frag->page_offset += offset - pos; 2532 frag->size -= offset - pos; 2533 } 2534 2535 skb_shinfo(nskb)->nr_frags++; 2536 2537 if (pos + size <= offset + len) { 2538 i++; 2539 pos += size; 2540 } else { 2541 frag->size -= pos + size - (offset + len); 2542 goto skip_fraglist; 2543 } 2544 2545 frag++; 2546 } 2547 2548 if (pos < offset + len) { 2549 struct sk_buff *fskb2 = fskb; 2550 2551 BUG_ON(pos + fskb->len != offset + len); 2552 2553 pos += fskb->len; 2554 fskb = fskb->next; 2555 2556 if (fskb2->next) { 2557 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2558 if (!fskb2) 2559 goto err; 2560 } else 2561 skb_get(fskb2); 2562 2563 BUG_ON(skb_shinfo(nskb)->frag_list); 2564 skb_shinfo(nskb)->frag_list = fskb2; 2565 } 2566 2567 skip_fraglist: 2568 nskb->data_len = len - hsize; 2569 nskb->len += nskb->data_len; 2570 nskb->truesize += nskb->data_len; 2571 } while ((offset += len) < skb->len); 2572 2573 return segs; 2574 2575 err: 2576 while ((skb = segs)) { 2577 segs = skb->next; 
2578 kfree_skb(skb); 2579 } 2580 return ERR_PTR(err); 2581 } 2582 2583 EXPORT_SYMBOL_GPL(skb_segment); 2584 2585 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2586 { 2587 struct sk_buff *p = *head; 2588 struct sk_buff *nskb; 2589 unsigned int headroom; 2590 unsigned int hlen = p->data - skb_mac_header(p); 2591 2592 if (hlen + p->len + skb->len >= 65536) 2593 return -E2BIG; 2594 2595 if (skb_shinfo(p)->frag_list) 2596 goto merge; 2597 else if (!skb_headlen(p) && !skb_headlen(skb) && 2598 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags < 2599 MAX_SKB_FRAGS) { 2600 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, 2601 skb_shinfo(skb)->frags, 2602 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 2603 2604 skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags; 2605 NAPI_GRO_CB(skb)->free = 1; 2606 goto done; 2607 } 2608 2609 headroom = skb_headroom(p); 2610 nskb = netdev_alloc_skb(p->dev, headroom); 2611 if (unlikely(!nskb)) 2612 return -ENOMEM; 2613 2614 __copy_skb_header(nskb, p); 2615 nskb->mac_len = p->mac_len; 2616 2617 skb_reserve(nskb, headroom); 2618 2619 skb_set_mac_header(nskb, -hlen); 2620 skb_set_network_header(nskb, skb_network_offset(p)); 2621 skb_set_transport_header(nskb, skb_transport_offset(p)); 2622 2623 memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen); 2624 2625 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2626 skb_shinfo(nskb)->frag_list = p; 2627 skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size; 2628 skb_header_release(p); 2629 nskb->prev = p; 2630 2631 nskb->data_len += p->len; 2632 nskb->truesize += p->len; 2633 nskb->len += p->len; 2634 2635 *head = nskb; 2636 nskb->next = p->next; 2637 p->next = NULL; 2638 2639 p = nskb; 2640 2641 merge: 2642 p->prev->next = skb; 2643 p->prev = skb; 2644 skb_header_release(skb); 2645 2646 done: 2647 NAPI_GRO_CB(p)->count++; 2648 p->data_len += skb->len; 2649 p->truesize += skb->len; 2650 p->len += skb->len; 2651 2652 NAPI_GRO_CB(skb)->same_flow = 1; 2653 return 0; 2654 } 2655 EXPORT_SYMBOL_GPL(skb_gro_receive); 2656 2657 void __init skb_init(void) 2658 { 2659 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 2660 sizeof(struct sk_buff), 2661 0, 2662 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2663 NULL); 2664 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2665 (2*sizeof(struct sk_buff)) + 2666 sizeof(atomic_t), 2667 0, 2668 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2669 NULL); 2670 } 2671 2672 /** 2673 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 2674 * @skb: Socket buffer containing the buffers to be mapped 2675 * @sg: The scatter-gather list to map into 2676 * @offset: The offset into the buffer's contents to start mapping 2677 * @len: Length of buffer space to be mapped 2678 * 2679 * Fill the specified scatter-gather list with mappings/pointers into a 2680 * region of the buffer space attached to a socket buffer. 
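 *
 * A minimal sketch of the usual calling pattern (illustrative only;
 * MAX_SG_ENTRIES is a caller-chosen bound, not something defined here):
 *
 *	struct scatterlist sg[MAX_SG_ENTRIES];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *
 * The table must be large enough for the number of elements the skb
 * requires (skb_cow_data() below returns exactly that count for a
 * COW'd buffer); skb_to_sgvec() marks the last used entry with
 * sg_mark_end().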
2681 */ 2682 static int 2683 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2684 { 2685 int start = skb_headlen(skb); 2686 int i, copy = start - offset; 2687 int elt = 0; 2688 2689 if (copy > 0) { 2690 if (copy > len) 2691 copy = len; 2692 sg_set_buf(sg, skb->data + offset, copy); 2693 elt++; 2694 if ((len -= copy) == 0) 2695 return elt; 2696 offset += copy; 2697 } 2698 2699 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2700 int end; 2701 2702 WARN_ON(start > offset + len); 2703 2704 end = start + skb_shinfo(skb)->frags[i].size; 2705 if ((copy = end - offset) > 0) { 2706 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2707 2708 if (copy > len) 2709 copy = len; 2710 sg_set_page(&sg[elt], frag->page, copy, 2711 frag->page_offset+offset-start); 2712 elt++; 2713 if (!(len -= copy)) 2714 return elt; 2715 offset += copy; 2716 } 2717 start = end; 2718 } 2719 2720 if (skb_shinfo(skb)->frag_list) { 2721 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2722 2723 for (; list; list = list->next) { 2724 int end; 2725 2726 WARN_ON(start > offset + len); 2727 2728 end = start + list->len; 2729 if ((copy = end - offset) > 0) { 2730 if (copy > len) 2731 copy = len; 2732 elt += __skb_to_sgvec(list, sg+elt, offset - start, 2733 copy); 2734 if ((len -= copy) == 0) 2735 return elt; 2736 offset += copy; 2737 } 2738 start = end; 2739 } 2740 } 2741 BUG_ON(len); 2742 return elt; 2743 } 2744 2745 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2746 { 2747 int nsg = __skb_to_sgvec(skb, sg, offset, len); 2748 2749 sg_mark_end(&sg[nsg - 1]); 2750 2751 return nsg; 2752 } 2753 2754 /** 2755 * skb_cow_data - Check that a socket buffer's data buffers are writable 2756 * @skb: The socket buffer to check. 2757 * @tailbits: Amount of trailing space to be added 2758 * @trailer: Returned pointer to the skb where the @tailbits space begins 2759 * 2760 * Make sure that the data buffers attached to a socket buffer are 2761 * writable. If they are not, private copies are made of the data buffers 2762 * and the socket buffer is set to use these instead. 2763 * 2764 * If @tailbits is given, make sure that there is space to write @tailbits 2765 * bytes of data beyond current end of socket buffer. @trailer will be 2766 * set to point to the skb in which this space begins. 2767 * 2768 * The number of scatterlist elements required to completely map the 2769 * COW'd and extended socket buffer will be returned. 2770 */ 2771 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 2772 { 2773 int copyflag; 2774 int elt; 2775 struct sk_buff *skb1, **skb_p; 2776 2777 /* If skb is cloned or its head is paged, reallocate 2778 * head pulling out all the pages (pages are considered not writable 2779 * at the moment even if they are anonymous). 2780 */ 2781 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 2782 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 2783 return -ENOMEM; 2784 2785 /* Easy case. Most of packets will go this way. */ 2786 if (!skb_shinfo(skb)->frag_list) { 2787 /* A little of trouble, not enough of space for trailer. 2788 * This should not happen, when stack is tuned to generate 2789 * good frames. OK, on miss we reallocate and reserve even more 2790 * space, 128 bytes is fair. */ 2791 2792 if (skb_tailroom(skb) < tailbits && 2793 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 2794 return -ENOMEM; 2795 2796 /* Voila! */ 2797 *trailer = skb; 2798 return 1; 2799 } 2800 2801 /* Misery. 
We have to walk the fragment list and mend it piece by piece. */
2802 
2803 	elt = 1;
2804 	skb_p = &skb_shinfo(skb)->frag_list;
2805 	copyflag = 0;
2806 
2807 	while ((skb1 = *skb_p) != NULL) {
2808 		int ntail = 0;
2809 
2810 		/* The fragment may have been partially pulled by someone;
2811 		 * this can happen on input. Copy it and everything
2812 		 * after it. */
2813 
2814 		if (skb_shared(skb1))
2815 			copyflag = 1;
2816 
2817 		/* If the skb is the last, worry about trailer. */
2818 
2819 		if (skb1->next == NULL && tailbits) {
2820 			if (skb_shinfo(skb1)->nr_frags ||
2821 			    skb_shinfo(skb1)->frag_list ||
2822 			    skb_tailroom(skb1) < tailbits)
2823 				ntail = tailbits + 128;
2824 		}
2825 
2826 		if (copyflag ||
2827 		    skb_cloned(skb1) ||
2828 		    ntail ||
2829 		    skb_shinfo(skb1)->nr_frags ||
2830 		    skb_shinfo(skb1)->frag_list) {
2831 			struct sk_buff *skb2;
2832 
2833 			/* No way around it: this fragment must be copied. */
2834 			if (ntail == 0)
2835 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2836 			else
2837 				skb2 = skb_copy_expand(skb1,
2838 						       skb_headroom(skb1),
2839 						       ntail,
2840 						       GFP_ATOMIC);
2841 			if (unlikely(skb2 == NULL))
2842 				return -ENOMEM;
2843 
2844 			if (skb1->sk)
2845 				skb_set_owner_w(skb2, skb1->sk);
2846 
2847 			/* The copy succeeded:
2848 			 * link the new skb in and drop the old one. */
2849 
2850 			skb2->next = skb1->next;
2851 			*skb_p = skb2;
2852 			kfree_skb(skb1);
2853 			skb1 = skb2;
2854 		}
2855 		elt++;
2856 		*trailer = skb1;
2857 		skb_p = &skb1->next;
2858 	}
2859 
2860 	return elt;
2861 }
2862 
2863 /**
2864  * skb_partial_csum_set - set up and verify partial csum values for packet
2865  * @skb: the skb to set
2866  * @start: the number of bytes after skb->data to start checksumming.
2867  * @off: the offset from start to place the checksum.
2868  *
2869  * For untrusted partially-checksummed packets, we need to make sure the values
2870  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
2871  *
2872  * This function checks and sets those values and skb->ip_summed: if this
2873  * returns false you should drop the packet.
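 *
 * A typical caller pattern (sketch only; hdr->csum_start and
 * hdr->csum_offset stand for values read from an untrusted metadata
 * header, as a virtio/tap style driver would supply):
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}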
2874 */ 2875 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 2876 { 2877 if (unlikely(start > skb->len - 2) || 2878 unlikely((int)start + off > skb->len - 2)) { 2879 if (net_ratelimit()) 2880 printk(KERN_WARNING 2881 "bad partial csum: csum=%u/%u len=%u\n", 2882 start, off, skb->len); 2883 return false; 2884 } 2885 skb->ip_summed = CHECKSUM_PARTIAL; 2886 skb->csum_start = skb_headroom(skb) + start; 2887 skb->csum_offset = off; 2888 return true; 2889 } 2890 2891 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 2892 { 2893 if (net_ratelimit()) 2894 pr_warning("%s: received packets cannot be forwarded" 2895 " while LRO is enabled\n", skb->dev->name); 2896 } 2897 2898 EXPORT_SYMBOL(___pskb_trim); 2899 EXPORT_SYMBOL(__kfree_skb); 2900 EXPORT_SYMBOL(kfree_skb); 2901 EXPORT_SYMBOL(__pskb_pull_tail); 2902 EXPORT_SYMBOL(__alloc_skb); 2903 EXPORT_SYMBOL(__netdev_alloc_skb); 2904 EXPORT_SYMBOL(pskb_copy); 2905 EXPORT_SYMBOL(pskb_expand_head); 2906 EXPORT_SYMBOL(skb_checksum); 2907 EXPORT_SYMBOL(skb_clone); 2908 EXPORT_SYMBOL(skb_copy); 2909 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2910 EXPORT_SYMBOL(skb_copy_and_csum_dev); 2911 EXPORT_SYMBOL(skb_copy_bits); 2912 EXPORT_SYMBOL(skb_copy_expand); 2913 EXPORT_SYMBOL(skb_over_panic); 2914 EXPORT_SYMBOL(skb_pad); 2915 EXPORT_SYMBOL(skb_realloc_headroom); 2916 EXPORT_SYMBOL(skb_under_panic); 2917 EXPORT_SYMBOL(skb_dequeue); 2918 EXPORT_SYMBOL(skb_dequeue_tail); 2919 EXPORT_SYMBOL(skb_insert); 2920 EXPORT_SYMBOL(skb_queue_purge); 2921 EXPORT_SYMBOL(skb_queue_head); 2922 EXPORT_SYMBOL(skb_queue_tail); 2923 EXPORT_SYMBOL(skb_unlink); 2924 EXPORT_SYMBOL(skb_append); 2925 EXPORT_SYMBOL(skb_split); 2926 EXPORT_SYMBOL(skb_prepare_seq_read); 2927 EXPORT_SYMBOL(skb_seq_read); 2928 EXPORT_SYMBOL(skb_abort_seq_read); 2929 EXPORT_SYMBOL(skb_find_text); 2930 EXPORT_SYMBOL(skb_append_datato_frags); 2931 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 2932 2933 EXPORT_SYMBOL_GPL(skb_to_sgvec); 2934 EXPORT_SYMBOL_GPL(skb_cow_data); 2935 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 2936
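
/*
 * Example use of skb_find_text() with the textsearch API (a sketch only;
 * the algorithm name, pattern and error handling are placeholders):
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	if (pos != UINT_MAX)
 *		pr_debug("pattern found at offset %u\n", pos);
 *
 *	textsearch_destroy(conf);
 */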