/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers.
 *              Ray VanTassle   :       Fixed --skb->lock in free.
 *              Alan Cox        :       skb_copy copy arp field.
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool.
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

static kmem_cache_t *skbuff_head_cache __read_mostly;
static kmem_cache_t *skbuff_fclone_cache __read_mostly;

/*
 *      Keep out-of-line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always
 *      reliable.
 */

/**
 *      skb_over_panic - private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out-of-line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%p end:%p dev:%s\n",
               here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

/**
 *      skb_under_panic - private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out-of-line support code for skb_push(). Not user callable.
 */
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%p end:%p dev:%s\n",
               here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}
/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 *      'private' fields and also do memory statistics to find all the
 *      [BEEP] leaks.
 */

/**
 *      __alloc_skb - allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @fclone: allocate from fclone cache instead of head cache
 *              and allocate a cloned (child) skb
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of @size bytes. The object has a reference count of one.
 *      Returns the buffer, or %NULL on failure.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int fclone)
{
        kmem_cache_t *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;

        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

        /* Get the HEAD */
        skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
        if (!skb)
                goto out;

        /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        memset(skb, 0, offsetof(struct sk_buff, truesize));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb->tail = data;
        skb->end = data + size;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
        shinfo->nr_frags = 0;
        shinfo->tso_size = 0;
        shinfo->tso_segs = 0;
        shinfo->ufo_size = 0;
        shinfo->ip6_frag_id = 0;
        shinfo->frag_list = NULL;

        if (fclone) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
        }
out:
        return skb;
nodata:
        /* Free back to the cache we allocated from (the fclone cache when
         * @fclone was set), not unconditionally to the head cache.
         */
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
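/*
 * Illustrative sketch (not kernel-doc): a typical receive-path caller
 * allocates with alloc_skb(), reserves headroom, then fills the linear
 * area with skb_put(). The names 'len' and 'pkt' are made up here:
 *
 *      struct sk_buff *skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, NET_IP_ALIGN);       align the IP header
 *      memcpy(skb_put(skb, len), pkt, len);
 */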
/**
 *      alloc_skb_from_cache - allocate a network buffer
 *      @cp: kmem_cache from which to allocate the data area
 *              (object size must be big enough for @size bytes + skb overheads)
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of @size bytes. The object has a reference count of one.
 *      Returns the buffer, or %NULL on failure.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
                                     unsigned int size,
                                     gfp_t gfp_mask)
{
        struct sk_buff *skb;
        u8 *data;

        /* Get the HEAD */
        skb = kmem_cache_alloc(skbuff_head_cache,
                               gfp_mask & ~__GFP_DMA);
        if (!skb)
                goto out;

        /* Get the DATA. */
        size = SKB_DATA_ALIGN(size);
        data = kmem_cache_alloc(cp, gfp_mask);
        if (!data)
                goto nodata;

        memset(skb, 0, offsetof(struct sk_buff, truesize));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb->tail = data;
        skb->end = data + size;

        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->tso_size = 0;
        skb_shinfo(skb)->tso_segs = 0;
        skb_shinfo(skb)->frag_list = NULL;
out:
        return skb;
nodata:
        kmem_cache_free(skbuff_head_cache, skb);
        skb = NULL;
        goto out;
}


static void skb_drop_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list = skb_shinfo(skb)->frag_list;

        skb_shinfo(skb)->frag_list = NULL;

        do {
                struct sk_buff *this = list;
                list = list->next;
                kfree_skb(this);
        } while (list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
                skb_get(list);
}

void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }

                if (skb_shinfo(skb)->frag_list)
                        skb_drop_fraglist(skb);

                kfree(skb->head);
        }
}

/*
 *      Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        skb_release_data(skb);
        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        }
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
        dst_release(skb->dst);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#ifdef CONFIG_NETFILTER
        nf_conntrack_put(skb->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif

        kfree_skbmem(skb);
}
/**
 *      skb_clone - duplicate an sk_buff
 *      @skb: buffer to clone
 *      @gfp_mask: allocation priority
 *
 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *      copies share the same packet data but not structure. The new
 *      buffer has a reference count of 1. If the allocation fails, the
 *      function returns %NULL; otherwise the new buffer is returned.
 *
 *      If this function is called from an interrupt, @gfp_mask must be
 *      %GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
        struct sk_buff *n;

        n = skb + 1;
        if (skb->fclone == SKB_FCLONE_ORIG &&
            n->fclone == SKB_FCLONE_UNAVAILABLE) {
                atomic_t *fclone_ref = (atomic_t *) (n + 1);
                n->fclone = SKB_FCLONE_CLONE;
                atomic_inc(fclone_ref);
        } else {
                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }

#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->sk = NULL;
        C(tstamp);
        C(dev);
        C(h);
        C(nh);
        C(mac);
        C(dst);
        dst_clone(skb->dst);
        C(sp);
#ifdef CONFIG_INET
        secpath_get(skb->sp);
#endif
        memcpy(n->cb, skb->cb, sizeof(skb->cb));
        C(len);
        C(data_len);
        C(csum);
        C(local_df);
        n->cloned = 1;
        n->nohdr = 0;
        C(pkt_type);
        C(ip_summed);
        C(priority);
        C(protocol);
        n->destructor = NULL;
#ifdef CONFIG_NETFILTER
        C(nfmark);
        C(nfct);
        nf_conntrack_get(skb->nfct);
        C(nfctinfo);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        C(nfct_reasm);
        nf_conntrack_get_reasm(skb->nfct_reasm);
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        C(ipvs_property);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        C(nf_bridge);
        nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#ifdef CONFIG_NET_SCHED
        C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
        n->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
        n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
        n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
        C(input_dev);
#endif
#endif
        C(truesize);
        atomic_set(&n->users, 1);
        C(head);
        C(data);
        C(tail);
        C(end);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
}
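/*
 * Illustrative sketch (not kernel-doc): a clone shares the packet data
 * with the original, so it suits cases where only the struct sk_buff
 * metadata must diverge, e.g. handing the same payload to a second
 * consumer; writing the data through either copy affects both:
 *
 *      struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *      if (nskb)
 *              dev_queue_xmit(nskb);       data area still shared with skb
 */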
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;

        new->sk = NULL;
        new->dev = old->dev;
        new->priority = old->priority;
        new->protocol = old->protocol;
        new->dst = dst_clone(old->dst);
#ifdef CONFIG_INET
        new->sp = secpath_get(old->sp);
#endif
        new->h.raw = old->h.raw + offset;
        new->nh.raw = old->nh.raw + offset;
        new->mac.raw = old->mac.raw + offset;
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->local_df = old->local_df;
        new->fclone = SKB_FCLONE_UNAVAILABLE;
        new->pkt_type = old->pkt_type;
        new->tstamp = old->tstamp;
        new->destructor = NULL;
#ifdef CONFIG_NETFILTER
        new->nfmark = old->nfmark;
        new->nfct = old->nfct;
        nf_conntrack_get(old->nfct);
        new->nfctinfo = old->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        new->nfct_reasm = old->nfct_reasm;
        nf_conntrack_get_reasm(old->nfct_reasm);
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        new->ipvs_property = old->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        new->nf_bridge = old->nf_bridge;
        nf_bridge_get(old->nf_bridge);
#endif
#endif
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
        new->tc_verd = old->tc_verd;
#endif
        new->tc_index = old->tc_index;
#endif
        atomic_set(&new->users, 1);
        skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
        skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
}

/**
 *      skb_copy - create private copy of an sk_buff
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data. This is used when the
 *      caller wishes to modify the data and needs a private copy of the
 *      data to alter. Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      As a by-product this function converts a non-linear &sk_buff to a
 *      linear one, so that the &sk_buff becomes completely private and the
 *      caller is allowed to modify all the data of the returned buffer. This
 *      means that this function is not recommended when only the header is
 *      going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
        int headerlen = skb->data - skb->head;
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
                                      gfp_mask);
        if (!n)
                return NULL;

        /* Set the data pointer */
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
        n->csum = skb->csum;
        n->ip_summed = skb->ip_summed;

        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();

        copy_skb_header(n, skb);
        return n;
}
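/*
 * Illustrative sketch (not kernel-doc): skb_copy() is the heavyweight
 * option; it linearizes and duplicates the payload so it may be written
 * freely. For header-only edits, pskb_copy() below is cheaper:
 *
 *      struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *      if (priv)
 *              memset(priv->data, 0, 4);     safe: data area is private
 */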
/**
 *      pskb_copy - create copy of an sk_buff with private head.
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and part of its data, located
 *      in header. Fragmented data remain shared. This is used when
 *      the caller wishes to modify only header of &sk_buff and needs
 *      private copy of the header to alter. Returns %NULL on failure
 *      or the pointer to the buffer on success.
 *      The returned buffer has a reference count of 1.
 */
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);

        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, skb->data - skb->head);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        memcpy(n->data, skb->data, n->len);
        n->csum = skb->csum;
        n->ip_summed = skb->ip_summed;

        n->data_len = skb->data_len;
        n->len = skb->len;

        if (skb_shinfo(skb)->nr_frags) {
                int i;

                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
                        get_page(skb_shinfo(n)->frags[i].page);
                }
                skb_shinfo(n)->nr_frags = i;
        }

        if (skb_shinfo(skb)->frag_list) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }

        copy_skb_header(n, skb);
out:
        return n;
}

/**
 *      pskb_expand_head - reallocate header of &sk_buff
 *      @skb: buffer to reallocate
 *      @nhead: room to add at head
 *      @ntail: room to add at tail
 *      @gfp_mask: allocation priority
 *
 *      Expands (or creates identical copy, if @nhead and @ntail are zero)
 *      header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *      reference count of 1. Returns zero on success or %-ENOMEM if
 *      expansion failed. In the latter case, &sk_buff is not changed.
 *
 *      All the pointers pointing into skb header may change and must be
 *      reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask)
{
        int i;
        u8 *data;
        int size = nhead + (skb->end - skb->head) + ntail;
        long off;

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void.
         */
        memcpy(data + nhead, skb->head, skb->tail - skb->head);
        memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);

        if (skb_shinfo(skb)->frag_list)
                skb_clone_fraglist(skb);

        skb_release_data(skb);

        off = (data + nhead) - skb->head;

        skb->head = data;
        skb->end = data + size;
        skb->data += off;
        skb->tail += off;
        skb->mac.raw += off;
        skb->h.raw += off;
        skb->nh.raw += off;
        skb->cloned = 0;
        skb->nohdr = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;

nodata:
        return -ENOMEM;
}
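/*
 * Illustrative sketch (not kernel-doc): after pskb_expand_head() the
 * whole header block may have moved, so any cached pointer into it is
 * stale and must be recomputed:
 *
 *      struct iphdr *iph;
 *      if (pskb_expand_head(skb, 16, 0, GFP_ATOMIC))
 *              return -ENOMEM;
 *      iph = skb->nh.iph;      reload: the old pointer may dangle
 */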
/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
                                             GFP_ATOMIC)) {
                        kfree_skb(skb2);
                        skb2 = NULL;
                }
        }
        return skb2;
}

/**
 *      skb_copy_expand - copy and expand sk_buff
 *      @skb: buffer to copy
 *      @newheadroom: new free bytes at head
 *      @newtailroom: new free bytes at tail
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data and while doing so
 *      allocate additional space.
 *
 *      This is used when the caller wishes to modify the data and needs a
 *      private copy of the data to alter as well as more space for new fields.
 *      Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      You must pass %GFP_ATOMIC as the allocation priority if this function
 *      is called from an interrupt.
 *
 *      BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *      only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
                                gfp_t gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
        int head_copy_len, head_copy_off;

        if (!n)
                return NULL;

        skb_reserve(n, newheadroom);

        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        head_copy_len = skb_headroom(skb);
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
        else
                head_copy_off = newheadroom - head_copy_len;

        /* Copy the linear header and data. */
        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                          skb->len + head_copy_len))
                BUG();

        copy_skb_header(n, skb);

        return n;
}
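/*
 * Illustrative sketch (not kernel-doc): a tunnel-style sender that must
 * prepend an outer header can use skb_realloc_headroom() when the
 * existing headroom is too small ('hlen' is made up here):
 *
 *      if (skb_headroom(skb) < hlen) {
 *              struct sk_buff *nskb = skb_realloc_headroom(skb, hlen);
 *              if (!nskb)
 *                      goto drop;
 *              kfree_skb(skb);
 *              skb = nskb;
 *      }
 *      skb_push(skb, hlen);        now guaranteed to fit
 */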
/**
 *      skb_pad - zero pad the tail of an skb
 *      @skb: buffer to pad
 *      @pad: space to pad
 *
 *      Ensure that a buffer is followed by a padding area that is zero
 *      filled. Used by network drivers which may DMA or transfer data
 *      beyond the buffer end onto the wire.
 *
 *      May return %NULL in out-of-memory cases.
 */
struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
{
        struct sk_buff *nskb;

        /* If the skbuff is non linear tailroom is always zero.. */
        if (skb_tailroom(skb) >= pad) {
                memset(skb->data + skb->len, 0, pad);
                return skb;
        }

        nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad,
                               GFP_ATOMIC);
        kfree_skb(skb);
        if (nskb)
                memset(nskb->data + nskb->len, 0, pad);
        return nskb;
}

/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
 * If realloc == 0 and trimming is impossible without changing data,
 * it is BUG().
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
        int offset = skb_headlen(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int i;

        for (i = 0; i < nfrags; i++) {
                int end = offset + skb_shinfo(skb)->frags[i].size;
                if (end > len) {
                        if (skb_cloned(skb)) {
                                BUG_ON(!realloc);
                                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                                        return -ENOMEM;
                        }
                        if (len <= offset) {
                                put_page(skb_shinfo(skb)->frags[i].page);
                                skb_shinfo(skb)->nr_frags--;
                        } else {
                                skb_shinfo(skb)->frags[i].size = len - offset;
                        }
                }
                offset = end;
        }

        if (offset < len) {
                skb->data_len -= skb->len - len;
                skb->len = len;
        } else {
                if (len <= skb_headlen(skb)) {
                        skb->len = len;
                        skb->data_len = 0;
                        skb->tail = skb->data + len;
                        if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
                                skb_drop_fraglist(skb);
                } else {
                        skb->data_len -= skb->len - len;
                        skb->len = len;
                }
        }

        return 0;
}
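/*
 * Illustrative sketch (not kernel-doc): callers normally reach this
 * through the pskb_trim()/skb_trim() wrappers in <linux/skbuff.h>, e.g.
 * to drop a trailer of 'pad' bytes ('pad' is made up here):
 *
 *      if (pskb_trim(skb, skb->len - pad))
 *              goto drop;        -ENOMEM on reallocation failure
 */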
/**
 *      __pskb_pull_tail - advance tail of skb header
 *      @skb: buffer to reallocate
 *      @delta: number of bytes to advance tail
 *
 *      The function only makes sense on a fragmented &sk_buff: it expands
 *      the header, moving its tail forward and copying the necessary data
 *      from the fragmented part.
 *
 *      &sk_buff MUST have reference count of 1.
 *
 *      Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *      or the value of the new tail of the skb on success.
 *
 *      All the pointers pointing into skb header may change and must be
 *      reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
        /* If skb has not enough free space at tail, get new one
         * plus 128 bytes for future expansions. If we have enough
         * room at tail, reallocate without expansion only if skb is cloned.
         */
        int i, k, eat = (skb->tail + delta) - skb->end;

        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
                                     GFP_ATOMIC))
                        return NULL;
        }

        if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
                BUG();

        /* Optimization: no fragments, no reasons to preestimate
         * size of pulled pages. Superb.
         */
        if (!skb_shinfo(skb)->frag_list)
                goto pull_pages;

        /* Estimate size of pulled pages. */
        eat = delta;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size >= eat)
                        goto pull_pages;
                eat -= skb_shinfo(skb)->frags[i].size;
        }

        /* If we need to update the frag list, we are in trouble.
         * Certainly, it is possible to add an offset to skb data,
         * but taking into account that pulling is expected to
         * be a very rare operation, it is worth fighting against
         * further bloating of the skb head and crucifying ourselves
         * here instead. Pure masochism, indeed. 8)8)
         */
        if (eat) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
                struct sk_buff *clone = NULL;
                struct sk_buff *insp = NULL;

                do {
                        BUG_ON(!list);

                        if (list->len <= eat) {
                                /* Eaten as whole. */
                                eat -= list->len;
                                list = list->next;
                                insp = list;
                        } else {
                                /* Eaten partially. */

                                if (skb_shared(list)) {
                                        /* Sucks! We need to fork list. :-( */
                                        clone = skb_clone(list, GFP_ATOMIC);
                                        if (!clone)
                                                return NULL;
                                        insp = list->next;
                                        list = clone;
                                } else {
                                        /* This may be pulled without
                                         * problems. */
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
                                        if (clone)
                                                kfree_skb(clone);
                                        return NULL;
                                }
                                break;
                        }
                } while (eat);

                /* Free pulled out fragments. */
                while ((list = skb_shinfo(skb)->frag_list) != insp) {
                        skb_shinfo(skb)->frag_list = list->next;
                        kfree_skb(list);
                }
                /* And insert new clone at head. */
                if (clone) {
                        clone->next = list;
                        skb_shinfo(skb)->frag_list = clone;
                }
        }
        /* Success! Now we may commit changes to skb data. */

pull_pages:
        eat = delta;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb->tail += delta;
        skb->data_len -= delta;

        return skb->tail;
}
/* Copy some data bits from skb to kernel buffer. */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
        int i, copy;
        int start = skb_headlen(skb);

        if (offset > (int)skb->len - len)
                goto fault;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                memcpy(to, skb->data + offset, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;

                        if (copy > len)
                                copy = len;

                        vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
                        memcpy(to,
                               vaddr + skb_shinfo(skb)->frags[i].page_offset +
                               offset - start, copy);
                        kunmap_skb_frag(vaddr);

                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                if (skb_copy_bits(list, offset - start,
                                                  to, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                                to += copy;
                        }
                        start = end;
                }
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}

/**
 *      skb_store_bits - store bits from kernel buffer to skb
 *      @skb: destination buffer
 *      @offset: offset in destination
 *      @from: source buffer
 *      @len: number of bytes to copy
 *
 *      Copy the specified number of bytes from the source buffer to the
 *      destination skb. This function handles all the messy bits of
 *      traversing fragment lists and such.
 */
int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
{
        int i, copy;
        int start = skb_headlen(skb);

        if (offset > (int)skb->len - len)
                goto fault;

        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                memcpy(skb->data + offset, from, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                from += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + frag->size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;

                        if (copy > len)
                                copy = len;

                        vaddr = kmap_skb_frag(frag);
                        memcpy(vaddr + frag->page_offset + offset - start,
                               from, copy);
                        kunmap_skb_frag(vaddr);

                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        from += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                if (skb_store_bits(list, offset - start,
                                                   from, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                                from += copy;
                        }
                        start = end;
                }
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
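/*
 * Illustrative sketch (not kernel-doc): skb_copy_bits() hides the
 * linear/fragmented layout from the caller, so peeking at a header that
 * may straddle fragments is simply (assuming skb->h.raw has been set):
 *
 *      struct udphdr uh;
 *      if (skb_copy_bits(skb, skb->h.raw - skb->data, &uh, sizeof(uh)))
 *              goto drop;        offset beyond skb->len
 */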
/* Checksum skb data. */
unsigned int skb_checksum(const struct sk_buff *skb, int offset,
                          int len, unsigned int csum)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int pos = 0;

        /* Checksum header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                csum = csum_partial(skb->data + offset, copy, csum);
                if ((len -= copy) == 0)
                        return csum;
                offset += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        unsigned int csum2;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
                        csum2 = csum_partial(vaddr + frag->page_offset +
                                             offset - start, copy, 0);
                        kunmap_skb_frag(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
                                return csum;
                        offset += copy;
                        pos += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                unsigned int csum2;
                                if (copy > len)
                                        copy = len;
                                csum2 = skb_checksum(list, offset - start,
                                                     copy, 0);
                                csum = csum_block_add(csum, csum2, pos);
                                if ((len -= copy) == 0)
                                        return csum;
                                offset += copy;
                                pos += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);

        return csum;
}
/* Both of above in one bottle. */
unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                    u8 *to, int len, unsigned int csum)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                csum = csum_partial_copy_nocheck(skb->data + offset, to,
                                                 copy, csum);
                if ((len -= copy) == 0)
                        return csum;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        unsigned int csum2;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
                        csum2 = csum_partial_copy_nocheck(vaddr +
                                                          frag->page_offset +
                                                          offset - start, to,
                                                          copy, 0);
                        kunmap_skb_frag(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
                                return csum;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        unsigned int csum2;
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                csum2 = skb_copy_and_csum_bits(list,
                                                               offset - start,
                                                               to, copy, 0);
                                csum = csum_block_add(csum, csum2, pos);
                                if ((len -= copy) == 0)
                                        return csum;
                                offset += copy;
                                to += copy;
                                pos += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
        unsigned int csum;
        long csstart;

        if (skb->ip_summed == CHECKSUM_HW)
                csstart = skb->h.raw - skb->data;
        else
                csstart = skb_headlen(skb);

        BUG_ON(csstart > skb_headlen(skb));

        memcpy(to, skb->data, csstart);

        csum = 0;
        if (csstart != skb->len)
                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
                                              skb->len - csstart, 0);

        if (skb->ip_summed == CHECKSUM_HW) {
                long csstuff = csstart + skb->csum;

                *((unsigned short *)(to + csstuff)) = csum_fold(csum);
        }
}

/**
 *      skb_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. The list lock is taken so the function
 *      may be used safely with other locking list functions. The head item is
 *      returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}
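/*
 * Illustrative sketch (not kernel-doc): the locked queue helpers below
 * implement a simple producer/consumer pattern; 'rxq' and 'process()'
 * are made up here:
 *
 *      static struct sk_buff_head rxq;
 *
 *      skb_queue_head_init(&rxq);        once, at setup
 *      skb_queue_tail(&rxq, skb);        producer, any context
 *
 *      while ((skb = skb_dequeue(&rxq)) != NULL)
 *              process(skb);             consumer
 */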
/**
 *      skb_dequeue_tail - remove from the tail of the queue
 *      @list: list to dequeue from
 *
 *      Remove the tail of the list. The list lock is taken so the function
 *      may be used safely with other locking list functions. The tail item is
 *      returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue_tail(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}

/**
 *      skb_queue_purge - empty a list
 *      @list: list to empty
 *
 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
 *      the list and one reference dropped. This function takes the list
 *      lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}

/**
 *      skb_queue_head - queue a buffer at the list head
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the start of the list. This function takes the
 *      list lock and can be used safely with other locking &sk_buff functions.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *      skb_queue_tail - queue a buffer at the list tail
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the tail of the list. This function takes the
 *      list lock and can be used safely with other locking &sk_buff functions.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *      skb_unlink - remove a buffer from a list
 *      @skb: buffer to remove
 *      @list: list to use
 *
 *      Remove a packet from a list. The list locks are taken and this
 *      function is atomic with respect to other list locked calls.
 *
 *      You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_unlink(skb, list);
        spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *      skb_append - append a buffer
 *      @old: buffer to insert after
 *      @newsk: buffer to insert
 *      @list: list to use
 *
 *      Place a packet after a given packet in a list. The list locks are taken
 *      and this function is atomic with respect to other list locked calls.
 *      A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_append(old, newsk, list);
        spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *      skb_insert - insert a buffer
 *      @old: buffer to insert before
 *      @newsk: buffer to insert
 *      @list: list to use
 *
 *      Place a packet before a given packet in a list. The list locks are
 *      taken and this function is atomic with respect to other list locked
 *      calls.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_insert(newsk, old->prev, old, list);
        spin_unlock_irqrestore(&list->lock, flags);
}

#if 0
/*
 *      Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
        /* Must match allocation in alloc_skb */
        mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

        kmem_add_cache_size(mtu);
}
#endif

static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff *skb1,
                                           const u32 len, const int pos)
{
        int i;

        memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);

        /* And move data appendix as is. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
        skb_shinfo(skb)->nr_frags = 0;
        skb1->data_len = skb->data_len;
        skb1->len += skb1->data_len;
        skb->data_len = 0;
        skb->len = len;
        skb->tail = skb->data + len;
}

static inline void skb_split_no_header(struct sk_buff *skb,
                                       struct sk_buff *skb1,
                                       const u32 len, int pos)
{
        int i, k = 0;
        const int nfrags = skb_shinfo(skb)->nr_frags;

        skb_shinfo(skb)->nr_frags = 0;
        skb1->len = skb1->data_len = skb->len - len;
        skb->len = len;
        skb->data_len = len - pos;

        for (i = 0; i < nfrags; i++) {
                int size = skb_shinfo(skb)->frags[i].size;

                if (pos + size > len) {
                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

                        if (pos < len) {
                                /* Split frag.
                                 * We have two variants in this case:
                                 * 1. Move all the frag to the second
                                 *    part, if it is possible. F.e.
                                 *    this approach is mandatory for TUX,
                                 *    where splitting is expensive.
                                 * 2. Split accurately. This is what we
                                 *    do here.
                                 */
                                get_page(skb_shinfo(skb)->frags[i].page);
                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
                                skb_shinfo(skb1)->frags[0].size -= len - pos;
                                skb_shinfo(skb)->frags[i].size = len - pos;
                                skb_shinfo(skb)->nr_frags++;
                        }
                        k++;
                } else
                        skb_shinfo(skb)->nr_frags++;
                pos += size;
        }
        skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
        int pos = skb_headlen(skb);

        if (len < pos)  /* Split line is inside header. */
                skb_split_inside_header(skb, skb1, len, pos);
        else            /* Second chunk has no header, nothing to copy. */
                skb_split_no_header(skb, skb1, len, pos);
}
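/*
 * Illustrative sketch (not kernel-doc): skb_split() leaves the first
 * @len bytes in @skb and moves the rest into @skb1, whose buffer must
 * already provide enough tailroom for any linear part that moves;
 * 'mss' is made up here:
 *
 *      struct sk_buff *tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *      if (!tail)
 *              return -ENOMEM;
 *      skb_split(skb, tail, mss);        first 'mss' bytes stay in skb
 */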
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
                          unsigned int to, struct skb_seq_state *st)
{
        st->lower_offset = from;
        st->upper_offset = to;
        st->root_skb = st->cur_skb = skb;
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary;
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment; state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
                          struct skb_seq_state *st)
{
        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
        skb_frag_t *frag;

        if (unlikely(abs_offset >= st->upper_offset))
                return 0;

next_skb:
        block_limit = skb_headlen(st->cur_skb);

        if (abs_offset < block_limit) {
                *data = st->cur_skb->data + abs_offset;
                return block_limit - abs_offset;
        }

        if (st->frag_idx == 0 && !st->frag_data)
                st->stepped_offset += skb_headlen(st->cur_skb);

        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
                block_limit = frag->size + st->stepped_offset;

                if (abs_offset < block_limit) {
                        if (!st->frag_data)
                                st->frag_data = kmap_skb_frag(frag);

                        *data = (u8 *) st->frag_data + frag->page_offset +
                                (abs_offset - st->stepped_offset);

                        return block_limit - abs_offset;
                }

                if (st->frag_data) {
                        kunmap_skb_frag(st->frag_data);
                        st->frag_data = NULL;
                }

                st->frag_idx++;
                st->stepped_offset += frag->size;
        }

        if (st->cur_skb->next) {
                st->cur_skb = st->cur_skb->next;
                st->frag_idx = 0;
                goto next_skb;
        } else if (st->root_skb == st->cur_skb &&
                   skb_shinfo(st->root_skb)->frag_list) {
                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
                goto next_skb;
        }

        return 0;
}
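/*
 * Illustrative sketch (not kernel-doc): a zerocopy scan over the first
 * 'to' bytes of an skb using the sequential reader:
 *
 *      struct skb_seq_state st;
 *      const u8 *data;
 *      unsigned int consumed = 0, len;
 *
 *      skb_prepare_seq_read(skb, 0, to, &st);
 *      while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *              if (memchr(data, 0x7e, len))      look at data[0..len-1]
 *                      break;
 *              consumed += len;
 *      }
 *      skb_abort_seq_read(&st);        required when stopping early
 */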
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
        if (st->frag_data)
                kunmap_skb_frag(st->frag_data);
}

#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
                                          struct ts_config *conf,
                                          struct ts_state *state)
{
        return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
        skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
                           unsigned int to, struct ts_config *config,
                           struct ts_state *state)
{
        config->get_next_block = skb_ts_get_next_block;
        config->finish = skb_ts_finish;

        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

        return textsearch_find(config, state);
}
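/*
 * Illustrative sketch (not kernel-doc): a caller prepares a textsearch
 * configuration once and then scans skbs with it; error handling of
 * textsearch_prepare() is elided and the pattern is made up:
 *
 *      struct ts_config *conf;
 *      struct ts_state state;
 *      unsigned int pos;
 *
 *      conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL,
 *                                TS_AUTOLOAD);
 *      pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *      if (pos != UINT_MAX)
 *              ...               pattern starts at offset 'pos'
 *      textsearch_destroy(conf);
 */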
/**
 *      skb_append_datato_frags - append the user data to a skb
 *      @sk: sock structure
 *      @skb: skb structure to be appended with user data
 *      @getfrag: call back function to be used for getting the user data
 *      @from: pointer to user message iov
 *      @length: length of the iov message
 *
 *      Description: This procedure appends the user data in the fragment part
 *      of the skb. If any page alloc fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                            int (*getfrag)(void *from, char *to, int offset,
                                           int len, int odd, struct sk_buff *skb),
                            void *from, int length)
{
        int frg_cnt = 0;
        skb_frag_t *frag = NULL;
        struct page *page = NULL;
        int copy, left;
        int offset = 0;
        int ret;

        do {
                /* Return error if we don't have space for new frag */
                frg_cnt = skb_shinfo(skb)->nr_frags;
                if (frg_cnt >= MAX_SKB_FRAGS)
                        return -EFAULT;

                /* allocate a new page for next frag */
                page = alloc_pages(sk->sk_allocation, 0);

                /* If alloc_page fails just return failure and caller will
                 * free previously allocated pages by doing kfree_skb()
                 */
                if (page == NULL)
                        return -ENOMEM;

                /* initialize the next frag */
                sk->sk_sndmsg_page = page;
                sk->sk_sndmsg_off = 0;
                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
                skb->truesize += PAGE_SIZE;
                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

                /* get the new initialized frag */
                frg_cnt = skb_shinfo(skb)->nr_frags;
                frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

                /* copy the user data to page */
                left = PAGE_SIZE - frag->page_offset;
                copy = (length > left) ? left : length;

                ret = getfrag(from, (page_address(frag->page) +
                              frag->page_offset + frag->size),
                              offset, copy, 0, skb);
                if (ret < 0)
                        return -EFAULT;

                /* copy was successful so update the size parameters */
                sk->sk_sndmsg_off += copy;
                frag->size += copy;
                skb->len += copy;
                skb->data_len += copy;
                offset += copy;
                length -= copy;

        } while (length > 0);

        return 0;
}

void __init skb_init(void)
{
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
                                              SLAB_HWCACHE_ALIGN,
                                              NULL, NULL);
        if (!skbuff_head_cache)
                panic("cannot create skbuff cache");

        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                                                (2*sizeof(struct sk_buff)) +
                                                sizeof(atomic_t),
                                                0,
                                                SLAB_HWCACHE_ALIGN,
                                                NULL, NULL);
        if (!skbuff_fclone_cache)
                panic("cannot create skbuff cache");
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);