// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/page_pool.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"
#include "sock_destructor.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 * skb_panic - private function for out-of-line support
 * @skb:   buffer
 * @sz:    size
 * @addr:  address
 * @msg:   skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
				unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		local_bh_disable();
		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}
/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only the data buffer where the NIC puts
 *  the incoming frame.
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
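/* Example (illustrative sketch, not part of this file): a driver-style
 * receive path using build_skb(), following the layout rules in the notes
 * above: headroom at the front, skb_shared_info space at the tail.  The
 * names mydrv_buf_size() and mydrv_rx() are hypothetical.
 *
 *	static unsigned int mydrv_buf_size(unsigned int mtu_len)
 *	{
 *		// room the NIC may NOT write into, plus shinfo at the tail
 *		return SKB_DATA_ALIGN(NET_SKB_PAD + mtu_len) +
 *		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	}
 *
 *	static struct sk_buff *mydrv_rx(void *data, unsigned int frag_size,
 *					unsigned int pkt_len)
 *	{
 *		struct sk_buff *skb = build_skb(data, frag_size);
 *
 *		if (unlikely(!skb))
 *			return NULL;	// caller keeps ownership of @data
 *		skb_reserve(skb, NET_SKB_PAD);	// consume built-in headroom
 *		skb_put(skb, pkt_len);	// bytes actually written by the NIC
 *		return skb;
 *	}
 */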
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, 0);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
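/* Example (illustrative sketch, not part of this file): allocating an RX
 * buffer with __netdev_alloc_skb().  Per the comment above, NET_SKB_PAD of
 * headroom is built in, so the caller only reserves what it additionally
 * needs.  MYDRV_EXTRA_HEADROOM and mydrv_alloc_rx() are hypothetical.
 *
 *	#define MYDRV_EXTRA_HEADROOM	16	// e.g. for an encap header
 *
 *	static struct sk_buff *mydrv_alloc_rx(struct net_device *dev,
 *					      unsigned int frame_len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = __netdev_alloc_skb(dev,
 *					 MYDRV_EXTRA_HEADROOM + frame_len,
 *					 GFP_ATOMIC);
 *		if (!skb)
 *			return NULL;
 *		// NET_SKB_PAD was already reserved internally
 *		skb_reserve(skb, MYDRV_EXTRA_HEADROOM);
 *		return skb;
 *	}
 */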
/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
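/* Example (illustrative sketch, not part of this file): attaching a page
 * fragment written by hardware to an skb with skb_add_rx_frag().  The
 * truesize argument accounts for the whole buffer the driver dedicated to
 * this fragment, which may exceed the bytes the NIC actually wrote.
 * mydrv_rx_frag() is hypothetical.
 *
 *	static void mydrv_rx_frag(struct sk_buff *skb, struct page *page,
 *				  unsigned int offset, unsigned int bytes,
 *				  unsigned int buf_size)
 *	{
 *		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *				offset, bytes, buf_size);
 *	}
 */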
static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		kfree(head);
	}
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 * Free the skbuff memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
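/* Example (illustrative sketch, not part of this file): dumping a suspect
 * packet from an error path, honouring the rule above that callers must
 * rate-limit themselves.
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */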
/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb(), but kfree_skb() assumes that the
 * frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Like consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	kasan_poison_object_data(skbuff_head_cache, skb);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_unpoison_object_data(skbuff_head_cache,
						   nc->skb_cache[i]);

		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __kfree_skb_defer(struct sk_buff *skb)
{
	skb_release_all(skb);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	lockdep_assert_in_softirq();

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
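/* Example (illustrative sketch, not part of this file): freeing TX
 * completions from a NAPI poll handler.  Passing the NAPI budget lets
 * napi_consume_skb() use the lockless percpu cache above; a zero budget
 * (e.g. netpoll) falls back to dev_consume_skb_any().  mydrv_poll() and
 * mydrv_next_completed_tx() are hypothetical.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = mydrv_next_completed_tx(napi)) != NULL)
 *			napi_consume_skb(skb, budget);
 *		// ... then process RX up to @budget packets ...
 *	}
 */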
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = msg_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	uarg->flags = SKBFL_ZEROCOPY_FRAG;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(msg_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				net_zcopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return msg_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
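/* Example (illustrative sketch, not part of this file): the userspace side
 * that exercises this uarg machinery, roughly as described in
 * Documentation/networking/msg_zerocopy.rst.  Error handling elided; the
 * completion that releases @buf arrives on the socket error queue and is
 * generated by the callback path below.
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	// @buf must stay untouched until the MSG_ERRQUEUE notification
 *	// (SO_EE_ORIGIN_ZEROCOPY, range ee_info..ee_data) is received
 */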
static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

static void __msg_zerocopy_callback(struct ubuf_info *uarg)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	bool is_zerocopy;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;
	is_zerocopy = uarg->zerocopy;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!is_zerocopy)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}

void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool success)
{
	uarg->zerocopy = uarg->zerocopy & success;

	if (refcount_dec_and_test(&uarg->refcnt))
		__msg_zerocopy_callback(uarg);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_callback);

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	struct sock *sk = skb_from_uarg(uarg)->sk;

	atomic_dec(&sk->sk_zckey);
	uarg->len--;

	if (have_uref)
		msg_zerocopy_callback(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

/**
 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
 * @skb: the skb to modify
 * @gfp_mask: allocation priority
 *
 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
 * It will copy all frags into kernel and drop the reference
 * to userspace pages.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
 *
 * Returns 0 on success or a negative error code on failure
 * to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 * skb_clone - duplicate an sk_buff
 * @skb: buffer to clone
 * @gfp_mask: allocation priority
 *
 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
 * copies share the same packet data but not structure. The new
 * buffer has a reference count of 1. If the allocation fails the
 * function returns %NULL otherwise the new buffer is returned.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
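/* Example (illustrative sketch, not part of this file): clones share
 * payload, so a writer must unshare before modifying it.  skb_cow_head()
 * wraps the pskb_expand_head() path used later in this file.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone && skb_cow_head(clone, 0) == 0) {
 *		// clone's header area is now private and safe to edit
 *		eth_hdr(clone)->h_proto = htons(ETH_P_802_2);
 *	}
 */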
void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);

void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 * skb_copy - create private copy of an sk_buff
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data. This is used when the
 * caller wishes to modify the data and needs a private copy of the
 * data to alter. Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * As a by-product, this function converts a non-linear &sk_buff to a
 * linear one, so that the &sk_buff becomes completely private and the
 * caller is allowed to modify all the data of the returned buffer. This
 * means that this function is not recommended for use in circumstances
 * when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
/**
 * __pskb_copy_fclone - create copy of an sk_buff with private head.
 * @skb: buffer to copy
 * @headroom: headroom of new skb
 * @gfp_mask: allocation priority
 * @fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 * Make a copy of both an &sk_buff and part of its data, located
 * in header. Fragmented data remain shared. This is used when
 * the caller wishes to modify only the header of the &sk_buff and
 * needs a private copy of the header to alter. Returns %NULL on
 * failure or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates identical copy, if @nhead and @ntail are zero)
 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 * reference count of 1. Returns zero in the case of success, or a
 * negative error code if expansion failed. In the latter case, the
 * &sk_buff is not changed.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
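/* Example (illustrative sketch, not part of this file): growing headroom
 * with pskb_expand_head().  Per the comment above, every cached pointer
 * into the old header must be re-derived afterwards, since skb->head may
 * have moved.  @needed is a hypothetical byte count.
 *
 *	struct iphdr *iph;
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(needed), 0, GFP_ATOMIC))
 *		goto drop;		// skb is unchanged on failure
 *	iph = ip_hdr(skb);		// reload: old pointer may be stale
 */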
/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 * skb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @headroom: needed headroom
 *
 * Unlike skb_realloc_headroom, this one does not allocate a new skb
 * if possible; copies skb->sk to new skb as needed
 * and frees original skb in case of failures.
 *
 * It expects an increased headroom and generates a warning otherwise.
 */
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
	int delta = headroom - skb_headroom(skb);
	int osize = skb_end_offset(skb);
	struct sock *sk = skb->sk;

	if (WARN_ONCE(delta <= 0,
		      "%s is expecting an increase in the headroom", __func__))
		return skb;

	delta = SKB_DATA_ALIGN(delta);
	/* pskb_expand_head() might crash, if skb is shared. */
	if (skb_shared(skb) || !is_skb_wmem(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb))
			goto fail;

		if (sk)
			skb_set_owner_w(nskb, sk);
		consume_skb(skb);
		skb = nskb;
	}
	if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
		goto fail;

	if (sk && is_skb_wmem(skb)) {
		delta = skb_end_offset(skb) - osize;
		refcount_add(delta, &sk->sk_wmem_alloc);
		skb->truesize += delta;
	}
	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
/**
 * skb_copy_expand - copy and expand sk_buff
 * @skb: buffer to copy
 * @newheadroom: new free bytes at head
 * @newtailroom: new free bytes at tail
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data and while doing so
 * allocate additional space.
 *
 * This is used when the caller wishes to modify the data and needs a
 * private copy of the data to alter as well as more space for new fields.
 * Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * You must pass %GFP_ATOMIC as the allocation priority if this function
 * is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 * Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 * __skb_pad - zero pad the tail of an skb
 * @skb: buffer to pad
 * @pad: space to pad
 * @free_on_error: free buffer on error
 *
 * Ensure that a buffer is followed by a padding area that is zero
 * filled. Used by network drivers which may DMA or transfer data
 * beyond the buffer end onto the wire.
 *
 * May return error in out of memory cases. The skb is freed on error
 * if @free_on_error is true.
 */
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is not linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);
/**
 * pskb_put - add data to the tail of a potentially fragmented buffer
 * @skb: start of the buffer to use
 * @tail: tail fragment of the buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the potentially
 * fragmented buffer. @tail must be the last fragment of @skb -- or
 * @skb itself. If this would exceed the total buffer size the kernel
 * will panic. A pointer to the first byte of the extra data is
 * returned.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 * skb_put - add data to a buffer
 * @skb: buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the buffer. If this would
 * exceed the total buffer size the kernel will panic. A pointer to the
 * first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 * skb_push - add data to the start of a buffer
 * @skb: buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the buffer at the buffer
 * start. If this would exceed the total buffer headroom the kernel will
 * panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 * skb_pull - remove data from the start of a buffer
 * @skb: buffer to use
 * @len: amount of data to remove
 *
 * This function removes data from the start of a buffer, returning
 * the memory to the headroom. A pointer to the next data in the buffer
 * is returned. Once the data has been pulled future pushes will overwrite
 * the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 * skb_trim - remove end from a buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * Cut the length of a buffer down by removing data from the tail. If
 * the buffer is already under the length specified it is not modified.
 * The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
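/* Example (illustrative sketch, not part of this file): the classic
 * reserve/put/push dance when constructing an outgoing frame with the
 * helpers above.  The sizes hlen/plen and the payload source are
 * hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for headers
 *	skb_put_data(skb, payload, plen);	// append payload at the tail
 *	skb_push(skb, sizeof(struct udphdr));	// then prepend a header
 */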
If 2036 * the buffer is already under the length specified it is not modified. 2037 * The skb must be linear. 2038 */ 2039 void skb_trim(struct sk_buff *skb, unsigned int len) 2040 { 2041 if (skb->len > len) 2042 __skb_trim(skb, len); 2043 } 2044 EXPORT_SYMBOL(skb_trim); 2045 2046 /* Trims skb to length len. It can change skb pointers. 2047 */ 2048 2049 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2050 { 2051 struct sk_buff **fragp; 2052 struct sk_buff *frag; 2053 int offset = skb_headlen(skb); 2054 int nfrags = skb_shinfo(skb)->nr_frags; 2055 int i; 2056 int err; 2057 2058 if (skb_cloned(skb) && 2059 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2060 return err; 2061 2062 i = 0; 2063 if (offset >= len) 2064 goto drop_pages; 2065 2066 for (; i < nfrags; i++) { 2067 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2068 2069 if (end < len) { 2070 offset = end; 2071 continue; 2072 } 2073 2074 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2075 2076 drop_pages: 2077 skb_shinfo(skb)->nr_frags = i; 2078 2079 for (; i < nfrags; i++) 2080 skb_frag_unref(skb, i); 2081 2082 if (skb_has_frag_list(skb)) 2083 skb_drop_fraglist(skb); 2084 goto done; 2085 } 2086 2087 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2088 fragp = &frag->next) { 2089 int end = offset + frag->len; 2090 2091 if (skb_shared(frag)) { 2092 struct sk_buff *nfrag; 2093 2094 nfrag = skb_clone(frag, GFP_ATOMIC); 2095 if (unlikely(!nfrag)) 2096 return -ENOMEM; 2097 2098 nfrag->next = frag->next; 2099 consume_skb(frag); 2100 frag = nfrag; 2101 *fragp = frag; 2102 } 2103 2104 if (end < len) { 2105 offset = end; 2106 continue; 2107 } 2108 2109 if (end > len && 2110 unlikely((err = pskb_trim(frag, len - offset)))) 2111 return err; 2112 2113 if (frag->next) 2114 skb_drop_list(&frag->next); 2115 break; 2116 } 2117 2118 done: 2119 if (len > skb_headlen(skb)) { 2120 skb->data_len -= skb->len - len; 2121 skb->len = len; 2122 } else { 2123 skb->len = len; 2124 skb->data_len = 0; 2125 skb_set_tail_pointer(skb, len); 2126 } 2127 2128 if (!skb->sk || skb->destructor == sock_edemux) 2129 skb_condense(skb); 2130 return 0; 2131 } 2132 EXPORT_SYMBOL(___pskb_trim); 2133 2134 /* Note : use pskb_trim_rcsum() instead of calling this directly 2135 */ 2136 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2137 { 2138 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2139 int delta = skb->len - len; 2140 2141 skb->csum = csum_block_sub(skb->csum, 2142 skb_checksum(skb, len, delta, 0), 2143 len); 2144 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2145 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2146 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2147 2148 if (offset + sizeof(__sum16) > hdlen) 2149 return -EINVAL; 2150 } 2151 return __pskb_trim(skb, len); 2152 } 2153 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2154 2155 /** 2156 * __pskb_pull_tail - advance tail of skb header 2157 * @skb: buffer to reallocate 2158 * @delta: number of bytes to advance tail 2159 * 2160 * The function makes a sense only on a fragmented &sk_buff, 2161 * it expands header moving its tail forward and copying necessary 2162 * data from fragmented part. 2163 * 2164 * &sk_buff MUST have reference count of 1. 2165 * 2166 * Returns %NULL (and &sk_buff does not change) if pull failed 2167 * or value of new tail of skb in the case of success. 2168 * 2169 * All the pointers pointing into skb header may change and must be 2170 * reloaded after call to this function. 
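 *
 * Minimal usage sketch (hypothetical caller; this is normally reached
 * via pskb_may_pull() rather than called directly):
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);	(header pointers reloaded after the pull)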
2171 */ 2172 2173 /* Moves the tail of the skb head forward, copying data from the fragmented part 2174 * when necessary. 2175 * 1. It may fail due to an allocation failure. 2176 * 2. It may change skb pointers. 2177 * 2178 * It is pretty complicated. Luckily, it is called only in exceptional cases. 2179 */ 2180 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2181 { 2182 /* If the skb does not have enough free space at the tail, get a new one 2183 * plus 128 bytes for future expansions. If we have enough 2184 * room at the tail, reallocate without expansion only if skb is cloned. 2185 */ 2186 int i, k, eat = (skb->tail + delta) - skb->end; 2187 2188 if (eat > 0 || skb_cloned(skb)) { 2189 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2190 GFP_ATOMIC)) 2191 return NULL; 2192 } 2193 2194 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2195 skb_tail_pointer(skb), delta)); 2196 2197 /* Optimization: no fragments, no reason to preestimate 2198 * the size of pulled pages. Superb. 2199 */ 2200 if (!skb_has_frag_list(skb)) 2201 goto pull_pages; 2202 2203 /* Estimate size of pulled pages. */ 2204 eat = delta; 2205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2206 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2207 2208 if (size >= eat) 2209 goto pull_pages; 2210 eat -= size; 2211 } 2212 2213 /* If we need to update the frag list, we are in trouble. 2214 * Certainly, it is possible to add an offset to the skb data, 2215 * but taking into account that pulling is expected to 2216 * be a very rare operation, it is worth fighting against 2217 * further bloating of the skb head and crucifying ourselves here instead. 2218 * Pure masochism, indeed. 8)8) 2219 */ 2220 if (eat) { 2221 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2222 struct sk_buff *clone = NULL; 2223 struct sk_buff *insp = NULL; 2224 2225 do { 2226 if (list->len <= eat) { 2227 /* Eaten as a whole. */ 2228 eat -= list->len; 2229 list = list->next; 2230 insp = list; 2231 } else { 2232 /* Eaten partially. */ 2233 2234 if (skb_shared(list)) { 2235 /* Sucks! We need to fork the list. :-( */ 2236 clone = skb_clone(list, GFP_ATOMIC); 2237 if (!clone) 2238 return NULL; 2239 insp = list->next; 2240 list = clone; 2241 } else { 2242 /* This may be pulled without 2243 * problems. */ 2244 insp = list; 2245 } 2246 if (!pskb_pull(list, eat)) { 2247 kfree_skb(clone); 2248 return NULL; 2249 } 2250 break; 2251 } 2252 } while (eat); 2253 2254 /* Free pulled out fragments. */ 2255 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2256 skb_shinfo(skb)->frag_list = list->next; 2257 kfree_skb(list); 2258 } 2259 /* And insert the new clone at the head. */ 2260 if (clone) { 2261 clone->next = list; 2262 skb_shinfo(skb)->frag_list = clone; 2263 } 2264 } 2265 /* Success! Now we may commit changes to skb data.
*/ 2266 2267 pull_pages: 2268 eat = delta; 2269 k = 0; 2270 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2271 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2272 2273 if (size <= eat) { 2274 skb_frag_unref(skb, i); 2275 eat -= size; 2276 } else { 2277 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2278 2279 *frag = skb_shinfo(skb)->frags[i]; 2280 if (eat) { 2281 skb_frag_off_add(frag, eat); 2282 skb_frag_size_sub(frag, eat); 2283 if (!i) 2284 goto end; 2285 eat = 0; 2286 } 2287 k++; 2288 } 2289 } 2290 skb_shinfo(skb)->nr_frags = k; 2291 2292 end: 2293 skb->tail += delta; 2294 skb->data_len -= delta; 2295 2296 if (!skb->data_len) 2297 skb_zcopy_clear(skb, false); 2298 2299 return skb_tail_pointer(skb); 2300 } 2301 EXPORT_SYMBOL(__pskb_pull_tail); 2302 2303 /** 2304 * skb_copy_bits - copy bits from skb to kernel buffer 2305 * @skb: source skb 2306 * @offset: offset in source 2307 * @to: destination buffer 2308 * @len: number of bytes to copy 2309 * 2310 * Copy the specified number of bytes from the source skb to the 2311 * destination buffer. 2312 * 2313 * CAUTION ! : 2314 * If its prototype is ever changed, 2315 * check arch/{*}/net/{*}.S files, 2316 * since it is called from BPF assembly code. 2317 */ 2318 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2319 { 2320 int start = skb_headlen(skb); 2321 struct sk_buff *frag_iter; 2322 int i, copy; 2323 2324 if (offset > (int)skb->len - len) 2325 goto fault; 2326 2327 /* Copy header. */ 2328 if ((copy = start - offset) > 0) { 2329 if (copy > len) 2330 copy = len; 2331 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2332 if ((len -= copy) == 0) 2333 return 0; 2334 offset += copy; 2335 to += copy; 2336 } 2337 2338 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2339 int end; 2340 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2341 2342 WARN_ON(start > offset + len); 2343 2344 end = start + skb_frag_size(f); 2345 if ((copy = end - offset) > 0) { 2346 u32 p_off, p_len, copied; 2347 struct page *p; 2348 u8 *vaddr; 2349 2350 if (copy > len) 2351 copy = len; 2352 2353 skb_frag_foreach_page(f, 2354 skb_frag_off(f) + offset - start, 2355 copy, p, p_off, p_len, copied) { 2356 vaddr = kmap_atomic(p); 2357 memcpy(to + copied, vaddr + p_off, p_len); 2358 kunmap_atomic(vaddr); 2359 } 2360 2361 if ((len -= copy) == 0) 2362 return 0; 2363 offset += copy; 2364 to += copy; 2365 } 2366 start = end; 2367 } 2368 2369 skb_walk_frags(skb, frag_iter) { 2370 int end; 2371 2372 WARN_ON(start > offset + len); 2373 2374 end = start + frag_iter->len; 2375 if ((copy = end - offset) > 0) { 2376 if (copy > len) 2377 copy = len; 2378 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2379 goto fault; 2380 if ((len -= copy) == 0) 2381 return 0; 2382 offset += copy; 2383 to += copy; 2384 } 2385 start = end; 2386 } 2387 2388 if (!len) 2389 return 0; 2390 2391 fault: 2392 return -EFAULT; 2393 } 2394 EXPORT_SYMBOL(skb_copy_bits); 2395 2396 /* 2397 * Callback from splice_to_pipe(), if we need to release some pages 2398 * at the end of the spd in case we error'ed out in filling the pipe. 
2399 */ 2400 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 2401 { 2402 put_page(spd->pages[i]); 2403 } 2404 2405 static struct page *linear_to_page(struct page *page, unsigned int *len, 2406 unsigned int *offset, 2407 struct sock *sk) 2408 { 2409 struct page_frag *pfrag = sk_page_frag(sk); 2410 2411 if (!sk_page_frag_refill(sk, pfrag)) 2412 return NULL; 2413 2414 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 2415 2416 memcpy(page_address(pfrag->page) + pfrag->offset, 2417 page_address(page) + *offset, *len); 2418 *offset = pfrag->offset; 2419 pfrag->offset += *len; 2420 2421 return pfrag->page; 2422 } 2423 2424 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 2425 struct page *page, 2426 unsigned int offset) 2427 { 2428 return spd->nr_pages && 2429 spd->pages[spd->nr_pages - 1] == page && 2430 (spd->partial[spd->nr_pages - 1].offset + 2431 spd->partial[spd->nr_pages - 1].len == offset); 2432 } 2433 2434 /* 2435 * Fill page/offset/length into spd, if it can hold more pages. 2436 */ 2437 static bool spd_fill_page(struct splice_pipe_desc *spd, 2438 struct pipe_inode_info *pipe, struct page *page, 2439 unsigned int *len, unsigned int offset, 2440 bool linear, 2441 struct sock *sk) 2442 { 2443 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2444 return true; 2445 2446 if (linear) { 2447 page = linear_to_page(page, len, &offset, sk); 2448 if (!page) 2449 return true; 2450 } 2451 if (spd_can_coalesce(spd, page, offset)) { 2452 spd->partial[spd->nr_pages - 1].len += *len; 2453 return false; 2454 } 2455 get_page(page); 2456 spd->pages[spd->nr_pages] = page; 2457 spd->partial[spd->nr_pages].len = *len; 2458 spd->partial[spd->nr_pages].offset = offset; 2459 spd->nr_pages++; 2460 2461 return false; 2462 } 2463 2464 static bool __splice_segment(struct page *page, unsigned int poff, 2465 unsigned int plen, unsigned int *off, 2466 unsigned int *len, 2467 struct splice_pipe_desc *spd, bool linear, 2468 struct sock *sk, 2469 struct pipe_inode_info *pipe) 2470 { 2471 if (!*len) 2472 return true; 2473 2474 /* skip this segment if already processed */ 2475 if (*off >= plen) { 2476 *off -= plen; 2477 return false; 2478 } 2479 2480 /* ignore any bits we already processed */ 2481 poff += *off; 2482 plen -= *off; 2483 *off = 0; 2484 2485 do { 2486 unsigned int flen = min(*len, plen); 2487 2488 if (spd_fill_page(spd, pipe, page, &flen, poff, 2489 linear, sk)) 2490 return true; 2491 poff += flen; 2492 plen -= flen; 2493 *len -= flen; 2494 } while (*len && plen); 2495 2496 return false; 2497 } 2498 2499 /* 2500 * Map linear and fragment data from the skb to spd. It reports true if the 2501 * pipe is full or if we already spliced the requested length. 2502 */ 2503 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 2504 unsigned int *offset, unsigned int *len, 2505 struct splice_pipe_desc *spd, struct sock *sk) 2506 { 2507 int seg; 2508 struct sk_buff *iter; 2509 2510 /* map the linear part : 2511 * If skb->head_frag is set, this 'linear' part is backed by a 2512 * fragment, and if the head is not shared with any clones then 2513 * we can avoid a copy since we own the head portion of this page. 
2514 */ 2515 if (__splice_segment(virt_to_page(skb->data), 2516 (unsigned long) skb->data & (PAGE_SIZE - 1), 2517 skb_headlen(skb), 2518 offset, len, spd, 2519 skb_head_is_locked(skb), 2520 sk, pipe)) 2521 return true; 2522 2523 /* 2524 * then map the fragments 2525 */ 2526 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 2527 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 2528 2529 if (__splice_segment(skb_frag_page(f), 2530 skb_frag_off(f), skb_frag_size(f), 2531 offset, len, spd, false, sk, pipe)) 2532 return true; 2533 } 2534 2535 skb_walk_frags(skb, iter) { 2536 if (*offset >= iter->len) { 2537 *offset -= iter->len; 2538 continue; 2539 } 2540 /* __skb_splice_bits() only fails if the output has no room 2541 * left, so no point in going over the frag_list for the error 2542 * case. 2543 */ 2544 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 2545 return true; 2546 } 2547 2548 return false; 2549 } 2550 2551 /* 2552 * Map data from the skb to a pipe. Should handle both the linear part, 2553 * the fragments, and the frag list. 2554 */ 2555 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 2556 struct pipe_inode_info *pipe, unsigned int tlen, 2557 unsigned int flags) 2558 { 2559 struct partial_page partial[MAX_SKB_FRAGS]; 2560 struct page *pages[MAX_SKB_FRAGS]; 2561 struct splice_pipe_desc spd = { 2562 .pages = pages, 2563 .partial = partial, 2564 .nr_pages_max = MAX_SKB_FRAGS, 2565 .ops = &nosteal_pipe_buf_ops, 2566 .spd_release = sock_spd_release, 2567 }; 2568 int ret = 0; 2569 2570 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 2571 2572 if (spd.nr_pages) 2573 ret = splice_to_pipe(pipe, &spd); 2574 2575 return ret; 2576 } 2577 EXPORT_SYMBOL_GPL(skb_splice_bits); 2578 2579 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, 2580 struct kvec *vec, size_t num, size_t size) 2581 { 2582 struct socket *sock = sk->sk_socket; 2583 2584 if (!sock) 2585 return -EINVAL; 2586 return kernel_sendmsg(sock, msg, vec, num, size); 2587 } 2588 2589 static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, 2590 size_t size, int flags) 2591 { 2592 struct socket *sock = sk->sk_socket; 2593 2594 if (!sock) 2595 return -EINVAL; 2596 return kernel_sendpage(sock, page, offset, size, flags); 2597 } 2598 2599 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg, 2600 struct kvec *vec, size_t num, size_t size); 2601 typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset, 2602 size_t size, int flags); 2603 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 2604 int len, sendmsg_func sendmsg, sendpage_func sendpage) 2605 { 2606 unsigned int orig_len = len; 2607 struct sk_buff *head = skb; 2608 unsigned short fragidx; 2609 int slen, ret; 2610 2611 do_frag_list: 2612 2613 /* Deal with head data */ 2614 while (offset < skb_headlen(skb) && len) { 2615 struct kvec kv; 2616 struct msghdr msg; 2617 2618 slen = min_t(int, len, skb_headlen(skb) - offset); 2619 kv.iov_base = skb->data + offset; 2620 kv.iov_len = slen; 2621 memset(&msg, 0, sizeof(msg)); 2622 msg.msg_flags = MSG_DONTWAIT; 2623 2624 ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, 2625 sendmsg_unlocked, sk, &msg, &kv, 1, slen); 2626 if (ret <= 0) 2627 goto error; 2628 2629 offset += ret; 2630 len -= ret; 2631 } 2632 2633 /* All the data was skb head? 
*/ 2634 if (!len) 2635 goto out; 2636 2637 /* Make offset relative to start of frags */ 2638 offset -= skb_headlen(skb); 2639 2640 /* Find where we are in frag list */ 2641 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 2642 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 2643 2644 if (offset < skb_frag_size(frag)) 2645 break; 2646 2647 offset -= skb_frag_size(frag); 2648 } 2649 2650 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 2651 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 2652 2653 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 2654 2655 while (slen) { 2656 ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, 2657 sendpage_unlocked, sk, 2658 skb_frag_page(frag), 2659 skb_frag_off(frag) + offset, 2660 slen, MSG_DONTWAIT); 2661 if (ret <= 0) 2662 goto error; 2663 2664 len -= ret; 2665 offset += ret; 2666 slen -= ret; 2667 } 2668 2669 offset = 0; 2670 } 2671 2672 if (len) { 2673 /* Process any frag lists */ 2674 2675 if (skb == head) { 2676 if (skb_has_frag_list(skb)) { 2677 skb = skb_shinfo(skb)->frag_list; 2678 goto do_frag_list; 2679 } 2680 } else if (skb->next) { 2681 skb = skb->next; 2682 goto do_frag_list; 2683 } 2684 } 2685 2686 out: 2687 return orig_len - len; 2688 2689 error: 2690 return orig_len == len ? ret : orig_len - len; 2691 } 2692 2693 /* Send skb data on a socket. Socket must be locked. */ 2694 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 2695 int len) 2696 { 2697 return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, 2698 kernel_sendpage_locked); 2699 } 2700 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 2701 2702 /* Send skb data on a socket. Socket must be unlocked. */ 2703 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 2704 { 2705 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 2706 sendpage_unlocked); 2707 } 2708 2709 /** 2710 * skb_store_bits - store bits from kernel buffer to skb 2711 * @skb: destination buffer 2712 * @offset: offset in destination 2713 * @from: source buffer 2714 * @len: number of bytes to copy 2715 * 2716 * Copy the specified number of bytes from the source buffer to the 2717 * destination skb. This function handles all the messy bits of 2718 * traversing fragment lists and such. 
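 *
 * Usage sketch (hypothetical values; the skb must already span
 * @offset + @len bytes, this function does not grow it):
 *
 *	u8 tag[4] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *	if (skb_store_bits(skb, 0, tag, sizeof(tag)))
 *		goto drop;	(-EFAULT: range falls outside the skb)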
2719 */ 2720 2721 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2722 { 2723 int start = skb_headlen(skb); 2724 struct sk_buff *frag_iter; 2725 int i, copy; 2726 2727 if (offset > (int)skb->len - len) 2728 goto fault; 2729 2730 if ((copy = start - offset) > 0) { 2731 if (copy > len) 2732 copy = len; 2733 skb_copy_to_linear_data_offset(skb, offset, from, copy); 2734 if ((len -= copy) == 0) 2735 return 0; 2736 offset += copy; 2737 from += copy; 2738 } 2739 2740 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2741 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2742 int end; 2743 2744 WARN_ON(start > offset + len); 2745 2746 end = start + skb_frag_size(frag); 2747 if ((copy = end - offset) > 0) { 2748 u32 p_off, p_len, copied; 2749 struct page *p; 2750 u8 *vaddr; 2751 2752 if (copy > len) 2753 copy = len; 2754 2755 skb_frag_foreach_page(frag, 2756 skb_frag_off(frag) + offset - start, 2757 copy, p, p_off, p_len, copied) { 2758 vaddr = kmap_atomic(p); 2759 memcpy(vaddr + p_off, from + copied, p_len); 2760 kunmap_atomic(vaddr); 2761 } 2762 2763 if ((len -= copy) == 0) 2764 return 0; 2765 offset += copy; 2766 from += copy; 2767 } 2768 start = end; 2769 } 2770 2771 skb_walk_frags(skb, frag_iter) { 2772 int end; 2773 2774 WARN_ON(start > offset + len); 2775 2776 end = start + frag_iter->len; 2777 if ((copy = end - offset) > 0) { 2778 if (copy > len) 2779 copy = len; 2780 if (skb_store_bits(frag_iter, offset - start, 2781 from, copy)) 2782 goto fault; 2783 if ((len -= copy) == 0) 2784 return 0; 2785 offset += copy; 2786 from += copy; 2787 } 2788 start = end; 2789 } 2790 if (!len) 2791 return 0; 2792 2793 fault: 2794 return -EFAULT; 2795 } 2796 EXPORT_SYMBOL(skb_store_bits); 2797 2798 /* Checksum skb data. */ 2799 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2800 __wsum csum, const struct skb_checksum_ops *ops) 2801 { 2802 int start = skb_headlen(skb); 2803 int i, copy = start - offset; 2804 struct sk_buff *frag_iter; 2805 int pos = 0; 2806 2807 /* Checksum header. 
*/ 2808 if (copy > 0) { 2809 if (copy > len) 2810 copy = len; 2811 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 2812 skb->data + offset, copy, csum); 2813 if ((len -= copy) == 0) 2814 return csum; 2815 offset += copy; 2816 pos = copy; 2817 } 2818 2819 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2820 int end; 2821 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2822 2823 WARN_ON(start > offset + len); 2824 2825 end = start + skb_frag_size(frag); 2826 if ((copy = end - offset) > 0) { 2827 u32 p_off, p_len, copied; 2828 struct page *p; 2829 __wsum csum2; 2830 u8 *vaddr; 2831 2832 if (copy > len) 2833 copy = len; 2834 2835 skb_frag_foreach_page(frag, 2836 skb_frag_off(frag) + offset - start, 2837 copy, p, p_off, p_len, copied) { 2838 vaddr = kmap_atomic(p); 2839 csum2 = INDIRECT_CALL_1(ops->update, 2840 csum_partial_ext, 2841 vaddr + p_off, p_len, 0); 2842 kunmap_atomic(vaddr); 2843 csum = INDIRECT_CALL_1(ops->combine, 2844 csum_block_add_ext, csum, 2845 csum2, pos, p_len); 2846 pos += p_len; 2847 } 2848 2849 if (!(len -= copy)) 2850 return csum; 2851 offset += copy; 2852 } 2853 start = end; 2854 } 2855 2856 skb_walk_frags(skb, frag_iter) { 2857 int end; 2858 2859 WARN_ON(start > offset + len); 2860 2861 end = start + frag_iter->len; 2862 if ((copy = end - offset) > 0) { 2863 __wsum csum2; 2864 if (copy > len) 2865 copy = len; 2866 csum2 = __skb_checksum(frag_iter, offset - start, 2867 copy, 0, ops); 2868 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 2869 csum, csum2, pos, copy); 2870 if ((len -= copy) == 0) 2871 return csum; 2872 offset += copy; 2873 pos += copy; 2874 } 2875 start = end; 2876 } 2877 BUG_ON(len); 2878 2879 return csum; 2880 } 2881 EXPORT_SYMBOL(__skb_checksum); 2882 2883 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2884 int len, __wsum csum) 2885 { 2886 const struct skb_checksum_ops ops = { 2887 .update = csum_partial_ext, 2888 .combine = csum_block_add_ext, 2889 }; 2890 2891 return __skb_checksum(skb, offset, len, csum, &ops); 2892 } 2893 EXPORT_SYMBOL(skb_checksum); 2894 2895 /* Both of above in one bottle. */ 2896 2897 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2898 u8 *to, int len) 2899 { 2900 int start = skb_headlen(skb); 2901 int i, copy = start - offset; 2902 struct sk_buff *frag_iter; 2903 int pos = 0; 2904 __wsum csum = 0; 2905 2906 /* Copy header. 
*/ 2907 if (copy > 0) { 2908 if (copy > len) 2909 copy = len; 2910 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2911 copy); 2912 if ((len -= copy) == 0) 2913 return csum; 2914 offset += copy; 2915 to += copy; 2916 pos = copy; 2917 } 2918 2919 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2920 int end; 2921 2922 WARN_ON(start > offset + len); 2923 2924 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2925 if ((copy = end - offset) > 0) { 2926 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2927 u32 p_off, p_len, copied; 2928 struct page *p; 2929 __wsum csum2; 2930 u8 *vaddr; 2931 2932 if (copy > len) 2933 copy = len; 2934 2935 skb_frag_foreach_page(frag, 2936 skb_frag_off(frag) + offset - start, 2937 copy, p, p_off, p_len, copied) { 2938 vaddr = kmap_atomic(p); 2939 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 2940 to + copied, 2941 p_len); 2942 kunmap_atomic(vaddr); 2943 csum = csum_block_add(csum, csum2, pos); 2944 pos += p_len; 2945 } 2946 2947 if (!(len -= copy)) 2948 return csum; 2949 offset += copy; 2950 to += copy; 2951 } 2952 start = end; 2953 } 2954 2955 skb_walk_frags(skb, frag_iter) { 2956 __wsum csum2; 2957 int end; 2958 2959 WARN_ON(start > offset + len); 2960 2961 end = start + frag_iter->len; 2962 if ((copy = end - offset) > 0) { 2963 if (copy > len) 2964 copy = len; 2965 csum2 = skb_copy_and_csum_bits(frag_iter, 2966 offset - start, 2967 to, copy); 2968 csum = csum_block_add(csum, csum2, pos); 2969 if ((len -= copy) == 0) 2970 return csum; 2971 offset += copy; 2972 to += copy; 2973 pos += copy; 2974 } 2975 start = end; 2976 } 2977 BUG_ON(len); 2978 return csum; 2979 } 2980 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2981 2982 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 2983 { 2984 __sum16 sum; 2985 2986 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 2987 /* See comments in __skb_checksum_complete(). */ 2988 if (likely(!sum)) { 2989 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 2990 !skb->csum_complete_sw) 2991 netdev_rx_csum_fault(skb->dev, skb); 2992 } 2993 if (!skb_shared(skb)) 2994 skb->csum_valid = !sum; 2995 return sum; 2996 } 2997 EXPORT_SYMBOL(__skb_checksum_complete_head); 2998 2999 /* This function assumes skb->csum already holds pseudo header's checksum, 3000 * which has been changed from the hardware checksum, for example, by 3001 * __skb_checksum_validate_complete(). And, the original skb->csum must 3002 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3003 * 3004 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3005 * zero. The new checksum is stored back into skb->csum unless the skb is 3006 * shared. 3007 */ 3008 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3009 { 3010 __wsum csum; 3011 __sum16 sum; 3012 3013 csum = skb_checksum(skb, 0, skb->len, 0); 3014 3015 sum = csum_fold(csum_add(skb->csum, csum)); 3016 /* This check is inverted, because we already knew the hardware 3017 * checksum is invalid before calling this function. So, if the 3018 * re-computed checksum is valid instead, then we have a mismatch 3019 * between the original skb->csum and skb_checksum(). This means either 3020 * the original hardware checksum is incorrect or we screw up skb->csum 3021 * when moving skb->data around. 
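 *
 * Concretely: on entry skb->csum holds the pseudo-header checksum, so
 * csum_fold(csum_add(skb->csum, sum-over-packet)) is zero exactly when
 * the packet data is consistent, and a nonzero fold confirms that the
 * packet really is corrupt.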
3022 */ 3023 if (likely(!sum)) { 3024 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3025 !skb->csum_complete_sw) 3026 netdev_rx_csum_fault(skb->dev, skb); 3027 } 3028 3029 if (!skb_shared(skb)) { 3030 /* Save full packet checksum */ 3031 skb->csum = csum; 3032 skb->ip_summed = CHECKSUM_COMPLETE; 3033 skb->csum_complete_sw = 1; 3034 skb->csum_valid = !sum; 3035 } 3036 3037 return sum; 3038 } 3039 EXPORT_SYMBOL(__skb_checksum_complete); 3040 3041 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3042 { 3043 net_warn_ratelimited( 3044 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3045 __func__); 3046 return 0; 3047 } 3048 3049 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3050 int offset, int len) 3051 { 3052 net_warn_ratelimited( 3053 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3054 __func__); 3055 return 0; 3056 } 3057 3058 static const struct skb_checksum_ops default_crc32c_ops = { 3059 .update = warn_crc32c_csum_update, 3060 .combine = warn_crc32c_csum_combine, 3061 }; 3062 3063 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3064 &default_crc32c_ops; 3065 EXPORT_SYMBOL(crc32c_csum_stub); 3066 3067 /** 3068 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3069 * @from: source buffer 3070 * 3071 * Calculates the amount of linear headroom needed in the 'to' skb passed 3072 * into skb_zerocopy(). 3073 */ 3074 unsigned int 3075 skb_zerocopy_headlen(const struct sk_buff *from) 3076 { 3077 unsigned int hlen = 0; 3078 3079 if (!from->head_frag || 3080 skb_headlen(from) < L1_CACHE_BYTES || 3081 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3082 hlen = skb_headlen(from); 3083 if (!hlen) 3084 hlen = from->len; 3085 } 3086 3087 if (skb_has_frag_list(from)) 3088 hlen = from->len; 3089 3090 return hlen; 3091 } 3092 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3093 3094 /** 3095 * skb_zerocopy - Zero copy skb to skb 3096 * @to: destination buffer 3097 * @from: source buffer 3098 * @len: number of bytes to copy from source buffer 3099 * @hlen: size of linear headroom in destination buffer 3100 * 3101 * Copies up to `len` bytes from `from` to `to` by creating references 3102 * to the frags in the source buffer. 3103 * 3104 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3105 * headroom in the `to` buffer. 
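 *
 * Usage sketch (hypothetical caller, in the style of the openvswitch
 * upcall path; error handling elided):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (to)
 *		err = skb_zerocopy(to, from, from->len, hlen);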
3106 * 3107 * Return value: 3108 * 0: everything is OK 3109 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3110 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3111 */ 3112 int 3113 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3114 { 3115 int i, j = 0; 3116 int plen = 0; /* length of skb->head fragment */ 3117 int ret; 3118 struct page *page; 3119 unsigned int offset; 3120 3121 BUG_ON(!from->head_frag && !hlen); 3122 3123 /* dont bother with small payloads */ 3124 if (len <= skb_tailroom(to)) 3125 return skb_copy_bits(from, 0, skb_put(to, len), len); 3126 3127 if (hlen) { 3128 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3129 if (unlikely(ret)) 3130 return ret; 3131 len -= hlen; 3132 } else { 3133 plen = min_t(int, skb_headlen(from), len); 3134 if (plen) { 3135 page = virt_to_head_page(from->head); 3136 offset = from->data - (unsigned char *)page_address(page); 3137 __skb_fill_page_desc(to, 0, page, offset, plen); 3138 get_page(page); 3139 j = 1; 3140 len -= plen; 3141 } 3142 } 3143 3144 to->truesize += len + plen; 3145 to->len += len + plen; 3146 to->data_len += len + plen; 3147 3148 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3149 skb_tx_error(from); 3150 return -ENOMEM; 3151 } 3152 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3153 3154 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3155 int size; 3156 3157 if (!len) 3158 break; 3159 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3160 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3161 len); 3162 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3163 len -= size; 3164 skb_frag_ref(to, j); 3165 j++; 3166 } 3167 skb_shinfo(to)->nr_frags = j; 3168 3169 return 0; 3170 } 3171 EXPORT_SYMBOL_GPL(skb_zerocopy); 3172 3173 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3174 { 3175 __wsum csum; 3176 long csstart; 3177 3178 if (skb->ip_summed == CHECKSUM_PARTIAL) 3179 csstart = skb_checksum_start_offset(skb); 3180 else 3181 csstart = skb_headlen(skb); 3182 3183 BUG_ON(csstart > skb_headlen(skb)); 3184 3185 skb_copy_from_linear_data(skb, to, csstart); 3186 3187 csum = 0; 3188 if (csstart != skb->len) 3189 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3190 skb->len - csstart); 3191 3192 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3193 long csstuff = csstart + skb->csum_offset; 3194 3195 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3196 } 3197 } 3198 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3199 3200 /** 3201 * skb_dequeue - remove from the head of the queue 3202 * @list: list to dequeue from 3203 * 3204 * Remove the head of the list. The list lock is taken so the function 3205 * may be used safely with other locking list functions. The head item is 3206 * returned or %NULL if the list is empty. 3207 */ 3208 3209 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3210 { 3211 unsigned long flags; 3212 struct sk_buff *result; 3213 3214 spin_lock_irqsave(&list->lock, flags); 3215 result = __skb_dequeue(list); 3216 spin_unlock_irqrestore(&list->lock, flags); 3217 return result; 3218 } 3219 EXPORT_SYMBOL(skb_dequeue); 3220 3221 /** 3222 * skb_dequeue_tail - remove from the tail of the queue 3223 * @list: list to dequeue from 3224 * 3225 * Remove the tail of the list. The list lock is taken so the function 3226 * may be used safely with other locking list functions. The tail item is 3227 * returned or %NULL if the list is empty. 
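 *
 * Typical drain pattern (sketch):
 *
 *	while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *		kfree_skb(skb);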
3228 */ 3229 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3230 { 3231 unsigned long flags; 3232 struct sk_buff *result; 3233 3234 spin_lock_irqsave(&list->lock, flags); 3235 result = __skb_dequeue_tail(list); 3236 spin_unlock_irqrestore(&list->lock, flags); 3237 return result; 3238 } 3239 EXPORT_SYMBOL(skb_dequeue_tail); 3240 3241 /** 3242 * skb_queue_purge - empty a list 3243 * @list: list to empty 3244 * 3245 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3246 * the list and one reference dropped. This function takes the list 3247 * lock and is atomic with respect to other list locking functions. 3248 */ 3249 void skb_queue_purge(struct sk_buff_head *list) 3250 { 3251 struct sk_buff *skb; 3252 while ((skb = skb_dequeue(list)) != NULL) 3253 kfree_skb(skb); 3254 } 3255 EXPORT_SYMBOL(skb_queue_purge); 3256 3257 /** 3258 * skb_rbtree_purge - empty a skb rbtree 3259 * @root: root of the rbtree to empty 3260 * Return value: the sum of truesizes of all purged skbs. 3261 * 3262 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3263 * the rbtree and one reference dropped. This function does not take 3264 * any lock. Synchronization should be handled by the caller (e.g., the TCP 3265 * out-of-order queue is protected by the socket lock). 3266 */ 3267 unsigned int skb_rbtree_purge(struct rb_root *root) 3268 { 3269 struct rb_node *p = rb_first(root); 3270 unsigned int sum = 0; 3271 3272 while (p) { 3273 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3274 3275 p = rb_next(p); 3276 rb_erase(&skb->rbnode, root); 3277 sum += skb->truesize; 3278 kfree_skb(skb); 3279 } 3280 return sum; 3281 } 3282 3283 /** 3284 * skb_queue_head - queue a buffer at the list head 3285 * @list: list to use 3286 * @newsk: buffer to queue 3287 * 3288 * Queue a buffer at the start of the list. This function takes the 3289 * list lock and can be used safely with other locking &sk_buff functions. 3290 * 3291 * 3292 * A buffer cannot be placed on two lists at the same time. 3293 */ 3294 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3295 { 3296 unsigned long flags; 3297 3298 spin_lock_irqsave(&list->lock, flags); 3299 __skb_queue_head(list, newsk); 3300 spin_unlock_irqrestore(&list->lock, flags); 3301 } 3302 EXPORT_SYMBOL(skb_queue_head); 3303 3304 /** 3305 * skb_queue_tail - queue a buffer at the list tail 3306 * @list: list to use 3307 * @newsk: buffer to queue 3308 * 3309 * Queue a buffer at the tail of the list. This function takes the 3310 * list lock and can be used safely with other locking &sk_buff functions. 3311 * 3312 * 3313 * A buffer cannot be placed on two lists at the same time. 3314 */ 3315 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3316 { 3317 unsigned long flags; 3318 3319 spin_lock_irqsave(&list->lock, flags); 3320 __skb_queue_tail(list, newsk); 3321 spin_unlock_irqrestore(&list->lock, flags); 3322 } 3323 EXPORT_SYMBOL(skb_queue_tail); 3324 3325 /** 3326 * skb_unlink - remove a buffer from a list 3327 * @skb: buffer to remove 3328 * @list: list to use 3329 * 3330 * Remove a packet from a list. The list locks are taken and this 3331 * function is atomic with respect to other locked list calls. 3332 * 3333 * You must know what list the SKB is on.
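 *
 * Sketch:
 *
 *	skb_unlink(skb, &sk->sk_receive_queue);
 *
 * where the caller knows @skb sits on exactly that queue; unlinking
 * from the wrong list corrupts both lists.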
3334 */ 3335 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3336 { 3337 unsigned long flags; 3338 3339 spin_lock_irqsave(&list->lock, flags); 3340 __skb_unlink(skb, list); 3341 spin_unlock_irqrestore(&list->lock, flags); 3342 } 3343 EXPORT_SYMBOL(skb_unlink); 3344 3345 /** 3346 * skb_append - append a buffer 3347 * @old: buffer to insert after 3348 * @newsk: buffer to insert 3349 * @list: list to use 3350 * 3351 * Place a packet after a given packet in a list. The list locks are taken 3352 * and this function is atomic with respect to other locked list calls. 3353 * A buffer cannot be placed on two lists at the same time. 3354 */ 3355 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 3356 { 3357 unsigned long flags; 3358 3359 spin_lock_irqsave(&list->lock, flags); 3360 __skb_queue_after(list, old, newsk); 3361 spin_unlock_irqrestore(&list->lock, flags); 3362 } 3363 EXPORT_SYMBOL(skb_append); 3364 3365 static inline void skb_split_inside_header(struct sk_buff *skb, 3366 struct sk_buff *skb1, 3367 const u32 len, const int pos) 3368 { 3369 int i; 3370 3371 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3372 pos - len); 3373 /* And move the data appendix as is. */ 3374 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 3375 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 3376 3377 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 3378 skb_shinfo(skb)->nr_frags = 0; 3379 skb1->data_len = skb->data_len; 3380 skb1->len += skb1->data_len; 3381 skb->data_len = 0; 3382 skb->len = len; 3383 skb_set_tail_pointer(skb, len); 3384 } 3385 3386 static inline void skb_split_no_header(struct sk_buff *skb, 3387 struct sk_buff *skb1, 3388 const u32 len, int pos) 3389 { 3390 int i, k = 0; 3391 const int nfrags = skb_shinfo(skb)->nr_frags; 3392 3393 skb_shinfo(skb)->nr_frags = 0; 3394 skb1->len = skb1->data_len = skb->len - len; 3395 skb->len = len; 3396 skb->data_len = len - pos; 3397 3398 for (i = 0; i < nfrags; i++) { 3399 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 3400 3401 if (pos + size > len) { 3402 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 3403 3404 if (pos < len) { 3405 /* Split frag. 3406 * We have two variants in this case: 3407 * 1. Move the whole frag to the second 3408 * part, if it is possible. E.g. 3409 * this approach is mandatory for TUX, 3410 * where splitting is expensive. 3411 * 2. Split accurately. This is what we do. 3412 */ 3413 skb_frag_ref(skb, i); 3414 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 3415 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 3416 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 3417 skb_shinfo(skb)->nr_frags++; 3418 } 3419 k++; 3420 } else 3421 skb_shinfo(skb)->nr_frags++; 3422 pos += size; 3423 } 3424 skb_shinfo(skb1)->nr_frags = k; 3425 } 3426 3427 /** 3428 * skb_split - Split a fragmented skb into two parts at length len. 3429 * @skb: the buffer to split 3430 * @skb1: the buffer to receive the second part 3431 * @len: new length for skb 3432 */ 3433 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 3434 { 3435 int pos = skb_headlen(skb); 3436 3437 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; 3438 skb_zerocopy_clone(skb1, skb, 0); 3439 if (len < pos) /* Split line is inside the header. */ 3440 skb_split_inside_header(skb, skb1, len, pos); 3441 else /* Second chunk has no header, nothing to copy.
*/ 3442 skb_split_no_header(skb, skb1, len, pos); 3443 } 3444 EXPORT_SYMBOL(skb_split); 3445 3446 /* Shifting from/to a cloned skb is a no-go. 3447 * 3448 * The caller cannot keep skb_shinfo related pointers past calling here! 3449 */ 3450 static int skb_prepare_for_shift(struct sk_buff *skb) 3451 { 3452 int ret = 0; 3453 3454 if (skb_cloned(skb)) { 3455 /* Save and restore truesize: pskb_expand_head() may reallocate 3456 * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we 3457 * cannot change truesize at this point. 3458 */ 3459 unsigned int save_truesize = skb->truesize; 3460 3461 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3462 skb->truesize = save_truesize; 3463 } 3464 return ret; 3465 } 3466 3467 /** 3468 * skb_shift - Shifts paged data partially from skb to another buffer 3469 * @tgt: buffer into which tail data gets added 3470 * @skb: buffer from which the paged data comes 3471 * @shiftlen: shift up to this many bytes 3472 * 3473 * Attempts to shift up to shiftlen worth of bytes, which may be less than 3474 * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 3475 * It's up to the caller to free skb if everything was shifted. 3476 * 3477 * If @tgt runs out of frags, the whole operation is aborted. 3478 * 3479 * Skb cannot include anything but paged data, while tgt is allowed 3480 * to have non-paged data as well. 3481 * 3482 * TODO: a full sized shift could be optimized, but that would need a 3483 * specialized skb freer to handle frags without an up-to-date nr_frags. 3484 */ 3485 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 3486 { 3487 int from, to, merge, todo; 3488 skb_frag_t *fragfrom, *fragto; 3489 3490 BUG_ON(shiftlen > skb->len); 3491 3492 if (skb_headlen(skb)) 3493 return 0; 3494 if (skb_zcopy(tgt) || skb_zcopy(skb)) 3495 return 0; 3496 3497 todo = shiftlen; 3498 from = 0; 3499 to = skb_shinfo(tgt)->nr_frags; 3500 fragfrom = &skb_shinfo(skb)->frags[from]; 3501 3502 /* The actual merge is delayed until the point when we know we can 3503 * commit all, so that we don't have to undo partial changes. 3504 */ 3505 if (!to || 3506 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 3507 skb_frag_off(fragfrom))) { 3508 merge = -1; 3509 } else { 3510 merge = to - 1; 3511 3512 todo -= skb_frag_size(fragfrom); 3513 if (todo < 0) { 3514 if (skb_prepare_for_shift(skb) || 3515 skb_prepare_for_shift(tgt)) 3516 return 0; 3517 3518 /* All previous frag pointers might be stale!
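 * (skb_prepare_for_shift() may have called pskb_expand_head(), which
 * reallocates the head and the shared info, so fragfrom/fragto must
 * be reloaded before use)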
*/ 3519 fragfrom = &skb_shinfo(skb)->frags[from]; 3520 fragto = &skb_shinfo(tgt)->frags[merge]; 3521 3522 skb_frag_size_add(fragto, shiftlen); 3523 skb_frag_size_sub(fragfrom, shiftlen); 3524 skb_frag_off_add(fragfrom, shiftlen); 3525 3526 goto onlymerged; 3527 } 3528 3529 from++; 3530 } 3531 3532 /* Skip full, not-fitting skb to avoid expensive operations */ 3533 if ((shiftlen == skb->len) && 3534 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 3535 return 0; 3536 3537 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 3538 return 0; 3539 3540 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 3541 if (to == MAX_SKB_FRAGS) 3542 return 0; 3543 3544 fragfrom = &skb_shinfo(skb)->frags[from]; 3545 fragto = &skb_shinfo(tgt)->frags[to]; 3546 3547 if (todo >= skb_frag_size(fragfrom)) { 3548 *fragto = *fragfrom; 3549 todo -= skb_frag_size(fragfrom); 3550 from++; 3551 to++; 3552 3553 } else { 3554 __skb_frag_ref(fragfrom); 3555 skb_frag_page_copy(fragto, fragfrom); 3556 skb_frag_off_copy(fragto, fragfrom); 3557 skb_frag_size_set(fragto, todo); 3558 3559 skb_frag_off_add(fragfrom, todo); 3560 skb_frag_size_sub(fragfrom, todo); 3561 todo = 0; 3562 3563 to++; 3564 break; 3565 } 3566 } 3567 3568 /* Ready to "commit" this state change to tgt */ 3569 skb_shinfo(tgt)->nr_frags = to; 3570 3571 if (merge >= 0) { 3572 fragfrom = &skb_shinfo(skb)->frags[0]; 3573 fragto = &skb_shinfo(tgt)->frags[merge]; 3574 3575 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 3576 __skb_frag_unref(fragfrom, skb->pp_recycle); 3577 } 3578 3579 /* Reposition in the original skb */ 3580 to = 0; 3581 while (from < skb_shinfo(skb)->nr_frags) 3582 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 3583 skb_shinfo(skb)->nr_frags = to; 3584 3585 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 3586 3587 onlymerged: 3588 /* Most likely the tgt won't ever need its checksum anymore, skb on 3589 * the other hand might need it if it needs to be resent 3590 */ 3591 tgt->ip_summed = CHECKSUM_PARTIAL; 3592 skb->ip_summed = CHECKSUM_PARTIAL; 3593 3594 /* Yak, is it really working this way? Some helper please? */ 3595 skb->len -= shiftlen; 3596 skb->data_len -= shiftlen; 3597 skb->truesize -= shiftlen; 3598 tgt->len += shiftlen; 3599 tgt->data_len += shiftlen; 3600 tgt->truesize += shiftlen; 3601 3602 return shiftlen; 3603 } 3604 3605 /** 3606 * skb_prepare_seq_read - Prepare a sequential read of skb data 3607 * @skb: the buffer to read 3608 * @from: lower offset of data to be read 3609 * @to: upper offset of data to be read 3610 * @st: state variable 3611 * 3612 * Initializes the specified state variable. Must be called before 3613 * invoking skb_seq_read() for the first time. 3614 */ 3615 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 3616 unsigned int to, struct skb_seq_state *st) 3617 { 3618 st->lower_offset = from; 3619 st->upper_offset = to; 3620 st->root_skb = st->cur_skb = skb; 3621 st->frag_idx = st->stepped_offset = 0; 3622 st->frag_data = NULL; 3623 st->frag_off = 0; 3624 } 3625 EXPORT_SYMBOL(skb_prepare_seq_read); 3626 3627 /** 3628 * skb_seq_read - Sequentially read skb data 3629 * @consumed: number of bytes consumed by the caller so far 3630 * @data: destination pointer for data to be returned 3631 * @st: state variable 3632 * 3633 * Reads a block of skb data at @consumed relative to the 3634 * lower offset specified to skb_prepare_seq_read(). 
Assigns 3635 * the head of the data block to @data and returns the length 3636 * of the block or 0 if the end of the skb data or the upper 3637 * offset has been reached. 3638 * 3639 * The caller is not required to consume all of the data 3640 * returned, i.e. @consumed is typically set to the number 3641 * of bytes already consumed and the next call to 3642 * skb_seq_read() will return the remaining part of the block. 3643 * 3644 * Note 1: The size of each block of data returned can be arbitrary, 3645 * this limitation is the cost for zerocopy sequential 3646 * reads of potentially non linear data. 3647 * 3648 * Note 2: Fragment lists within fragments are not implemented 3649 * at the moment, state->root_skb could be replaced with 3650 * a stack for this purpose. 3651 */ 3652 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 3653 struct skb_seq_state *st) 3654 { 3655 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 3656 skb_frag_t *frag; 3657 3658 if (unlikely(abs_offset >= st->upper_offset)) { 3659 if (st->frag_data) { 3660 kunmap_atomic(st->frag_data); 3661 st->frag_data = NULL; 3662 } 3663 return 0; 3664 } 3665 3666 next_skb: 3667 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 3668 3669 if (abs_offset < block_limit && !st->frag_data) { 3670 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 3671 return block_limit - abs_offset; 3672 } 3673 3674 if (st->frag_idx == 0 && !st->frag_data) 3675 st->stepped_offset += skb_headlen(st->cur_skb); 3676 3677 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 3678 unsigned int pg_idx, pg_off, pg_sz; 3679 3680 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 3681 3682 pg_idx = 0; 3683 pg_off = skb_frag_off(frag); 3684 pg_sz = skb_frag_size(frag); 3685 3686 if (skb_frag_must_loop(skb_frag_page(frag))) { 3687 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 3688 pg_off = offset_in_page(pg_off + st->frag_off); 3689 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 3690 PAGE_SIZE - pg_off); 3691 } 3692 3693 block_limit = pg_sz + st->stepped_offset; 3694 if (abs_offset < block_limit) { 3695 if (!st->frag_data) 3696 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 3697 3698 *data = (u8 *)st->frag_data + pg_off + 3699 (abs_offset - st->stepped_offset); 3700 3701 return block_limit - abs_offset; 3702 } 3703 3704 if (st->frag_data) { 3705 kunmap_atomic(st->frag_data); 3706 st->frag_data = NULL; 3707 } 3708 3709 st->stepped_offset += pg_sz; 3710 st->frag_off += pg_sz; 3711 if (st->frag_off == skb_frag_size(frag)) { 3712 st->frag_off = 0; 3713 st->frag_idx++; 3714 } 3715 } 3716 3717 if (st->frag_data) { 3718 kunmap_atomic(st->frag_data); 3719 st->frag_data = NULL; 3720 } 3721 3722 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 3723 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 3724 st->frag_idx = 0; 3725 goto next_skb; 3726 } else if (st->cur_skb->next) { 3727 st->cur_skb = st->cur_skb->next; 3728 st->frag_idx = 0; 3729 goto next_skb; 3730 } 3731 3732 return 0; 3733 } 3734 EXPORT_SYMBOL(skb_seq_read); 3735 3736 /** 3737 * skb_abort_seq_read - Abort a sequential read of skb data 3738 * @st: state variable 3739 * 3740 * Must be called if skb_seq_read() was not called until it 3741 * returned 0. 
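 *
 * Sketch of the whole sequential-read API (hypothetical consumer):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int len, consumed = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
 *		consumed += len;	(process data[0..len) here)
 *	(skb_abort_seq_read(&st) is only needed when stopping early)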
3742 */ 3743 void skb_abort_seq_read(struct skb_seq_state *st) 3744 { 3745 if (st->frag_data) 3746 kunmap_atomic(st->frag_data); 3747 } 3748 EXPORT_SYMBOL(skb_abort_seq_read); 3749 3750 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 3751 3752 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 3753 struct ts_config *conf, 3754 struct ts_state *state) 3755 { 3756 return skb_seq_read(offset, text, TS_SKB_CB(state)); 3757 } 3758 3759 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 3760 { 3761 skb_abort_seq_read(TS_SKB_CB(state)); 3762 } 3763 3764 /** 3765 * skb_find_text - Find a text pattern in skb data 3766 * @skb: the buffer to look in 3767 * @from: search offset 3768 * @to: search limit 3769 * @config: textsearch configuration 3770 * 3771 * Finds a pattern in the skb data according to the specified 3772 * textsearch configuration. Use textsearch_next() to retrieve 3773 * subsequent occurrences of the pattern. Returns the offset 3774 * to the first occurrence or UINT_MAX if no match was found. 3775 */ 3776 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 3777 unsigned int to, struct ts_config *config) 3778 { 3779 struct ts_state state; 3780 unsigned int ret; 3781 3782 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 3783 3784 config->get_next_block = skb_ts_get_next_block; 3785 config->finish = skb_ts_finish; 3786 3787 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 3788 3789 ret = textsearch_find(config, &state); 3790 return (ret <= to - from ? ret : UINT_MAX); 3791 } 3792 EXPORT_SYMBOL(skb_find_text); 3793 3794 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3795 int offset, size_t size) 3796 { 3797 int i = skb_shinfo(skb)->nr_frags; 3798 3799 if (skb_can_coalesce(skb, i, page, offset)) { 3800 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3801 } else if (i < MAX_SKB_FRAGS) { 3802 get_page(page); 3803 skb_fill_page_desc(skb, i, page, offset, size); 3804 } else { 3805 return -EMSGSIZE; 3806 } 3807 3808 return 0; 3809 } 3810 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3811 3812 /** 3813 * skb_pull_rcsum - pull skb and update receive checksum 3814 * @skb: buffer to update 3815 * @len: length of data pulled 3816 * 3817 * This function performs an skb_pull on the packet and updates 3818 * the CHECKSUM_COMPLETE checksum. It should be used on 3819 * receive path processing instead of skb_pull unless you know 3820 * that the checksum difference is zero (e.g., a valid IP header) 3821 * or you are setting ip_summed to CHECKSUM_NONE. 
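 *
 * Sketch (hypothetical decapsulation path; encap_hlen is a made-up
 * name for the size of the outer header being stripped):
 *
 *	skb_pull_rcsum(skb, encap_hlen);
 *
 * which keeps a CHECKSUM_COMPLETE skb->csum consistent with the data
 * remaining after the header is removed.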
3822 */ 3823 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3824 { 3825 unsigned char *data = skb->data; 3826 3827 BUG_ON(len > skb->len); 3828 __skb_pull(skb, len); 3829 skb_postpull_rcsum(skb, data, len); 3830 return skb->data; 3831 } 3832 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3833 3834 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 3835 { 3836 skb_frag_t head_frag; 3837 struct page *page; 3838 3839 page = virt_to_head_page(frag_skb->head); 3840 __skb_frag_set_page(&head_frag, page); 3841 skb_frag_off_set(&head_frag, frag_skb->data - 3842 (unsigned char *)page_address(page)); 3843 skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); 3844 return head_frag; 3845 } 3846 3847 struct sk_buff *skb_segment_list(struct sk_buff *skb, 3848 netdev_features_t features, 3849 unsigned int offset) 3850 { 3851 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 3852 unsigned int tnl_hlen = skb_tnl_header_len(skb); 3853 unsigned int delta_truesize = 0; 3854 unsigned int delta_len = 0; 3855 struct sk_buff *tail = NULL; 3856 struct sk_buff *nskb, *tmp; 3857 int err; 3858 3859 skb_push(skb, -skb_network_offset(skb) + offset); 3860 3861 skb_shinfo(skb)->frag_list = NULL; 3862 3863 do { 3864 nskb = list_skb; 3865 list_skb = list_skb->next; 3866 3867 err = 0; 3868 if (skb_shared(nskb)) { 3869 tmp = skb_clone(nskb, GFP_ATOMIC); 3870 if (tmp) { 3871 consume_skb(nskb); 3872 nskb = tmp; 3873 err = skb_unclone(nskb, GFP_ATOMIC); 3874 } else { 3875 err = -ENOMEM; 3876 } 3877 } 3878 3879 if (!tail) 3880 skb->next = nskb; 3881 else 3882 tail->next = nskb; 3883 3884 if (unlikely(err)) { 3885 nskb->next = list_skb; 3886 goto err_linearize; 3887 } 3888 3889 tail = nskb; 3890 3891 delta_len += nskb->len; 3892 delta_truesize += nskb->truesize; 3893 3894 skb_push(nskb, -skb_network_offset(nskb) + offset); 3895 3896 skb_release_head_state(nskb); 3897 __copy_skb_header(nskb, skb); 3898 3899 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 3900 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 3901 nskb->data - tnl_hlen, 3902 offset + tnl_hlen); 3903 3904 if (skb_needs_linearize(nskb, features) && 3905 __skb_linearize(nskb)) 3906 goto err_linearize; 3907 3908 } while (list_skb); 3909 3910 skb->truesize = skb->truesize - delta_truesize; 3911 skb->data_len = skb->data_len - delta_len; 3912 skb->len = skb->len - delta_len; 3913 3914 skb_gso_reset(skb); 3915 3916 skb->prev = tail; 3917 3918 if (skb_needs_linearize(skb, features) && 3919 __skb_linearize(skb)) 3920 goto err_linearize; 3921 3922 skb_get(skb); 3923 3924 return skb; 3925 3926 err_linearize: 3927 kfree_skb_list(skb->next); 3928 skb->next = NULL; 3929 return ERR_PTR(-ENOMEM); 3930 } 3931 EXPORT_SYMBOL_GPL(skb_segment_list); 3932 3933 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) 3934 { 3935 if (unlikely(p->len + skb->len >= 65536)) 3936 return -E2BIG; 3937 3938 if (NAPI_GRO_CB(p)->last == p) 3939 skb_shinfo(p)->frag_list = skb; 3940 else 3941 NAPI_GRO_CB(p)->last->next = skb; 3942 3943 skb_pull(skb, skb_gro_offset(skb)); 3944 3945 NAPI_GRO_CB(p)->last = skb; 3946 NAPI_GRO_CB(p)->count++; 3947 p->data_len += skb->len; 3948 3949 /* sk ownership - if any - is completely transferred to the aggregated packet */ 3950 skb->destructor = NULL; 3951 p->truesize += skb->truesize; 3952 p->len += skb->len; 3953 3954 NAPI_GRO_CB(skb)->same_flow = 1; 3955 3956 return 0; 3957 } 3958 3959 /** 3960 * skb_segment - Perform protocol segmentation on skb.
3961 * @head_skb: buffer to segment 3962 * @features: features for the output path (see dev->features) 3963 * 3964 * This function performs segmentation on the given skb. It returns 3965 * a pointer to the first in a list of new skbs for the segments. 3966 * In case of error it returns ERR_PTR(err). 3967 */ 3968 struct sk_buff *skb_segment(struct sk_buff *head_skb, 3969 netdev_features_t features) 3970 { 3971 struct sk_buff *segs = NULL; 3972 struct sk_buff *tail = NULL; 3973 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3974 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3975 unsigned int mss = skb_shinfo(head_skb)->gso_size; 3976 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 3977 struct sk_buff *frag_skb = head_skb; 3978 unsigned int offset = doffset; 3979 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3980 unsigned int partial_segs = 0; 3981 unsigned int headroom; 3982 unsigned int len = head_skb->len; 3983 __be16 proto; 3984 bool csum, sg; 3985 int nfrags = skb_shinfo(head_skb)->nr_frags; 3986 int err = -ENOMEM; 3987 int i = 0; 3988 int pos; 3989 3990 if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && 3991 (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { 3992 /* gso_size is untrusted, and we have a frag_list with a linear 3993 * non head_frag head. 3994 * 3995 * (we assume checking the first list_skb member suffices; 3996 * i.e if either of the list_skb members have non head_frag 3997 * head, then the first one has too). 3998 * 3999 * If head_skb's headlen does not fit requested gso_size, it 4000 * means that the frag_list members do NOT terminate on exact 4001 * gso_size boundaries. Hence we cannot perform skb_frag_t page 4002 * sharing. Therefore we must fallback to copying the frag_list 4003 * skbs; we do so by disabling SG. 4004 */ 4005 if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) 4006 features &= ~NETIF_F_SG; 4007 } 4008 4009 __skb_push(head_skb, doffset); 4010 proto = skb_network_protocol(head_skb, NULL); 4011 if (unlikely(!proto)) 4012 return ERR_PTR(-EINVAL); 4013 4014 sg = !!(features & NETIF_F_SG); 4015 csum = !!can_checksum_protocol(features, proto); 4016 4017 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4018 if (!(features & NETIF_F_GSO_PARTIAL)) { 4019 struct sk_buff *iter; 4020 unsigned int frag_len; 4021 4022 if (!list_skb || 4023 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4024 goto normal; 4025 4026 /* If we get here then all the required 4027 * GSO features except frag_list are supported. 4028 * Try to split the SKB to multiple GSO SKBs 4029 * with no frag_list. 4030 * Currently we can do that only when the buffers don't 4031 * have a linear part and all the buffers except 4032 * the last are of the same length. 4033 */ 4034 frag_len = list_skb->len; 4035 skb_walk_frags(head_skb, iter) { 4036 if (frag_len != iter->len && iter->next) 4037 goto normal; 4038 if (skb_headlen(iter) && !iter->head_frag) 4039 goto normal; 4040 4041 len -= iter->len; 4042 } 4043 4044 if (len != frag_len) 4045 goto normal; 4046 } 4047 4048 /* GSO partial only requires that we trim off any excess that 4049 * doesn't fit into an MSS sized block, so take care of that 4050 * now. 
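 *
 * E.g. len = 65000 and mss = 1448 gives partial_segs = 44, so the
 * loop below emits super-segments of 44 * 1448 = 63712 bytes and the
 * device slices each one into MSS-sized frames.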
4051 */ 4052 partial_segs = len / mss; 4053 if (partial_segs > 1) 4054 mss *= partial_segs; 4055 else 4056 partial_segs = 0; 4057 } 4058 4059 normal: 4060 headroom = skb_headroom(head_skb); 4061 pos = skb_headlen(head_skb); 4062 4063 do { 4064 struct sk_buff *nskb; 4065 skb_frag_t *nskb_frag; 4066 int hsize; 4067 int size; 4068 4069 if (unlikely(mss == GSO_BY_FRAGS)) { 4070 len = list_skb->len; 4071 } else { 4072 len = head_skb->len - offset; 4073 if (len > mss) 4074 len = mss; 4075 } 4076 4077 hsize = skb_headlen(head_skb) - offset; 4078 4079 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4080 (skb_headlen(list_skb) == len || sg)) { 4081 BUG_ON(skb_headlen(list_skb) > len); 4082 4083 i = 0; 4084 nfrags = skb_shinfo(list_skb)->nr_frags; 4085 frag = skb_shinfo(list_skb)->frags; 4086 frag_skb = list_skb; 4087 pos += skb_headlen(list_skb); 4088 4089 while (pos < offset + len) { 4090 BUG_ON(i >= nfrags); 4091 4092 size = skb_frag_size(frag); 4093 if (pos + size > offset + len) 4094 break; 4095 4096 i++; 4097 pos += size; 4098 frag++; 4099 } 4100 4101 nskb = skb_clone(list_skb, GFP_ATOMIC); 4102 list_skb = list_skb->next; 4103 4104 if (unlikely(!nskb)) 4105 goto err; 4106 4107 if (unlikely(pskb_trim(nskb, len))) { 4108 kfree_skb(nskb); 4109 goto err; 4110 } 4111 4112 hsize = skb_end_offset(nskb); 4113 if (skb_cow_head(nskb, doffset + headroom)) { 4114 kfree_skb(nskb); 4115 goto err; 4116 } 4117 4118 nskb->truesize += skb_end_offset(nskb) - hsize; 4119 skb_release_head_state(nskb); 4120 __skb_push(nskb, doffset); 4121 } else { 4122 if (hsize < 0) 4123 hsize = 0; 4124 if (hsize > len || !sg) 4125 hsize = len; 4126 4127 nskb = __alloc_skb(hsize + doffset + headroom, 4128 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4129 NUMA_NO_NODE); 4130 4131 if (unlikely(!nskb)) 4132 goto err; 4133 4134 skb_reserve(nskb, headroom); 4135 __skb_put(nskb, doffset); 4136 } 4137 4138 if (segs) 4139 tail->next = nskb; 4140 else 4141 segs = nskb; 4142 tail = nskb; 4143 4144 __copy_skb_header(nskb, head_skb); 4145 4146 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4147 skb_reset_mac_len(nskb); 4148 4149 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4150 nskb->data - tnl_hlen, 4151 doffset + tnl_hlen); 4152 4153 if (nskb->len == len + doffset) 4154 goto perform_csum_check; 4155 4156 if (!sg) { 4157 if (!csum) { 4158 if (!nskb->remcsum_offload) 4159 nskb->ip_summed = CHECKSUM_NONE; 4160 SKB_GSO_CB(nskb)->csum = 4161 skb_copy_and_csum_bits(head_skb, offset, 4162 skb_put(nskb, 4163 len), 4164 len); 4165 SKB_GSO_CB(nskb)->csum_start = 4166 skb_headroom(nskb) + doffset; 4167 } else { 4168 skb_copy_bits(head_skb, offset, 4169 skb_put(nskb, len), 4170 len); 4171 } 4172 continue; 4173 } 4174 4175 nskb_frag = skb_shinfo(nskb)->frags; 4176 4177 skb_copy_from_linear_data_offset(head_skb, offset, 4178 skb_put(nskb, hsize), hsize); 4179 4180 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4181 SKBFL_SHARED_FRAG; 4182 4183 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4184 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4185 goto err; 4186 4187 while (pos < offset + len) { 4188 if (i >= nfrags) { 4189 i = 0; 4190 nfrags = skb_shinfo(list_skb)->nr_frags; 4191 frag = skb_shinfo(list_skb)->frags; 4192 frag_skb = list_skb; 4193 if (!skb_headlen(list_skb)) { 4194 BUG_ON(!nfrags); 4195 } else { 4196 BUG_ON(!list_skb->head_frag); 4197 4198 /* to make room for head_frag. 
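					 * (i goes to -1 here: the copy loop
					 * below then substitutes
					 * skb_head_frag_to_page_desc(frag_skb)
					 * for a real frag entry.)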
 */
					i--;
					frag--;
				}
				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
				    skb_zerocopy_clone(nskb, frag_skb,
						       GFP_ATOMIC))
					goto err;

				list_skb = list_skb->next;
			}

			if (unlikely(skb_shinfo(nskb)->nr_frags >=
				     MAX_SKB_FRAGS)) {
				net_warn_ratelimited(
					"skb_segment: too many frags: %u %u\n",
					pos, mss);
				err = -EINVAL;
				goto err;
			}

			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
			__skb_frag_ref(nskb_frag);
			size = skb_frag_size(nskb_frag);

			if (pos < offset) {
				skb_frag_off_add(nskb_frag, offset - pos);
				skb_frag_size_sub(nskb_frag, offset - pos);
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				frag++;
				pos += size;
			} else {
				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
				goto skip_fraglist;
			}

			nskb_frag++;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;

perform_csum_check:
		if (!csum) {
			if (skb_has_shared_frag(nskb) &&
			    __skb_linearize(nskb))
				goto err;

			if (!nskb->remcsum_offload)
				nskb->ip_summed = CHECKSUM_NONE;
			SKB_GSO_CB(nskb)->csum =
				skb_checksum(nskb, doffset,
					     nskb->len - doffset, 0);
			SKB_GSO_CB(nskb)->csum_start =
				skb_headroom(nskb) + doffset;
		}
	} while ((offset += len) < head_skb->len);

	/* Some callers want to get the end of the list.
	 * Put it in segs->prev to avoid walking the list.
	 * (see validate_xmit_skb_list() for example)
	 */
	segs->prev = tail;

	if (partial_segs) {
		struct sk_buff *iter;
		int type = skb_shinfo(head_skb)->gso_type;
		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;

		/* Update type to add partial and then remove dodgy if set */
		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
		type &= ~SKB_GSO_DODGY;

		/* Update GSO info and prepare to start updating headers on
		 * our way back down the stack of protocols.
		 */
		for (iter = segs; iter; iter = iter->next) {
			skb_shinfo(iter)->gso_size = gso_size;
			skb_shinfo(iter)->gso_segs = partial_segs;
			skb_shinfo(iter)->gso_type = type;
			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
		}

		if (tail->len - doffset <= gso_size)
			skb_shinfo(tail)->gso_size = 0;
		else if (tail != segs)
			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
	}

	/* Following permits correct backpressure, for protocols
	 * using skb_set_owner_w().
	 * Idea is to transfer ownership from head_skb to the last segment.
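	 * The last segment is the last one to be freed, so the sending
	 * socket keeps its send-buffer charge until every segment has
	 * actually left the stack.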
4297 */ 4298 if (head_skb->destructor == sock_wfree) { 4299 swap(tail->truesize, head_skb->truesize); 4300 swap(tail->destructor, head_skb->destructor); 4301 swap(tail->sk, head_skb->sk); 4302 } 4303 return segs; 4304 4305 err: 4306 kfree_skb_list(segs); 4307 return ERR_PTR(err); 4308 } 4309 EXPORT_SYMBOL_GPL(skb_segment); 4310 4311 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) 4312 { 4313 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 4314 unsigned int offset = skb_gro_offset(skb); 4315 unsigned int headlen = skb_headlen(skb); 4316 unsigned int len = skb_gro_len(skb); 4317 unsigned int delta_truesize; 4318 unsigned int new_truesize; 4319 struct sk_buff *lp; 4320 4321 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) 4322 return -E2BIG; 4323 4324 lp = NAPI_GRO_CB(p)->last; 4325 pinfo = skb_shinfo(lp); 4326 4327 if (headlen <= offset) { 4328 skb_frag_t *frag; 4329 skb_frag_t *frag2; 4330 int i = skbinfo->nr_frags; 4331 int nr_frags = pinfo->nr_frags + i; 4332 4333 if (nr_frags > MAX_SKB_FRAGS) 4334 goto merge; 4335 4336 offset -= headlen; 4337 pinfo->nr_frags = nr_frags; 4338 skbinfo->nr_frags = 0; 4339 4340 frag = pinfo->frags + nr_frags; 4341 frag2 = skbinfo->frags + i; 4342 do { 4343 *--frag = *--frag2; 4344 } while (--i); 4345 4346 skb_frag_off_add(frag, offset); 4347 skb_frag_size_sub(frag, offset); 4348 4349 /* all fragments truesize : remove (head size + sk_buff) */ 4350 new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); 4351 delta_truesize = skb->truesize - new_truesize; 4352 4353 skb->truesize = new_truesize; 4354 skb->len -= skb->data_len; 4355 skb->data_len = 0; 4356 4357 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 4358 goto done; 4359 } else if (skb->head_frag) { 4360 int nr_frags = pinfo->nr_frags; 4361 skb_frag_t *frag = pinfo->frags + nr_frags; 4362 struct page *page = virt_to_head_page(skb->head); 4363 unsigned int first_size = headlen - offset; 4364 unsigned int first_offset; 4365 4366 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 4367 goto merge; 4368 4369 first_offset = skb->data - 4370 (unsigned char *)page_address(page) + 4371 offset; 4372 4373 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 4374 4375 __skb_frag_set_page(frag, page); 4376 skb_frag_off_set(frag, first_offset); 4377 skb_frag_size_set(frag, first_size); 4378 4379 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 4380 /* We dont need to clear skbinfo->nr_frags here */ 4381 4382 new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4383 delta_truesize = skb->truesize - new_truesize; 4384 skb->truesize = new_truesize; 4385 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 4386 goto done; 4387 } 4388 4389 merge: 4390 /* sk owenrship - if any - completely transferred to the aggregated packet */ 4391 skb->destructor = NULL; 4392 delta_truesize = skb->truesize; 4393 if (offset > headlen) { 4394 unsigned int eat = offset - headlen; 4395 4396 skb_frag_off_add(&skbinfo->frags[0], eat); 4397 skb_frag_size_sub(&skbinfo->frags[0], eat); 4398 skb->data_len -= eat; 4399 skb->len -= eat; 4400 offset = headlen; 4401 } 4402 4403 __skb_pull(skb, offset); 4404 4405 if (NAPI_GRO_CB(p)->last == p) 4406 skb_shinfo(p)->frag_list = skb; 4407 else 4408 NAPI_GRO_CB(p)->last->next = skb; 4409 NAPI_GRO_CB(p)->last = skb; 4410 __skb_header_release(skb); 4411 lp = p; 4412 4413 done: 4414 NAPI_GRO_CB(p)->count++; 4415 p->data_len += len; 4416 p->truesize += delta_truesize; 4417 p->len += len; 4418 if (lp != p) { 4419 lp->data_len += len; 4420 lp->truesize += 
delta_truesize; 4421 lp->len += len; 4422 } 4423 NAPI_GRO_CB(skb)->same_flow = 1; 4424 return 0; 4425 } 4426 4427 #ifdef CONFIG_SKB_EXTENSIONS 4428 #define SKB_EXT_ALIGN_VALUE 8 4429 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4430 4431 static const u8 skb_ext_type_len[] = { 4432 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4433 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4434 #endif 4435 #ifdef CONFIG_XFRM 4436 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4437 #endif 4438 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4439 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4440 #endif 4441 #if IS_ENABLED(CONFIG_MPTCP) 4442 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4443 #endif 4444 }; 4445 4446 static __always_inline unsigned int skb_ext_total_length(void) 4447 { 4448 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + 4449 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4450 skb_ext_type_len[SKB_EXT_BRIDGE_NF] + 4451 #endif 4452 #ifdef CONFIG_XFRM 4453 skb_ext_type_len[SKB_EXT_SEC_PATH] + 4454 #endif 4455 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4456 skb_ext_type_len[TC_SKB_EXT] + 4457 #endif 4458 #if IS_ENABLED(CONFIG_MPTCP) 4459 skb_ext_type_len[SKB_EXT_MPTCP] + 4460 #endif 4461 0; 4462 } 4463 4464 static void skb_extensions_init(void) 4465 { 4466 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4467 BUILD_BUG_ON(skb_ext_total_length() > 255); 4468 4469 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4470 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4471 0, 4472 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4473 NULL); 4474 } 4475 #else 4476 static void skb_extensions_init(void) {} 4477 #endif 4478 4479 void __init skb_init(void) 4480 { 4481 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", 4482 sizeof(struct sk_buff), 4483 0, 4484 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4485 offsetof(struct sk_buff, cb), 4486 sizeof_field(struct sk_buff, cb), 4487 NULL); 4488 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4489 sizeof(struct sk_buff_fclones), 4490 0, 4491 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4492 NULL); 4493 skb_extensions_init(); 4494 } 4495 4496 static int 4497 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 4498 unsigned int recursion_level) 4499 { 4500 int start = skb_headlen(skb); 4501 int i, copy = start - offset; 4502 struct sk_buff *frag_iter; 4503 int elt = 0; 4504 4505 if (unlikely(recursion_level >= 24)) 4506 return -EMSGSIZE; 4507 4508 if (copy > 0) { 4509 if (copy > len) 4510 copy = len; 4511 sg_set_buf(sg, skb->data + offset, copy); 4512 elt++; 4513 if ((len -= copy) == 0) 4514 return elt; 4515 offset += copy; 4516 } 4517 4518 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4519 int end; 4520 4521 WARN_ON(start > offset + len); 4522 4523 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4524 if ((copy = end - offset) > 0) { 4525 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4526 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4527 return -EMSGSIZE; 4528 4529 if (copy > len) 4530 copy = len; 4531 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4532 skb_frag_off(frag) + offset - start); 4533 elt++; 4534 if (!(len -= copy)) 4535 return elt; 4536 offset += copy; 4537 } 4538 start = end; 4539 } 4540 4541 skb_walk_frags(skb, frag_iter) { 4542 int end, ret; 4543 4544 WARN_ON(start > offset + len); 4545 4546 end = start + frag_iter->len; 4547 if ((copy = end - offset) > 0) { 4548 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4549 return -EMSGSIZE; 4550 4551 if (copy > 
len)
				copy = len;
			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					     copy, recursion_level + 1);
			if (unlikely(ret < 0))
				return ret;
			elt += ret;
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer. Returns either
 * the number of scatterlist items used, or -EMSGSIZE if the contents
 * could not fit.
 */
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);

	if (nsg <= 0)
		return nsg;

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
 * given sglist without marking the sg entry that contains the last skb data
 * as the end. This lets the caller append further payloads to the sg list
 * after the first call, without having to call sg_unmark_end to extend it.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark is
 * therefore preferable.
 */
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len)
{
	return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
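/* A minimal usage sketch (illustrative only, not part of the kernel tree):
 * map a whole skb into a caller-provided scatterlist, e.g. ahead of a
 * crypto or DMA operation. The function name and the @nents bound are
 * hypothetical; skb_to_sgvec() itself fails with -EMSGSIZE if the table
 * is too small.
 */
static int __maybe_unused skb_map_example(struct sk_buff *skb,
					  struct scatterlist *sg, int nents)
{
	int nsg;

	sg_init_table(sg, nents);
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;
	/* sg[0]..sg[nsg - 1] now reference the skb data, and the last
	 * entry has been marked with sg_mark_end().
	 */
	return nsg;
}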
/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    !__pskb_pull_tail(skb, __skb_pagelen(skb)))
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on a miss we reallocate and reserve even
		 * more space; 128 bytes is fair.
		 */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery: the skb has a frag_list, so we have to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone;
		 * this can happen on input. Copy it and everything
		 * after it.
		 */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* There is no other way out: make a private copy. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Link the new skb in and drop the old one. */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static void skb_set_err_queue(struct sk_buff *skb)
{
	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
	 * So, it is safe to (mis)use it to mark skbs on the error queue.
4742 */ 4743 skb->pkt_type = PACKET_OUTGOING; 4744 BUILD_BUG_ON(PACKET_OUTGOING == 0); 4745 } 4746 4747 /* 4748 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 4749 */ 4750 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 4751 { 4752 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 4753 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 4754 return -ENOMEM; 4755 4756 skb_orphan(skb); 4757 skb->sk = sk; 4758 skb->destructor = sock_rmem_free; 4759 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 4760 skb_set_err_queue(skb); 4761 4762 /* before exiting rcu section, make sure dst is refcounted */ 4763 skb_dst_force(skb); 4764 4765 skb_queue_tail(&sk->sk_error_queue, skb); 4766 if (!sock_flag(sk, SOCK_DEAD)) 4767 sk_error_report(sk); 4768 return 0; 4769 } 4770 EXPORT_SYMBOL(sock_queue_err_skb); 4771 4772 static bool is_icmp_err_skb(const struct sk_buff *skb) 4773 { 4774 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 4775 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 4776 } 4777 4778 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 4779 { 4780 struct sk_buff_head *q = &sk->sk_error_queue; 4781 struct sk_buff *skb, *skb_next = NULL; 4782 bool icmp_next = false; 4783 unsigned long flags; 4784 4785 spin_lock_irqsave(&q->lock, flags); 4786 skb = __skb_dequeue(q); 4787 if (skb && (skb_next = skb_peek(q))) { 4788 icmp_next = is_icmp_err_skb(skb_next); 4789 if (icmp_next) 4790 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 4791 } 4792 spin_unlock_irqrestore(&q->lock, flags); 4793 4794 if (is_icmp_err_skb(skb) && !icmp_next) 4795 sk->sk_err = 0; 4796 4797 if (skb_next) 4798 sk_error_report(sk); 4799 4800 return skb; 4801 } 4802 EXPORT_SYMBOL(sock_dequeue_err_skb); 4803 4804 /** 4805 * skb_clone_sk - create clone of skb, and take reference to socket 4806 * @skb: the skb to clone 4807 * 4808 * This function creates a clone of a buffer that holds a reference on 4809 * sk_refcnt. Buffers created via this function are meant to be 4810 * returned using sock_queue_err_skb, or free via kfree_skb. 4811 * 4812 * When passing buffers allocated with this function to sock_queue_err_skb 4813 * it is necessary to wrap the call with sock_hold/sock_put in order to 4814 * prevent the socket from being released prior to being enqueued on 4815 * the sk_error_queue. 4816 */ 4817 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 4818 { 4819 struct sock *sk = skb->sk; 4820 struct sk_buff *clone; 4821 4822 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 4823 return NULL; 4824 4825 clone = skb_clone(skb, GFP_ATOMIC); 4826 if (!clone) { 4827 sock_put(sk); 4828 return NULL; 4829 } 4830 4831 clone->sk = sk; 4832 clone->destructor = sock_efree; 4833 4834 return clone; 4835 } 4836 EXPORT_SYMBOL(skb_clone_sk); 4837 4838 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 4839 struct sock *sk, 4840 int tstype, 4841 bool opt_stats) 4842 { 4843 struct sock_exterr_skb *serr; 4844 int err; 4845 4846 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 4847 4848 serr = SKB_EXT_ERR(skb); 4849 memset(serr, 0, sizeof(*serr)); 4850 serr->ee.ee_errno = ENOMSG; 4851 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 4852 serr->ee.ee_info = tstype; 4853 serr->opt_stats = opt_stats; 4854 serr->header.h4.iif = skb->dev ? 
skb->dev->ifindex : 0; 4855 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 4856 serr->ee.ee_data = skb_shinfo(skb)->tskey; 4857 if (sk->sk_protocol == IPPROTO_TCP && 4858 sk->sk_type == SOCK_STREAM) 4859 serr->ee.ee_data -= sk->sk_tskey; 4860 } 4861 4862 err = sock_queue_err_skb(sk, skb); 4863 4864 if (err) 4865 kfree_skb(skb); 4866 } 4867 4868 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 4869 { 4870 bool ret; 4871 4872 if (likely(sysctl_tstamp_allow_data || tsonly)) 4873 return true; 4874 4875 read_lock_bh(&sk->sk_callback_lock); 4876 ret = sk->sk_socket && sk->sk_socket->file && 4877 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 4878 read_unlock_bh(&sk->sk_callback_lock); 4879 return ret; 4880 } 4881 4882 void skb_complete_tx_timestamp(struct sk_buff *skb, 4883 struct skb_shared_hwtstamps *hwtstamps) 4884 { 4885 struct sock *sk = skb->sk; 4886 4887 if (!skb_may_tx_timestamp(sk, false)) 4888 goto err; 4889 4890 /* Take a reference to prevent skb_orphan() from freeing the socket, 4891 * but only if the socket refcount is not zero. 4892 */ 4893 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4894 *skb_hwtstamps(skb) = *hwtstamps; 4895 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 4896 sock_put(sk); 4897 return; 4898 } 4899 4900 err: 4901 kfree_skb(skb); 4902 } 4903 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 4904 4905 void __skb_tstamp_tx(struct sk_buff *orig_skb, 4906 const struct sk_buff *ack_skb, 4907 struct skb_shared_hwtstamps *hwtstamps, 4908 struct sock *sk, int tstype) 4909 { 4910 struct sk_buff *skb; 4911 bool tsonly, opt_stats = false; 4912 4913 if (!sk) 4914 return; 4915 4916 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 4917 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 4918 return; 4919 4920 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 4921 if (!skb_may_tx_timestamp(sk, tsonly)) 4922 return; 4923 4924 if (tsonly) { 4925 #ifdef CONFIG_INET 4926 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 4927 sk->sk_protocol == IPPROTO_TCP && 4928 sk->sk_type == SOCK_STREAM) { 4929 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 4930 ack_skb); 4931 opt_stats = true; 4932 } else 4933 #endif 4934 skb = alloc_skb(0, GFP_ATOMIC); 4935 } else { 4936 skb = skb_clone(orig_skb, GFP_ATOMIC); 4937 } 4938 if (!skb) 4939 return; 4940 4941 if (tsonly) { 4942 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 4943 SKBTX_ANY_TSTAMP; 4944 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 4945 } 4946 4947 if (hwtstamps) 4948 *skb_hwtstamps(skb) = *hwtstamps; 4949 else 4950 skb->tstamp = ktime_get_real(); 4951 4952 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 4953 } 4954 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 4955 4956 void skb_tstamp_tx(struct sk_buff *orig_skb, 4957 struct skb_shared_hwtstamps *hwtstamps) 4958 { 4959 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 4960 SCM_TSTAMP_SND); 4961 } 4962 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 4963 4964 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 4965 { 4966 struct sock *sk = skb->sk; 4967 struct sock_exterr_skb *serr; 4968 int err = 1; 4969 4970 skb->wifi_acked_valid = 1; 4971 skb->wifi_acked = acked; 4972 4973 serr = SKB_EXT_ERR(skb); 4974 memset(serr, 0, sizeof(*serr)); 4975 serr->ee.ee_errno = ENOMSG; 4976 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 4977 4978 /* Take a reference to prevent skb_orphan() from freeing the socket, 4979 * but only if the socket refcount is not zero. 
4980 */ 4981 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4982 err = sock_queue_err_skb(sk, skb); 4983 sock_put(sk); 4984 } 4985 if (err) 4986 kfree_skb(skb); 4987 } 4988 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 4989 4990 /** 4991 * skb_partial_csum_set - set up and verify partial csum values for packet 4992 * @skb: the skb to set 4993 * @start: the number of bytes after skb->data to start checksumming. 4994 * @off: the offset from start to place the checksum. 4995 * 4996 * For untrusted partially-checksummed packets, we need to make sure the values 4997 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4998 * 4999 * This function checks and sets those values and skb->ip_summed: if this 5000 * returns false you should drop the packet. 5001 */ 5002 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5003 { 5004 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5005 u32 csum_start = skb_headroom(skb) + (u32)start; 5006 5007 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 5008 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5009 start, off, skb_headroom(skb), skb_headlen(skb)); 5010 return false; 5011 } 5012 skb->ip_summed = CHECKSUM_PARTIAL; 5013 skb->csum_start = csum_start; 5014 skb->csum_offset = off; 5015 skb_set_transport_header(skb, start); 5016 return true; 5017 } 5018 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5019 5020 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5021 unsigned int max) 5022 { 5023 if (skb_headlen(skb) >= len) 5024 return 0; 5025 5026 /* If we need to pullup then pullup to the max, so we 5027 * won't need to do it again. 5028 */ 5029 if (max > skb->len) 5030 max = skb->len; 5031 5032 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5033 return -ENOMEM; 5034 5035 if (skb_headlen(skb) < len) 5036 return -EPROTO; 5037 5038 return 0; 5039 } 5040 5041 #define MAX_TCP_HDR_LEN (15 * 4) 5042 5043 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5044 typeof(IPPROTO_IP) proto, 5045 unsigned int off) 5046 { 5047 int err; 5048 5049 switch (proto) { 5050 case IPPROTO_TCP: 5051 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5052 off + MAX_TCP_HDR_LEN); 5053 if (!err && !skb_partial_csum_set(skb, off, 5054 offsetof(struct tcphdr, 5055 check))) 5056 err = -EPROTO; 5057 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5058 5059 case IPPROTO_UDP: 5060 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5061 off + sizeof(struct udphdr)); 5062 if (!err && !skb_partial_csum_set(skb, off, 5063 offsetof(struct udphdr, 5064 check))) 5065 err = -EPROTO; 5066 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5067 } 5068 5069 return ERR_PTR(-EPROTO); 5070 } 5071 5072 /* This value should be large enough to cover a tagged ethernet header plus 5073 * maximally sized IP and TCP or UDP headers. 
5074 */ 5075 #define MAX_IP_HDR_LEN 128 5076 5077 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5078 { 5079 unsigned int off; 5080 bool fragment; 5081 __sum16 *csum; 5082 int err; 5083 5084 fragment = false; 5085 5086 err = skb_maybe_pull_tail(skb, 5087 sizeof(struct iphdr), 5088 MAX_IP_HDR_LEN); 5089 if (err < 0) 5090 goto out; 5091 5092 if (ip_is_fragment(ip_hdr(skb))) 5093 fragment = true; 5094 5095 off = ip_hdrlen(skb); 5096 5097 err = -EPROTO; 5098 5099 if (fragment) 5100 goto out; 5101 5102 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5103 if (IS_ERR(csum)) 5104 return PTR_ERR(csum); 5105 5106 if (recalculate) 5107 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5108 ip_hdr(skb)->daddr, 5109 skb->len - off, 5110 ip_hdr(skb)->protocol, 0); 5111 err = 0; 5112 5113 out: 5114 return err; 5115 } 5116 5117 /* This value should be large enough to cover a tagged ethernet header plus 5118 * an IPv6 header, all options, and a maximal TCP or UDP header. 5119 */ 5120 #define MAX_IPV6_HDR_LEN 256 5121 5122 #define OPT_HDR(type, skb, off) \ 5123 (type *)(skb_network_header(skb) + (off)) 5124 5125 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5126 { 5127 int err; 5128 u8 nexthdr; 5129 unsigned int off; 5130 unsigned int len; 5131 bool fragment; 5132 bool done; 5133 __sum16 *csum; 5134 5135 fragment = false; 5136 done = false; 5137 5138 off = sizeof(struct ipv6hdr); 5139 5140 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5141 if (err < 0) 5142 goto out; 5143 5144 nexthdr = ipv6_hdr(skb)->nexthdr; 5145 5146 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5147 while (off <= len && !done) { 5148 switch (nexthdr) { 5149 case IPPROTO_DSTOPTS: 5150 case IPPROTO_HOPOPTS: 5151 case IPPROTO_ROUTING: { 5152 struct ipv6_opt_hdr *hp; 5153 5154 err = skb_maybe_pull_tail(skb, 5155 off + 5156 sizeof(struct ipv6_opt_hdr), 5157 MAX_IPV6_HDR_LEN); 5158 if (err < 0) 5159 goto out; 5160 5161 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5162 nexthdr = hp->nexthdr; 5163 off += ipv6_optlen(hp); 5164 break; 5165 } 5166 case IPPROTO_AH: { 5167 struct ip_auth_hdr *hp; 5168 5169 err = skb_maybe_pull_tail(skb, 5170 off + 5171 sizeof(struct ip_auth_hdr), 5172 MAX_IPV6_HDR_LEN); 5173 if (err < 0) 5174 goto out; 5175 5176 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5177 nexthdr = hp->nexthdr; 5178 off += ipv6_authlen(hp); 5179 break; 5180 } 5181 case IPPROTO_FRAGMENT: { 5182 struct frag_hdr *hp; 5183 5184 err = skb_maybe_pull_tail(skb, 5185 off + 5186 sizeof(struct frag_hdr), 5187 MAX_IPV6_HDR_LEN); 5188 if (err < 0) 5189 goto out; 5190 5191 hp = OPT_HDR(struct frag_hdr, skb, off); 5192 5193 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5194 fragment = true; 5195 5196 nexthdr = hp->nexthdr; 5197 off += sizeof(struct frag_hdr); 5198 break; 5199 } 5200 default: 5201 done = true; 5202 break; 5203 } 5204 } 5205 5206 err = -EPROTO; 5207 5208 if (!done || fragment) 5209 goto out; 5210 5211 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5212 if (IS_ERR(csum)) 5213 return PTR_ERR(csum); 5214 5215 if (recalculate) 5216 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5217 &ipv6_hdr(skb)->daddr, 5218 skb->len - off, nexthdr, 0); 5219 err = 0; 5220 5221 out: 5222 return err; 5223 } 5224 5225 /** 5226 * skb_checksum_setup - set up partial checksum offset 5227 * @skb: the skb to set up 5228 * @recalculate: if true the pseudo-header checksum will be recalculated 5229 */ 5230 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5231 { 
5232 int err; 5233 5234 switch (skb->protocol) { 5235 case htons(ETH_P_IP): 5236 err = skb_checksum_setup_ipv4(skb, recalculate); 5237 break; 5238 5239 case htons(ETH_P_IPV6): 5240 err = skb_checksum_setup_ipv6(skb, recalculate); 5241 break; 5242 5243 default: 5244 err = -EPROTO; 5245 break; 5246 } 5247 5248 return err; 5249 } 5250 EXPORT_SYMBOL(skb_checksum_setup); 5251 5252 /** 5253 * skb_checksum_maybe_trim - maybe trims the given skb 5254 * @skb: the skb to check 5255 * @transport_len: the data length beyond the network header 5256 * 5257 * Checks whether the given skb has data beyond the given transport length. 5258 * If so, returns a cloned skb trimmed to this transport length. 5259 * Otherwise returns the provided skb. Returns NULL in error cases 5260 * (e.g. transport_len exceeds skb length or out-of-memory). 5261 * 5262 * Caller needs to set the skb transport header and free any returned skb if it 5263 * differs from the provided skb. 5264 */ 5265 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5266 unsigned int transport_len) 5267 { 5268 struct sk_buff *skb_chk; 5269 unsigned int len = skb_transport_offset(skb) + transport_len; 5270 int ret; 5271 5272 if (skb->len < len) 5273 return NULL; 5274 else if (skb->len == len) 5275 return skb; 5276 5277 skb_chk = skb_clone(skb, GFP_ATOMIC); 5278 if (!skb_chk) 5279 return NULL; 5280 5281 ret = pskb_trim_rcsum(skb_chk, len); 5282 if (ret) { 5283 kfree_skb(skb_chk); 5284 return NULL; 5285 } 5286 5287 return skb_chk; 5288 } 5289 5290 /** 5291 * skb_checksum_trimmed - validate checksum of an skb 5292 * @skb: the skb to check 5293 * @transport_len: the data length beyond the network header 5294 * @skb_chkf: checksum function to use 5295 * 5296 * Applies the given checksum function skb_chkf to the provided skb. 5297 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5298 * 5299 * If the skb has data beyond the given transport length, then a 5300 * trimmed & cloned skb is checked and returned. 5301 * 5302 * Caller needs to set the skb transport header and free any returned skb if it 5303 * differs from the provided skb. 
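 *
 * Typical users are the IGMP and MLD message validation helpers.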
5304 */ 5305 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5306 unsigned int transport_len, 5307 __sum16(*skb_chkf)(struct sk_buff *skb)) 5308 { 5309 struct sk_buff *skb_chk; 5310 unsigned int offset = skb_transport_offset(skb); 5311 __sum16 ret; 5312 5313 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5314 if (!skb_chk) 5315 goto err; 5316 5317 if (!pskb_may_pull(skb_chk, offset)) 5318 goto err; 5319 5320 skb_pull_rcsum(skb_chk, offset); 5321 ret = skb_chkf(skb_chk); 5322 skb_push_rcsum(skb_chk, offset); 5323 5324 if (ret) 5325 goto err; 5326 5327 return skb_chk; 5328 5329 err: 5330 if (skb_chk && skb_chk != skb) 5331 kfree_skb(skb_chk); 5332 5333 return NULL; 5334 5335 } 5336 EXPORT_SYMBOL(skb_checksum_trimmed); 5337 5338 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5339 { 5340 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5341 skb->dev->name); 5342 } 5343 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5344 5345 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5346 { 5347 if (head_stolen) { 5348 skb_release_head_state(skb); 5349 kmem_cache_free(skbuff_head_cache, skb); 5350 } else { 5351 __kfree_skb(skb); 5352 } 5353 } 5354 EXPORT_SYMBOL(kfree_skb_partial); 5355 5356 /** 5357 * skb_try_coalesce - try to merge skb to prior one 5358 * @to: prior buffer 5359 * @from: buffer to add 5360 * @fragstolen: pointer to boolean 5361 * @delta_truesize: how much more was allocated than was requested 5362 */ 5363 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5364 bool *fragstolen, int *delta_truesize) 5365 { 5366 struct skb_shared_info *to_shinfo, *from_shinfo; 5367 int i, delta, len = from->len; 5368 5369 *fragstolen = false; 5370 5371 if (skb_cloned(to)) 5372 return false; 5373 5374 /* The page pool signature of struct page will eventually figure out 5375 * which pages can be recycled or not but for now let's prohibit slab 5376 * allocated and page_pool allocated SKBs from being coalesced. 
5377 */ 5378 if (to->pp_recycle != from->pp_recycle) 5379 return false; 5380 5381 if (len <= skb_tailroom(to)) { 5382 if (len) 5383 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5384 *delta_truesize = 0; 5385 return true; 5386 } 5387 5388 to_shinfo = skb_shinfo(to); 5389 from_shinfo = skb_shinfo(from); 5390 if (to_shinfo->frag_list || from_shinfo->frag_list) 5391 return false; 5392 if (skb_zcopy(to) || skb_zcopy(from)) 5393 return false; 5394 5395 if (skb_headlen(from) != 0) { 5396 struct page *page; 5397 unsigned int offset; 5398 5399 if (to_shinfo->nr_frags + 5400 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5401 return false; 5402 5403 if (skb_head_is_locked(from)) 5404 return false; 5405 5406 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5407 5408 page = virt_to_head_page(from->head); 5409 offset = from->data - (unsigned char *)page_address(page); 5410 5411 skb_fill_page_desc(to, to_shinfo->nr_frags, 5412 page, offset, skb_headlen(from)); 5413 *fragstolen = true; 5414 } else { 5415 if (to_shinfo->nr_frags + 5416 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5417 return false; 5418 5419 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5420 } 5421 5422 WARN_ON_ONCE(delta < len); 5423 5424 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5425 from_shinfo->frags, 5426 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5427 to_shinfo->nr_frags += from_shinfo->nr_frags; 5428 5429 if (!skb_cloned(from)) 5430 from_shinfo->nr_frags = 0; 5431 5432 /* if the skb is not cloned this does nothing 5433 * since we set nr_frags to 0. 5434 */ 5435 for (i = 0; i < from_shinfo->nr_frags; i++) 5436 __skb_frag_ref(&from_shinfo->frags[i]); 5437 5438 to->truesize += delta; 5439 to->len += len; 5440 to->data_len += len; 5441 5442 *delta_truesize = delta; 5443 return true; 5444 } 5445 EXPORT_SYMBOL(skb_try_coalesce); 5446 5447 /** 5448 * skb_scrub_packet - scrub an skb 5449 * 5450 * @skb: buffer to clean 5451 * @xnet: packet is crossing netns 5452 * 5453 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 5454 * into/from a tunnel. Some information have to be cleared during these 5455 * operations. 5456 * skb_scrub_packet can also be used to clean a skb before injecting it in 5457 * another namespace (@xnet == true). We have to clear all information in the 5458 * skb that could impact namespace isolation. 5459 */ 5460 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5461 { 5462 skb->pkt_type = PACKET_HOST; 5463 skb->skb_iif = 0; 5464 skb->ignore_df = 0; 5465 skb_dst_drop(skb); 5466 skb_ext_reset(skb); 5467 nf_reset_ct(skb); 5468 nf_reset_trace(skb); 5469 5470 #ifdef CONFIG_NET_SWITCHDEV 5471 skb->offload_fwd_mark = 0; 5472 skb->offload_l3_fwd_mark = 0; 5473 #endif 5474 5475 if (!xnet) 5476 return; 5477 5478 ipvs_reset(skb); 5479 skb->mark = 0; 5480 skb->tstamp = 0; 5481 } 5482 EXPORT_SYMBOL_GPL(skb_scrub_packet); 5483 5484 /** 5485 * skb_gso_transport_seglen - Return length of individual segments of a gso packet 5486 * 5487 * @skb: GSO skb 5488 * 5489 * skb_gso_transport_seglen is used to determine the real size of the 5490 * individual segments, including Layer4 headers (TCP/UDP). 5491 * 5492 * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 
5493 */ 5494 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 5495 { 5496 const struct skb_shared_info *shinfo = skb_shinfo(skb); 5497 unsigned int thlen = 0; 5498 5499 if (skb->encapsulation) { 5500 thlen = skb_inner_transport_header(skb) - 5501 skb_transport_header(skb); 5502 5503 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 5504 thlen += inner_tcp_hdrlen(skb); 5505 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 5506 thlen = tcp_hdrlen(skb); 5507 } else if (unlikely(skb_is_gso_sctp(skb))) { 5508 thlen = sizeof(struct sctphdr); 5509 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 5510 thlen = sizeof(struct udphdr); 5511 } 5512 /* UFO sets gso_size to the size of the fragmentation 5513 * payload, i.e. the size of the L4 (UDP) header is already 5514 * accounted for. 5515 */ 5516 return thlen + shinfo->gso_size; 5517 } 5518 5519 /** 5520 * skb_gso_network_seglen - Return length of individual segments of a gso packet 5521 * 5522 * @skb: GSO skb 5523 * 5524 * skb_gso_network_seglen is used to determine the real size of the 5525 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 5526 * 5527 * The MAC/L2 header is not accounted for. 5528 */ 5529 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 5530 { 5531 unsigned int hdr_len = skb_transport_header(skb) - 5532 skb_network_header(skb); 5533 5534 return hdr_len + skb_gso_transport_seglen(skb); 5535 } 5536 5537 /** 5538 * skb_gso_mac_seglen - Return length of individual segments of a gso packet 5539 * 5540 * @skb: GSO skb 5541 * 5542 * skb_gso_mac_seglen is used to determine the real size of the 5543 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 5544 * headers (TCP/UDP). 5545 */ 5546 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) 5547 { 5548 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 5549 5550 return hdr_len + skb_gso_transport_seglen(skb); 5551 } 5552 5553 /** 5554 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 5555 * 5556 * There are a couple of instances where we have a GSO skb, and we 5557 * want to determine what size it would be after it is segmented. 5558 * 5559 * We might want to check: 5560 * - L3+L4+payload size (e.g. IP forwarding) 5561 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) 5562 * 5563 * This is a helper to do that correctly considering GSO_BY_FRAGS. 5564 * 5565 * @skb: GSO skb 5566 * 5567 * @seg_len: The segmented length (from skb_gso_*_seglen). In the 5568 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 5569 * 5570 * @max_len: The maximum permissible length. 5571 * 5572 * Returns true if the segmented length <= max length. 5573 */ 5574 static inline bool skb_gso_size_check(const struct sk_buff *skb, 5575 unsigned int seg_len, 5576 unsigned int max_len) { 5577 const struct skb_shared_info *shinfo = skb_shinfo(skb); 5578 const struct sk_buff *iter; 5579 5580 if (shinfo->gso_size != GSO_BY_FRAGS) 5581 return seg_len <= max_len; 5582 5583 /* Undo this so we can re-use header sizes */ 5584 seg_len -= GSO_BY_FRAGS; 5585 5586 skb_walk_frags(skb, iter) { 5587 if (seg_len + skb_headlen(iter) > max_len) 5588 return false; 5589 } 5590 5591 return true; 5592 } 5593 5594 /** 5595 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? 
5596 * 5597 * @skb: GSO skb 5598 * @mtu: MTU to validate against 5599 * 5600 * skb_gso_validate_network_len validates if a given skb will fit a 5601 * wanted MTU once split. It considers L3 headers, L4 headers, and the 5602 * payload. 5603 */ 5604 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) 5605 { 5606 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5607 } 5608 EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); 5609 5610 /** 5611 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 5612 * 5613 * @skb: GSO skb 5614 * @len: length to validate against 5615 * 5616 * skb_gso_validate_mac_len validates if a given skb will fit a wanted 5617 * length once split, including L2, L3 and L4 headers and the payload. 5618 */ 5619 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) 5620 { 5621 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); 5622 } 5623 EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); 5624 5625 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5626 { 5627 int mac_len, meta_len; 5628 void *meta; 5629 5630 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5631 kfree_skb(skb); 5632 return NULL; 5633 } 5634 5635 mac_len = skb->data - skb_mac_header(skb); 5636 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 5637 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 5638 mac_len - VLAN_HLEN - ETH_TLEN); 5639 } 5640 5641 meta_len = skb_metadata_len(skb); 5642 if (meta_len) { 5643 meta = skb_metadata_end(skb) - meta_len; 5644 memmove(meta + VLAN_HLEN, meta, meta_len); 5645 } 5646 5647 skb->mac_header += VLAN_HLEN; 5648 return skb; 5649 } 5650 5651 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 5652 { 5653 struct vlan_hdr *vhdr; 5654 u16 vlan_tci; 5655 5656 if (unlikely(skb_vlan_tag_present(skb))) { 5657 /* vlan_tci is already set-up so leave this for another time */ 5658 return skb; 5659 } 5660 5661 skb = skb_share_check(skb, GFP_ATOMIC); 5662 if (unlikely(!skb)) 5663 goto err_free; 5664 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 5665 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 5666 goto err_free; 5667 5668 vhdr = (struct vlan_hdr *)skb->data; 5669 vlan_tci = ntohs(vhdr->h_vlan_TCI); 5670 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 5671 5672 skb_pull_rcsum(skb, VLAN_HLEN); 5673 vlan_set_encap_proto(skb, vhdr); 5674 5675 skb = skb_reorder_vlan_header(skb); 5676 if (unlikely(!skb)) 5677 goto err_free; 5678 5679 skb_reset_network_header(skb); 5680 if (!skb_transport_header_was_set(skb)) 5681 skb_reset_transport_header(skb); 5682 skb_reset_mac_len(skb); 5683 5684 return skb; 5685 5686 err_free: 5687 kfree_skb(skb); 5688 return NULL; 5689 } 5690 EXPORT_SYMBOL(skb_vlan_untag); 5691 5692 int skb_ensure_writable(struct sk_buff *skb, int write_len) 5693 { 5694 if (!pskb_may_pull(skb, write_len)) 5695 return -ENOMEM; 5696 5697 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5698 return 0; 5699 5700 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5701 } 5702 EXPORT_SYMBOL(skb_ensure_writable); 5703 5704 /* remove VLAN header from packet and update csum accordingly. 
5705 * expects a non skb_vlan_tag_present skb with a vlan tag payload 5706 */ 5707 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 5708 { 5709 struct vlan_hdr *vhdr; 5710 int offset = skb->data - skb_mac_header(skb); 5711 int err; 5712 5713 if (WARN_ONCE(offset, 5714 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 5715 offset)) { 5716 return -EINVAL; 5717 } 5718 5719 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 5720 if (unlikely(err)) 5721 return err; 5722 5723 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 5724 5725 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 5726 *vlan_tci = ntohs(vhdr->h_vlan_TCI); 5727 5728 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 5729 __skb_pull(skb, VLAN_HLEN); 5730 5731 vlan_set_encap_proto(skb, vhdr); 5732 skb->mac_header += VLAN_HLEN; 5733 5734 if (skb_network_offset(skb) < ETH_HLEN) 5735 skb_set_network_header(skb, ETH_HLEN); 5736 5737 skb_reset_mac_len(skb); 5738 5739 return err; 5740 } 5741 EXPORT_SYMBOL(__skb_vlan_pop); 5742 5743 /* Pop a vlan tag either from hwaccel or from payload. 5744 * Expects skb->data at mac header. 5745 */ 5746 int skb_vlan_pop(struct sk_buff *skb) 5747 { 5748 u16 vlan_tci; 5749 __be16 vlan_proto; 5750 int err; 5751 5752 if (likely(skb_vlan_tag_present(skb))) { 5753 __vlan_hwaccel_clear_tag(skb); 5754 } else { 5755 if (unlikely(!eth_type_vlan(skb->protocol))) 5756 return 0; 5757 5758 err = __skb_vlan_pop(skb, &vlan_tci); 5759 if (err) 5760 return err; 5761 } 5762 /* move next vlan tag to hw accel tag */ 5763 if (likely(!eth_type_vlan(skb->protocol))) 5764 return 0; 5765 5766 vlan_proto = skb->protocol; 5767 err = __skb_vlan_pop(skb, &vlan_tci); 5768 if (unlikely(err)) 5769 return err; 5770 5771 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 5772 return 0; 5773 } 5774 EXPORT_SYMBOL(skb_vlan_pop); 5775 5776 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 5777 * Expects skb->data at mac header. 5778 */ 5779 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 5780 { 5781 if (skb_vlan_tag_present(skb)) { 5782 int offset = skb->data - skb_mac_header(skb); 5783 int err; 5784 5785 if (WARN_ONCE(offset, 5786 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 5787 offset)) { 5788 return -EINVAL; 5789 } 5790 5791 err = __vlan_insert_tag(skb, skb->vlan_proto, 5792 skb_vlan_tag_get(skb)); 5793 if (err) 5794 return err; 5795 5796 skb->protocol = skb->vlan_proto; 5797 skb->mac_len += VLAN_HLEN; 5798 5799 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 5800 } 5801 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 5802 return 0; 5803 } 5804 EXPORT_SYMBOL(skb_vlan_push); 5805 5806 /** 5807 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 5808 * 5809 * @skb: Socket buffer to modify 5810 * 5811 * Drop the Ethernet header of @skb. 5812 * 5813 * Expects that skb->data points to the mac header and that no VLAN tags are 5814 * present. 5815 * 5816 * Returns 0 on success, -errno otherwise. 
5817 */ 5818 int skb_eth_pop(struct sk_buff *skb) 5819 { 5820 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 5821 skb_network_offset(skb) < ETH_HLEN) 5822 return -EPROTO; 5823 5824 skb_pull_rcsum(skb, ETH_HLEN); 5825 skb_reset_mac_header(skb); 5826 skb_reset_mac_len(skb); 5827 5828 return 0; 5829 } 5830 EXPORT_SYMBOL(skb_eth_pop); 5831 5832 /** 5833 * skb_eth_push() - Add a new Ethernet header at the head of a packet 5834 * 5835 * @skb: Socket buffer to modify 5836 * @dst: Destination MAC address of the new header 5837 * @src: Source MAC address of the new header 5838 * 5839 * Prepend @skb with a new Ethernet header. 5840 * 5841 * Expects that skb->data points to the mac header, which must be empty. 5842 * 5843 * Returns 0 on success, -errno otherwise. 5844 */ 5845 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 5846 const unsigned char *src) 5847 { 5848 struct ethhdr *eth; 5849 int err; 5850 5851 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 5852 return -EPROTO; 5853 5854 err = skb_cow_head(skb, sizeof(*eth)); 5855 if (err < 0) 5856 return err; 5857 5858 skb_push(skb, sizeof(*eth)); 5859 skb_reset_mac_header(skb); 5860 skb_reset_mac_len(skb); 5861 5862 eth = eth_hdr(skb); 5863 ether_addr_copy(eth->h_dest, dst); 5864 ether_addr_copy(eth->h_source, src); 5865 eth->h_proto = skb->protocol; 5866 5867 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 5868 5869 return 0; 5870 } 5871 EXPORT_SYMBOL(skb_eth_push); 5872 5873 /* Update the ethertype of hdr and the skb csum value if required. */ 5874 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 5875 __be16 ethertype) 5876 { 5877 if (skb->ip_summed == CHECKSUM_COMPLETE) { 5878 __be16 diff[] = { ~hdr->h_proto, ethertype }; 5879 5880 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5881 } 5882 5883 hdr->h_proto = ethertype; 5884 } 5885 5886 /** 5887 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 5888 * the packet 5889 * 5890 * @skb: buffer 5891 * @mpls_lse: MPLS label stack entry to push 5892 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 5893 * @mac_len: length of the MAC header 5894 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 5895 * ethernet 5896 * 5897 * Expects skb->data at mac header. 5898 * 5899 * Returns 0 on success, -errno otherwise. 5900 */ 5901 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 5902 int mac_len, bool ethernet) 5903 { 5904 struct mpls_shim_hdr *lse; 5905 int err; 5906 5907 if (unlikely(!eth_p_mpls(mpls_proto))) 5908 return -EINVAL; 5909 5910 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
*/ 5911 if (skb->encapsulation) 5912 return -EINVAL; 5913 5914 err = skb_cow_head(skb, MPLS_HLEN); 5915 if (unlikely(err)) 5916 return err; 5917 5918 if (!skb->inner_protocol) { 5919 skb_set_inner_network_header(skb, skb_network_offset(skb)); 5920 skb_set_inner_protocol(skb, skb->protocol); 5921 } 5922 5923 skb_push(skb, MPLS_HLEN); 5924 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 5925 mac_len); 5926 skb_reset_mac_header(skb); 5927 skb_set_network_header(skb, mac_len); 5928 skb_reset_mac_len(skb); 5929 5930 lse = mpls_hdr(skb); 5931 lse->label_stack_entry = mpls_lse; 5932 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 5933 5934 if (ethernet && mac_len >= ETH_HLEN) 5935 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 5936 skb->protocol = mpls_proto; 5937 5938 return 0; 5939 } 5940 EXPORT_SYMBOL_GPL(skb_mpls_push); 5941 5942 /** 5943 * skb_mpls_pop() - pop the outermost MPLS header 5944 * 5945 * @skb: buffer 5946 * @next_proto: ethertype of header after popped MPLS header 5947 * @mac_len: length of the MAC header 5948 * @ethernet: flag to indicate if the packet is ethernet 5949 * 5950 * Expects skb->data at mac header. 5951 * 5952 * Returns 0 on success, -errno otherwise. 5953 */ 5954 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 5955 bool ethernet) 5956 { 5957 int err; 5958 5959 if (unlikely(!eth_p_mpls(skb->protocol))) 5960 return 0; 5961 5962 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 5963 if (unlikely(err)) 5964 return err; 5965 5966 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 5967 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 5968 mac_len); 5969 5970 __skb_pull(skb, MPLS_HLEN); 5971 skb_reset_mac_header(skb); 5972 skb_set_network_header(skb, mac_len); 5973 5974 if (ethernet && mac_len >= ETH_HLEN) { 5975 struct ethhdr *hdr; 5976 5977 /* use mpls_hdr() to get ethertype to account for VLANs. */ 5978 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 5979 skb_mod_eth_type(skb, hdr, next_proto); 5980 } 5981 skb->protocol = next_proto; 5982 5983 return 0; 5984 } 5985 EXPORT_SYMBOL_GPL(skb_mpls_pop); 5986 5987 /** 5988 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 5989 * 5990 * @skb: buffer 5991 * @mpls_lse: new MPLS label stack entry to update to 5992 * 5993 * Expects skb->data at mac header. 5994 * 5995 * Returns 0 on success, -errno otherwise. 5996 */ 5997 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 5998 { 5999 int err; 6000 6001 if (unlikely(!eth_p_mpls(skb->protocol))) 6002 return -EINVAL; 6003 6004 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6005 if (unlikely(err)) 6006 return err; 6007 6008 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6009 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6010 6011 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6012 } 6013 6014 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6015 6016 return 0; 6017 } 6018 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6019 6020 /** 6021 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6022 * 6023 * @skb: buffer 6024 * 6025 * Expects skb->data at mac header. 6026 * 6027 * Returns 0 on success, -errno otherwise. 
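 *
 * A TTL that reaches zero after the decrement is reported as -EINVAL;
 * callers typically drop such packets instead of forwarding them.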
6028 */ 6029 int skb_mpls_dec_ttl(struct sk_buff *skb) 6030 { 6031 u32 lse; 6032 u8 ttl; 6033 6034 if (unlikely(!eth_p_mpls(skb->protocol))) 6035 return -EINVAL; 6036 6037 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6038 return -ENOMEM; 6039 6040 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6041 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6042 if (!--ttl) 6043 return -EINVAL; 6044 6045 lse &= ~MPLS_LS_TTL_MASK; 6046 lse |= ttl << MPLS_LS_TTL_SHIFT; 6047 6048 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6049 } 6050 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6051 6052 /** 6053 * alloc_skb_with_frags - allocate skb with page frags 6054 * 6055 * @header_len: size of linear part 6056 * @data_len: needed length in frags 6057 * @max_page_order: max page order desired. 6058 * @errcode: pointer to error code if any 6059 * @gfp_mask: allocation mask 6060 * 6061 * This can be used to allocate a paged skb, given a maximal order for frags. 6062 */ 6063 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6064 unsigned long data_len, 6065 int max_page_order, 6066 int *errcode, 6067 gfp_t gfp_mask) 6068 { 6069 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 6070 unsigned long chunk; 6071 struct sk_buff *skb; 6072 struct page *page; 6073 int i; 6074 6075 *errcode = -EMSGSIZE; 6076 /* Note this test could be relaxed, if we succeed to allocate 6077 * high order pages... 6078 */ 6079 if (npages > MAX_SKB_FRAGS) 6080 return NULL; 6081 6082 *errcode = -ENOBUFS; 6083 skb = alloc_skb(header_len, gfp_mask); 6084 if (!skb) 6085 return NULL; 6086 6087 skb->truesize += npages << PAGE_SHIFT; 6088 6089 for (i = 0; npages > 0; i++) { 6090 int order = max_page_order; 6091 6092 while (order) { 6093 if (npages >= 1 << order) { 6094 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6095 __GFP_COMP | 6096 __GFP_NOWARN, 6097 order); 6098 if (page) 6099 goto fill_page; 6100 /* Do not retry other high order allocations */ 6101 order = 1; 6102 max_page_order = 0; 6103 } 6104 order--; 6105 } 6106 page = alloc_page(gfp_mask); 6107 if (!page) 6108 goto failure; 6109 fill_page: 6110 chunk = min_t(unsigned long, data_len, 6111 PAGE_SIZE << order); 6112 skb_fill_page_desc(skb, i, page, 0, chunk); 6113 data_len -= chunk; 6114 npages -= 1 << order; 6115 } 6116 return skb; 6117 6118 failure: 6119 kfree_skb(skb); 6120 return NULL; 6121 } 6122 EXPORT_SYMBOL(alloc_skb_with_frags); 6123 6124 /* carve out the first off bytes from skb when off < headlen */ 6125 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6126 const int headlen, gfp_t gfp_mask) 6127 { 6128 int i; 6129 int size = skb_end_offset(skb); 6130 int new_hlen = headlen - off; 6131 u8 *data; 6132 6133 size = SKB_DATA_ALIGN(size); 6134 6135 if (skb_pfmemalloc(skb)) 6136 gfp_mask |= __GFP_MEMALLOC; 6137 data = kmalloc_reserve(size + 6138 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 6139 gfp_mask, NUMA_NO_NODE, NULL); 6140 if (!data) 6141 return -ENOMEM; 6142 6143 size = SKB_WITH_OVERHEAD(ksize(data)); 6144 6145 /* Copy real data, and all frags */ 6146 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6147 skb->len -= off; 6148 6149 memcpy((struct skb_shared_info *)(data + size), 6150 skb_shinfo(skb), 6151 offsetof(struct skb_shared_info, 6152 frags[skb_shinfo(skb)->nr_frags])); 6153 if (skb_cloned(skb)) { 6154 /* drop the old head gracefully */ 6155 if (skb_orphan_frags(skb, gfp_mask)) { 6156 kfree(data); 6157 return -ENOMEM; 6158 } 6159 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6160 
/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		kfree_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}
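/*
 * Illustrative sketch, not part of the original file: pskb_carve()
 * (forward-declared above, defined below) is the entry point these
 * helpers serve. Chopping a consumed header off the front of an skb
 * without linearizing it would look like this; demo_carve_header() is
 * a hypothetical caller.
 */
static int __maybe_unused demo_carve_header(struct sk_buff *skb, u32 hdr_len)
{
	/* Dispatches to pskb_carve_inside_header() or _nonlinear(),
	 * depending on whether the split point falls inside the
	 * linear area.
	 */
	return pskb_carve(skb, hdr_len, GFP_ATOMIC);
}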
/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we do here.
				 */
				skb_frag_off_add(&shinfo->frags[0], off - pos);
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	/* split line is in frag list */
	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
		if (skb_has_frag_list(skb))
			kfree_skb_list(skb_shinfo(skb)->frag_list);
		kfree(data);
		return -ENOMEM;
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);

/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head thus cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be over estimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
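/*
 * Illustrative sketch, not part of the original file: condensing an skb
 * right before queueing it on a socket, then charging the (possibly
 * reduced) truesize to the socket. demo_queue_condensed() is
 * hypothetical and assumes the caller already owns the queue lock.
 */
static void __maybe_unused demo_queue_condensed(struct sock *sk,
						struct sk_buff *skb)
{
	/* Cannot fail; may free page frags and shrink skb->truesize */
	skb_condense(skb);

	/* skb_condense() asks us to re-evaluate truesize ourselves:
	 * skb_set_owner_r() charges the current skb->truesize to sk.
	 */
	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}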
#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be attached
 * to an skb via __skb_ext_set().
 * Note: caller must handle the skb_ext as an opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}
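/*
 * Illustrative sketch, not part of the original file: preallocating
 * extension storage in a sleepable context with __skb_ext_alloc(), then
 * attaching it later (e.g. from a non-sleepable path) with
 * __skb_ext_set(). SKB_EXT_MPTCP is used purely as an example id and
 * assumes CONFIG_MPTCP; demo_attach_prealloc_ext() is hypothetical.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static struct mptcp_ext * __maybe_unused
demo_attach_prealloc_ext(struct sk_buff *skb, struct skb_ext *prealloc)
{
	/* Consumes @prealloc and drops any extensions @skb already had */
	return __skb_ext_set(skb, SKB_EXT_MPTCP, prealloc);
}
#endif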
/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = __skb_ext_alloc(GFP_ATOMIC);
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->slow_gro = 1;
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);

#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is last clone, nothing can increment
	 * it after check passes. Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
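#if defined(CONFIG_SKB_EXTENSIONS) && defined(CONFIG_XFRM)
/*
 * Illustrative sketch, not part of the original file: the usual
 * add/use/del round trip for an skb extension, here with
 * SKB_EXT_SEC_PATH purely as an example id. demo_ext_roundtrip() is
 * hypothetical.
 */
static int __maybe_unused demo_ext_roundtrip(struct sk_buff *skb)
{
	struct sec_path *sp;

	/* COWs the extension area first if @skb was cloned */
	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return -ENOMEM;

	/* ... fill and use sp here ... */

	/* Drops only this id; the storage is freed once no ids remain */
	skb_ext_del(skb, SKB_EXT_SEC_PATH);
	return 0;
}
#endif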