1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Routines having to do with the 'struct sk_buff' memory handlers. 4 * 5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 6 * Florian La Roche <rzsfl@rz.uni-sb.de> 7 * 8 * Fixes: 9 * Alan Cox : Fixed the worst of the load 10 * balancer bugs. 11 * Dave Platt : Interrupt stacking fix. 12 * Richard Kooijman : Timestamp fixes. 13 * Alan Cox : Changed buffer format. 14 * Alan Cox : destructor hook for AF_UNIX etc. 15 * Linus Torvalds : Better skb_clone. 16 * Alan Cox : Added skb_copy. 17 * Alan Cox : Added all the changed routines Linus 18 * only put in the headers 19 * Ray VanTassle : Fixed --skb->lock in free 20 * Alan Cox : skb_copy copy arp field 21 * Andi Kleen : slabified it. 22 * Robert Olsson : Removed skb_head_pool 23 * 24 * NOTE: 25 * The __skb_ routines should be called with interrupts 26 * disabled, or you better be *real* sure that the operation is atomic 27 * with respect to whatever list is being frobbed (e.g. via lock_sock() 28 * or via disabling bottom half handlers, etc). 29 */ 30 31 /* 32 * The functions in this file will not compile correctly with gcc 2.4.x 33 */ 34 35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 36 37 #include <linux/module.h> 38 #include <linux/types.h> 39 #include <linux/kernel.h> 40 #include <linux/mm.h> 41 #include <linux/interrupt.h> 42 #include <linux/in.h> 43 #include <linux/inet.h> 44 #include <linux/slab.h> 45 #include <linux/tcp.h> 46 #include <linux/udp.h> 47 #include <linux/sctp.h> 48 #include <linux/netdevice.h> 49 #ifdef CONFIG_NET_CLS_ACT 50 #include <net/pkt_sched.h> 51 #endif 52 #include <linux/string.h> 53 #include <linux/skbuff.h> 54 #include <linux/splice.h> 55 #include <linux/cache.h> 56 #include <linux/rtnetlink.h> 57 #include <linux/init.h> 58 #include <linux/scatterlist.h> 59 #include <linux/errqueue.h> 60 #include <linux/prefetch.h> 61 #include <linux/bitfield.h> 62 #include <linux/if_vlan.h> 63 #include <linux/mpls.h> 64 #include <linux/kcov.h> 65 #include <linux/iov_iter.h> 66 67 #include <net/protocol.h> 68 #include <net/dst.h> 69 #include <net/sock.h> 70 #include <net/checksum.h> 71 #include <net/gso.h> 72 #include <net/ip6_checksum.h> 73 #include <net/xfrm.h> 74 #include <net/mpls.h> 75 #include <net/mptcp.h> 76 #include <net/mctp.h> 77 #include <net/page_pool/helpers.h> 78 #include <net/dropreason.h> 79 80 #include <linux/uaccess.h> 81 #include <trace/events/skb.h> 82 #include <linux/highmem.h> 83 #include <linux/capability.h> 84 #include <linux/user_namespace.h> 85 #include <linux/indirect_call_wrapper.h> 86 #include <linux/textsearch.h> 87 88 #include "dev.h" 89 #include "sock_destructor.h" 90 91 struct kmem_cache *skbuff_cache __ro_after_init; 92 static struct kmem_cache *skbuff_fclone_cache __ro_after_init; 93 #ifdef CONFIG_SKB_EXTENSIONS 94 static struct kmem_cache *skbuff_ext_cache __ro_after_init; 95 #endif 96 97 98 static struct kmem_cache *skb_small_head_cache __ro_after_init; 99 100 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) 101 102 /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two. 103 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique 104 * size, and we can differentiate heads from skb_small_head_cache 105 * vs system slabs by looking at their size (skb_end_offset()). 106 */ 107 #define SKB_SMALL_HEAD_CACHE_SIZE \ 108 (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? 
\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM \
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)

int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
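/* Illustrative sketch (not part of this file): how a subsystem is expected
 * to use the registration API above. The subsystem id and the FOO_* names
 * below are hypothetical; as the comment in drop_reasons_register_subsys()
 * notes, the list must live in static storage because only
 * RCU_INIT_POINTER() is used.
 *
 *	static const char * const foo_drop_reasons[] = {
 *		"FOO_BAD_HDR",
 *		"FOO_NO_ROUTE",
 *	};
 *	static const struct drop_reason_list drop_reasons_foo = {
 *		.reasons   = foo_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(foo_drop_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_FOO,
 *				     &drop_reasons_foo);
 *	...
 *	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_FOO);
 */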
/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragments. Constrained to systems
 * with a very limited amount of 1K fragments fitting a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor's conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif

struct napi_alloc_cache {
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
282 */ 283 void napi_get_frags_check(struct napi_struct *napi) 284 { 285 struct sk_buff *skb; 286 287 local_bh_disable(); 288 skb = napi_get_frags(napi); 289 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); 290 napi_free_frags(napi); 291 local_bh_enable(); 292 } 293 294 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 295 { 296 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 297 298 fragsz = SKB_DATA_ALIGN(fragsz); 299 300 return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); 301 } 302 EXPORT_SYMBOL(__napi_alloc_frag_align); 303 304 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 305 { 306 void *data; 307 308 fragsz = SKB_DATA_ALIGN(fragsz); 309 if (in_hardirq() || irqs_disabled()) { 310 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); 311 312 data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask); 313 } else { 314 struct napi_alloc_cache *nc; 315 316 local_bh_disable(); 317 nc = this_cpu_ptr(&napi_alloc_cache); 318 data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); 319 local_bh_enable(); 320 } 321 return data; 322 } 323 EXPORT_SYMBOL(__netdev_alloc_frag_align); 324 325 static struct sk_buff *napi_skb_cache_get(void) 326 { 327 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 328 struct sk_buff *skb; 329 330 if (unlikely(!nc->skb_count)) { 331 nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache, 332 GFP_ATOMIC, 333 NAPI_SKB_CACHE_BULK, 334 nc->skb_cache); 335 if (unlikely(!nc->skb_count)) 336 return NULL; 337 } 338 339 skb = nc->skb_cache[--nc->skb_count]; 340 kasan_unpoison_object_data(skbuff_cache, skb); 341 342 return skb; 343 } 344 345 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, 346 unsigned int size) 347 { 348 struct skb_shared_info *shinfo; 349 350 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 351 352 /* Assumes caller memset cleared SKB */ 353 skb->truesize = SKB_TRUESIZE(size); 354 refcount_set(&skb->users, 1); 355 skb->head = data; 356 skb->data = data; 357 skb_reset_tail_pointer(skb); 358 skb_set_end_offset(skb, size); 359 skb->mac_header = (typeof(skb->mac_header))~0U; 360 skb->transport_header = (typeof(skb->transport_header))~0U; 361 skb->alloc_cpu = raw_smp_processor_id(); 362 /* make sure we initialize shinfo sequentially */ 363 shinfo = skb_shinfo(skb); 364 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 365 atomic_set(&shinfo->dataref, 1); 366 367 skb_set_kcov_handle(skb, kcov_common_handle()); 368 } 369 370 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, 371 unsigned int *size) 372 { 373 void *resized; 374 375 /* Must find the allocation size (and grow it to match). */ 376 *size = ksize(data); 377 /* krealloc() will immediately return "data" when 378 * "ksize(data)" is requested: it is the existing upper 379 * bounds. As a result, GFP_ATOMIC will be ignored. Note 380 * that this "new" pointer needs to be passed back to the 381 * caller for use so the __alloc_size hinting will be 382 * tracked correctly. 383 */ 384 resized = krealloc(data, *size, GFP_ATOMIC); 385 WARN_ON_ONCE(resized != data); 386 return resized; 387 } 388 389 /* build_skb() variant which can operate on slab buffers. 390 * Note that this should be used sparingly as slab buffers 391 * cannot be combined efficiently by GRO! 
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, driver allocates only data buffer where NIC put incoming frame
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
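/* Illustrative sketch (not part of this file): the driver-side pattern the
 * __build_skb() kernel-doc above describes. rx_buf_va, frame_len, netdev and
 * napi are hypothetical driver variables; the buffer is assumed to come from
 * the page allocator with NET_SKB_PAD headroom and skb_shared_info tailroom
 * already accounted for in its size.
 *
 *	unsigned int buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	struct sk_buff *skb = build_skb(rx_buf_va, buf_size);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(napi, skb);
 *	}
 */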
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);
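/* Illustrative sketch (not part of this file): pairing the percpu frag
 * allocator with napi_build_skb() from a NAPI poll handler, which is the
 * context that lets both calls use the percpu caches. bufsz, hdr_len and
 * rx_desc_hdr are hypothetical driver-side names.
 *
 *	unsigned int bufsz = SKB_DATA_ALIGN(NET_SKB_PAD + hdr_len) +
 *			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *va = napi_alloc_frag(bufsz);
 *	struct sk_buff *skb;
 *
 *	if (va) {
 *		memcpy(va + NET_SKB_PAD, rx_desc_hdr, hdr_len);
 *		skb = napi_build_skb(va, bufsz);
 *		if (!skb)
 *			skb_free_frag(va);
 *	}
 */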
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(skb_small_head_cache,
					    flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					    node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
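/* Illustrative sketch (not part of this file): typical use of the plain
 * alloc_skb() wrapper around __alloc_skb(). hlen, payload and payload_len
 * are hypothetical; since the returned buffer has no headroom, the caller
 * reserves space for protocol headers first and fills payload with
 * skb_put_data(), pushing headers later with skb_push().
 *
 *	struct sk_buff *skb = alloc_skb(hlen + payload_len, GFP_KERNEL);
 *
 *	if (skb) {
 *		skb_reserve(skb, hlen);
 *		skb_put_data(skb, payload, payload_len);
 *	}
 */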
/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 * When the small frag allocator is available, prefer it over kmalloc
	 * for small fragments
	 */
	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
		/* we are artificially inflating the allocation size, but
		 * that is not as bad as it may look, as:
		 * - 'len' less than GRO_MAX_HEAD makes little sense
		 * - On most systems, larger 'len' values lead to fragment
		 *   size above 512 bytes
		 * - kmalloc would use the kmalloc-1k slab for such values
		 * - Builds with smaller GRO_MAX_HEAD will very likely do
		 *   little networking, as that implies no WiFi and no
		 *   tunnels support, and 32 bits arches.
814 */ 815 len = SZ_1K; 816 817 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); 818 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); 819 } else { 820 len = SKB_HEAD_ALIGN(len); 821 822 data = page_frag_alloc(&nc->page, len, gfp_mask); 823 pfmemalloc = nc->page.pfmemalloc; 824 } 825 826 if (unlikely(!data)) 827 return NULL; 828 829 skb = __napi_build_skb(data, len); 830 if (unlikely(!skb)) { 831 skb_free_frag(data); 832 return NULL; 833 } 834 835 if (pfmemalloc) 836 skb->pfmemalloc = 1; 837 skb->head_frag = 1; 838 839 skb_success: 840 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 841 skb->dev = napi->dev; 842 843 skb_fail: 844 return skb; 845 } 846 EXPORT_SYMBOL(__napi_alloc_skb); 847 848 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 849 int size, unsigned int truesize) 850 { 851 DEBUG_NET_WARN_ON_ONCE(size > truesize); 852 853 skb_fill_page_desc(skb, i, page, off, size); 854 skb->len += size; 855 skb->data_len += size; 856 skb->truesize += truesize; 857 } 858 EXPORT_SYMBOL(skb_add_rx_frag); 859 860 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 861 unsigned int truesize) 862 { 863 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 864 865 DEBUG_NET_WARN_ON_ONCE(size > truesize); 866 867 skb_frag_size_add(frag, size); 868 skb->len += size; 869 skb->data_len += size; 870 skb->truesize += truesize; 871 } 872 EXPORT_SYMBOL(skb_coalesce_rx_frag); 873 874 static void skb_drop_list(struct sk_buff **listp) 875 { 876 kfree_skb_list(*listp); 877 *listp = NULL; 878 } 879 880 static inline void skb_drop_fraglist(struct sk_buff *skb) 881 { 882 skb_drop_list(&skb_shinfo(skb)->frag_list); 883 } 884 885 static void skb_clone_fraglist(struct sk_buff *skb) 886 { 887 struct sk_buff *list; 888 889 skb_walk_frags(skb, list) 890 skb_get(list); 891 } 892 893 static bool is_pp_page(struct page *page) 894 { 895 return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; 896 } 897 898 #if IS_ENABLED(CONFIG_PAGE_POOL) 899 bool napi_pp_put_page(struct page *page, bool napi_safe) 900 { 901 bool allow_direct = false; 902 struct page_pool *pp; 903 904 page = compound_head(page); 905 906 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 907 * in order to preserve any existing bits, such as bit 0 for the 908 * head page of compound page and bit 1 for pfmemalloc page, so 909 * mask those bits for freeing side when doing below checking, 910 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 911 * to avoid recycling the pfmemalloc page. 912 */ 913 if (unlikely(!is_pp_page(page))) 914 return false; 915 916 pp = page->pp; 917 918 /* Allow direct recycle if we have reasons to believe that we are 919 * in the same context as the consumer would run, so there's 920 * no possible race. 921 * __page_pool_put_page() makes sure we're not in hardirq context 922 * and interrupts are enabled prior to accessing the cache. 923 */ 924 if (napi_safe || in_softirq()) { 925 const struct napi_struct *napi = READ_ONCE(pp->p.napi); 926 927 allow_direct = napi && 928 READ_ONCE(napi->list_owner) == smp_processor_id(); 929 } 930 931 /* Driver set this to memory recycling info. Reset it on recycle. 932 * This will *not* work for NIC using a split-page memory model. 933 * The page will be returned to the pool here regardless of the 934 * 'flipped' fragment being in use or not. 
	 */
	page_pool_put_full_page(pp, page, allow_direct);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(virt_to_page(data), napi_safe);
}

/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb: page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of a skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct page *head_page;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
		if (likely(is_pp_page(head_page)))
			page_pool_ref_page(head_page);
		else
			page_ref_inc(head_page);
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb, bool napi_safe)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head, napi_safe))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
			     bool napi_safe)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb, napi_safe);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 * Free an skbuff by memory without cleaning the state.
1044 */ 1045 static void kfree_skbmem(struct sk_buff *skb) 1046 { 1047 struct sk_buff_fclones *fclones; 1048 1049 switch (skb->fclone) { 1050 case SKB_FCLONE_UNAVAILABLE: 1051 kmem_cache_free(skbuff_cache, skb); 1052 return; 1053 1054 case SKB_FCLONE_ORIG: 1055 fclones = container_of(skb, struct sk_buff_fclones, skb1); 1056 1057 /* We usually free the clone (TX completion) before original skb 1058 * This test would have no chance to be true for the clone, 1059 * while here, branch prediction will be good. 1060 */ 1061 if (refcount_read(&fclones->fclone_ref) == 1) 1062 goto fastpath; 1063 break; 1064 1065 default: /* SKB_FCLONE_CLONE */ 1066 fclones = container_of(skb, struct sk_buff_fclones, skb2); 1067 break; 1068 } 1069 if (!refcount_dec_and_test(&fclones->fclone_ref)) 1070 return; 1071 fastpath: 1072 kmem_cache_free(skbuff_fclone_cache, fclones); 1073 } 1074 1075 void skb_release_head_state(struct sk_buff *skb) 1076 { 1077 skb_dst_drop(skb); 1078 if (skb->destructor) { 1079 DEBUG_NET_WARN_ON_ONCE(in_hardirq()); 1080 skb->destructor(skb); 1081 } 1082 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 1083 nf_conntrack_put(skb_nfct(skb)); 1084 #endif 1085 skb_ext_put(skb); 1086 } 1087 1088 /* Free everything but the sk_buff shell. */ 1089 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason, 1090 bool napi_safe) 1091 { 1092 skb_release_head_state(skb); 1093 if (likely(skb->head)) 1094 skb_release_data(skb, reason, napi_safe); 1095 } 1096 1097 /** 1098 * __kfree_skb - private function 1099 * @skb: buffer 1100 * 1101 * Free an sk_buff. Release anything attached to the buffer. 1102 * Clean the state. This is an internal helper function. Users should 1103 * always call kfree_skb 1104 */ 1105 1106 void __kfree_skb(struct sk_buff *skb) 1107 { 1108 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false); 1109 kfree_skbmem(skb); 1110 } 1111 EXPORT_SYMBOL(__kfree_skb); 1112 1113 static __always_inline 1114 bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1115 { 1116 if (unlikely(!skb_unref(skb))) 1117 return false; 1118 1119 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET || 1120 u32_get_bits(reason, 1121 SKB_DROP_REASON_SUBSYS_MASK) >= 1122 SKB_DROP_REASON_SUBSYS_NUM); 1123 1124 if (reason == SKB_CONSUMED) 1125 trace_consume_skb(skb, __builtin_return_address(0)); 1126 else 1127 trace_kfree_skb(skb, __builtin_return_address(0), reason); 1128 return true; 1129 } 1130 1131 /** 1132 * kfree_skb_reason - free an sk_buff with special reason 1133 * @skb: buffer to free 1134 * @reason: reason why this skb is dropped 1135 * 1136 * Drop a reference to the buffer and free it if the usage count has 1137 * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' 1138 * tracepoint. 
1139 */ 1140 void __fix_address 1141 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1142 { 1143 if (__kfree_skb_reason(skb, reason)) 1144 __kfree_skb(skb); 1145 } 1146 EXPORT_SYMBOL(kfree_skb_reason); 1147 1148 #define KFREE_SKB_BULK_SIZE 16 1149 1150 struct skb_free_array { 1151 unsigned int skb_count; 1152 void *skb_array[KFREE_SKB_BULK_SIZE]; 1153 }; 1154 1155 static void kfree_skb_add_bulk(struct sk_buff *skb, 1156 struct skb_free_array *sa, 1157 enum skb_drop_reason reason) 1158 { 1159 /* if SKB is a clone, don't handle this case */ 1160 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { 1161 __kfree_skb(skb); 1162 return; 1163 } 1164 1165 skb_release_all(skb, reason, false); 1166 sa->skb_array[sa->skb_count++] = skb; 1167 1168 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { 1169 kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE, 1170 sa->skb_array); 1171 sa->skb_count = 0; 1172 } 1173 } 1174 1175 void __fix_address 1176 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) 1177 { 1178 struct skb_free_array sa; 1179 1180 sa.skb_count = 0; 1181 1182 while (segs) { 1183 struct sk_buff *next = segs->next; 1184 1185 if (__kfree_skb_reason(segs, reason)) { 1186 skb_poison_list(segs); 1187 kfree_skb_add_bulk(segs, &sa, reason); 1188 } 1189 1190 segs = next; 1191 } 1192 1193 if (sa.skb_count) 1194 kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array); 1195 } 1196 EXPORT_SYMBOL(kfree_skb_list_reason); 1197 1198 /* Dump skb information and contents. 1199 * 1200 * Must only be called from net_ratelimit()-ed paths. 1201 * 1202 * Dumps whole packets if full_pkt, only headers otherwise. 1203 */ 1204 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 1205 { 1206 struct skb_shared_info *sh = skb_shinfo(skb); 1207 struct net_device *dev = skb->dev; 1208 struct sock *sk = skb->sk; 1209 struct sk_buff *list_skb; 1210 bool has_mac, has_trans; 1211 int headroom, tailroom; 1212 int i, len, seg_len; 1213 1214 if (full_pkt) 1215 len = skb->len; 1216 else 1217 len = min_t(int, skb->len, MAX_HEADER + 128); 1218 1219 headroom = skb_headroom(skb); 1220 tailroom = skb_tailroom(skb); 1221 1222 has_mac = skb_mac_header_was_set(skb); 1223 has_trans = skb_transport_header_was_set(skb); 1224 1225 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 1226 "mac=(%d,%d) net=(%d,%d) trans=%d\n" 1227 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 1228 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 1229 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", 1230 level, skb->len, headroom, skb_headlen(skb), tailroom, 1231 has_mac ? skb->mac_header : -1, 1232 has_mac ? skb_mac_header_len(skb) : -1, 1233 skb->network_header, 1234 has_trans ? skb_network_header_len(skb) : -1, 1235 has_trans ? 
skb->transport_header : -1, 1236 sh->tx_flags, sh->nr_frags, 1237 sh->gso_size, sh->gso_type, sh->gso_segs, 1238 skb->csum, skb->ip_summed, skb->csum_complete_sw, 1239 skb->csum_valid, skb->csum_level, 1240 skb->hash, skb->sw_hash, skb->l4_hash, 1241 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); 1242 1243 if (dev) 1244 printk("%sdev name=%s feat=%pNF\n", 1245 level, dev->name, &dev->features); 1246 if (sk) 1247 printk("%ssk family=%hu type=%u proto=%u\n", 1248 level, sk->sk_family, sk->sk_type, sk->sk_protocol); 1249 1250 if (full_pkt && headroom) 1251 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, 1252 16, 1, skb->head, headroom, false); 1253 1254 seg_len = min_t(int, skb_headlen(skb), len); 1255 if (seg_len) 1256 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 1257 16, 1, skb->data, seg_len, false); 1258 len -= seg_len; 1259 1260 if (full_pkt && tailroom) 1261 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, 1262 16, 1, skb_tail_pointer(skb), tailroom, false); 1263 1264 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { 1265 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1266 u32 p_off, p_len, copied; 1267 struct page *p; 1268 u8 *vaddr; 1269 1270 skb_frag_foreach_page(frag, skb_frag_off(frag), 1271 skb_frag_size(frag), p, p_off, p_len, 1272 copied) { 1273 seg_len = min_t(int, p_len, len); 1274 vaddr = kmap_atomic(p); 1275 print_hex_dump(level, "skb frag: ", 1276 DUMP_PREFIX_OFFSET, 1277 16, 1, vaddr + p_off, seg_len, false); 1278 kunmap_atomic(vaddr); 1279 len -= seg_len; 1280 if (!len) 1281 break; 1282 } 1283 } 1284 1285 if (full_pkt && skb_has_frag_list(skb)) { 1286 printk("skb fraglist:\n"); 1287 skb_walk_frags(skb, list_skb) 1288 skb_dump(level, list_skb, true); 1289 } 1290 } 1291 EXPORT_SYMBOL(skb_dump); 1292 1293 /** 1294 * skb_tx_error - report an sk_buff xmit error 1295 * @skb: buffer that triggered an error 1296 * 1297 * Report xmit error if a device callback is tracking this skb. 1298 * skb must be freed afterwards. 
1299 */ 1300 void skb_tx_error(struct sk_buff *skb) 1301 { 1302 if (skb) { 1303 skb_zcopy_downgrade_managed(skb); 1304 skb_zcopy_clear(skb, true); 1305 } 1306 } 1307 EXPORT_SYMBOL(skb_tx_error); 1308 1309 #ifdef CONFIG_TRACEPOINTS 1310 /** 1311 * consume_skb - free an skbuff 1312 * @skb: buffer to free 1313 * 1314 * Drop a ref to the buffer and free it if the usage count has hit zero 1315 * Functions identically to kfree_skb, but kfree_skb assumes that the frame 1316 * is being dropped after a failure and notes that 1317 */ 1318 void consume_skb(struct sk_buff *skb) 1319 { 1320 if (!skb_unref(skb)) 1321 return; 1322 1323 trace_consume_skb(skb, __builtin_return_address(0)); 1324 __kfree_skb(skb); 1325 } 1326 EXPORT_SYMBOL(consume_skb); 1327 #endif 1328 1329 /** 1330 * __consume_stateless_skb - free an skbuff, assuming it is stateless 1331 * @skb: buffer to free 1332 * 1333 * Alike consume_skb(), but this variant assumes that this is the last 1334 * skb reference and all the head states have been already dropped 1335 */ 1336 void __consume_stateless_skb(struct sk_buff *skb) 1337 { 1338 trace_consume_skb(skb, __builtin_return_address(0)); 1339 skb_release_data(skb, SKB_CONSUMED, false); 1340 kfree_skbmem(skb); 1341 } 1342 1343 static void napi_skb_cache_put(struct sk_buff *skb) 1344 { 1345 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 1346 u32 i; 1347 1348 kasan_poison_object_data(skbuff_cache, skb); 1349 nc->skb_cache[nc->skb_count++] = skb; 1350 1351 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { 1352 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) 1353 kasan_unpoison_object_data(skbuff_cache, 1354 nc->skb_cache[i]); 1355 1356 kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF, 1357 nc->skb_cache + NAPI_SKB_CACHE_HALF); 1358 nc->skb_count = NAPI_SKB_CACHE_HALF; 1359 } 1360 } 1361 1362 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) 1363 { 1364 skb_release_all(skb, reason, true); 1365 napi_skb_cache_put(skb); 1366 } 1367 1368 void napi_skb_free_stolen_head(struct sk_buff *skb) 1369 { 1370 if (unlikely(skb->slow_gro)) { 1371 nf_reset_ct(skb); 1372 skb_dst_drop(skb); 1373 skb_ext_put(skb); 1374 skb_orphan(skb); 1375 skb->slow_gro = 0; 1376 } 1377 napi_skb_cache_put(skb); 1378 } 1379 1380 void napi_consume_skb(struct sk_buff *skb, int budget) 1381 { 1382 /* Zero budget indicate non-NAPI context called us, like netpoll */ 1383 if (unlikely(!budget)) { 1384 dev_consume_skb_any(skb); 1385 return; 1386 } 1387 1388 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 1389 1390 if (!skb_unref(skb)) 1391 return; 1392 1393 /* if reaching here SKB is ready to free */ 1394 trace_consume_skb(skb, __builtin_return_address(0)); 1395 1396 /* if SKB is a clone, don't handle this case */ 1397 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 1398 __kfree_skb(skb); 1399 return; 1400 } 1401 1402 skb_release_all(skb, SKB_CONSUMED, !!budget); 1403 napi_skb_cache_put(skb); 1404 } 1405 EXPORT_SYMBOL(napi_consume_skb); 1406 1407 /* Make sure a field is contained by headers group */ 1408 #define CHECK_SKB_FIELD(field) \ 1409 BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ 1410 offsetof(struct sk_buff, headers.field)); \ 1411 1412 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 1413 { 1414 new->tstamp = old->tstamp; 1415 /* We do not copy old->sk */ 1416 new->dev = old->dev; 1417 memcpy(new->cb, old->cb, sizeof(old->cb)); 1418 skb_dst_copy(new, old); 1419 __skb_ext_copy(new, old); 1420 __nf_copy(new, old, false); 1421 1422 /* Note : this field 
could be in the headers group. 1423 * It is not yet because we do not want to have a 16 bit hole 1424 */ 1425 new->queue_mapping = old->queue_mapping; 1426 1427 memcpy(&new->headers, &old->headers, sizeof(new->headers)); 1428 CHECK_SKB_FIELD(protocol); 1429 CHECK_SKB_FIELD(csum); 1430 CHECK_SKB_FIELD(hash); 1431 CHECK_SKB_FIELD(priority); 1432 CHECK_SKB_FIELD(skb_iif); 1433 CHECK_SKB_FIELD(vlan_proto); 1434 CHECK_SKB_FIELD(vlan_tci); 1435 CHECK_SKB_FIELD(transport_header); 1436 CHECK_SKB_FIELD(network_header); 1437 CHECK_SKB_FIELD(mac_header); 1438 CHECK_SKB_FIELD(inner_protocol); 1439 CHECK_SKB_FIELD(inner_transport_header); 1440 CHECK_SKB_FIELD(inner_network_header); 1441 CHECK_SKB_FIELD(inner_mac_header); 1442 CHECK_SKB_FIELD(mark); 1443 #ifdef CONFIG_NETWORK_SECMARK 1444 CHECK_SKB_FIELD(secmark); 1445 #endif 1446 #ifdef CONFIG_NET_RX_BUSY_POLL 1447 CHECK_SKB_FIELD(napi_id); 1448 #endif 1449 CHECK_SKB_FIELD(alloc_cpu); 1450 #ifdef CONFIG_XPS 1451 CHECK_SKB_FIELD(sender_cpu); 1452 #endif 1453 #ifdef CONFIG_NET_SCHED 1454 CHECK_SKB_FIELD(tc_index); 1455 #endif 1456 1457 } 1458 1459 /* 1460 * You should not add any new code to this function. Add it to 1461 * __copy_skb_header above instead. 1462 */ 1463 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 1464 { 1465 #define C(x) n->x = skb->x 1466 1467 n->next = n->prev = NULL; 1468 n->sk = NULL; 1469 __copy_skb_header(n, skb); 1470 1471 C(len); 1472 C(data_len); 1473 C(mac_len); 1474 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 1475 n->cloned = 1; 1476 n->nohdr = 0; 1477 n->peeked = 0; 1478 C(pfmemalloc); 1479 C(pp_recycle); 1480 n->destructor = NULL; 1481 C(tail); 1482 C(end); 1483 C(head); 1484 C(head_frag); 1485 C(data); 1486 C(truesize); 1487 refcount_set(&n->users, 1); 1488 1489 atomic_inc(&(skb_shinfo(skb)->dataref)); 1490 skb->cloned = 1; 1491 1492 return n; 1493 #undef C 1494 } 1495 1496 /** 1497 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg 1498 * @first: first sk_buff of the msg 1499 */ 1500 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) 1501 { 1502 struct sk_buff *n; 1503 1504 n = alloc_skb(0, GFP_ATOMIC); 1505 if (!n) 1506 return NULL; 1507 1508 n->len = first->len; 1509 n->data_len = first->len; 1510 n->truesize = first->truesize; 1511 1512 skb_shinfo(n)->frag_list = first; 1513 1514 __copy_skb_header(n, first); 1515 n->destructor = NULL; 1516 1517 return n; 1518 } 1519 EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1520 1521 /** 1522 * skb_morph - morph one skb into another 1523 * @dst: the skb to receive the contents 1524 * @src: the skb to supply the contents 1525 * 1526 * This is identical to skb_clone except that the target skb is 1527 * supplied by the user. 1528 * 1529 * The target skb is returned upon exit. 1530 */ 1531 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1532 { 1533 skb_release_all(dst, SKB_CONSUMED, false); 1534 return __skb_clone(dst, src); 1535 } 1536 EXPORT_SYMBOL_GPL(skb_morph); 1537 1538 int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1539 { 1540 unsigned long max_pg, num_pg, new_pg, old_pg, rlim; 1541 struct user_struct *user; 1542 1543 if (capable(CAP_IPC_LOCK) || !size) 1544 return 0; 1545 1546 rlim = rlimit(RLIMIT_MEMLOCK); 1547 if (rlim == RLIM_INFINITY) 1548 return 0; 1549 1550 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1551 max_pg = rlim >> PAGE_SHIFT; 1552 user = mmp->user ? 
: current_user(); 1553 1554 old_pg = atomic_long_read(&user->locked_vm); 1555 do { 1556 new_pg = old_pg + num_pg; 1557 if (new_pg > max_pg) 1558 return -ENOBUFS; 1559 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); 1560 1561 if (!mmp->user) { 1562 mmp->user = get_uid(user); 1563 mmp->num_pg = num_pg; 1564 } else { 1565 mmp->num_pg += num_pg; 1566 } 1567 1568 return 0; 1569 } 1570 EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1571 1572 void mm_unaccount_pinned_pages(struct mmpin *mmp) 1573 { 1574 if (mmp->user) { 1575 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1576 free_uid(mmp->user); 1577 } 1578 } 1579 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1580 1581 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 1582 { 1583 struct ubuf_info_msgzc *uarg; 1584 struct sk_buff *skb; 1585 1586 WARN_ON_ONCE(!in_task()); 1587 1588 skb = sock_omalloc(sk, 0, GFP_KERNEL); 1589 if (!skb) 1590 return NULL; 1591 1592 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 1593 uarg = (void *)skb->cb; 1594 uarg->mmp.user = NULL; 1595 1596 if (mm_account_pinned_pages(&uarg->mmp, size)) { 1597 kfree_skb(skb); 1598 return NULL; 1599 } 1600 1601 uarg->ubuf.callback = msg_zerocopy_callback; 1602 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 1603 uarg->len = 1; 1604 uarg->bytelen = size; 1605 uarg->zerocopy = 1; 1606 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; 1607 refcount_set(&uarg->ubuf.refcnt, 1); 1608 sock_hold(sk); 1609 1610 return &uarg->ubuf; 1611 } 1612 1613 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) 1614 { 1615 return container_of((void *)uarg, struct sk_buff, cb); 1616 } 1617 1618 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1619 struct ubuf_info *uarg) 1620 { 1621 if (uarg) { 1622 struct ubuf_info_msgzc *uarg_zc; 1623 const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 1624 u32 bytelen, next; 1625 1626 /* there might be non MSG_ZEROCOPY users */ 1627 if (uarg->callback != msg_zerocopy_callback) 1628 return NULL; 1629 1630 /* realloc only when socket is locked (TCP, UDP cork), 1631 * so uarg->len and sk_zckey access is serialized 1632 */ 1633 if (!sock_owned_by_user(sk)) { 1634 WARN_ON_ONCE(1); 1635 return NULL; 1636 } 1637 1638 uarg_zc = uarg_to_msgzc(uarg); 1639 bytelen = uarg_zc->bytelen + size; 1640 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { 1641 /* TCP can create new skb to attach new uarg */ 1642 if (sk->sk_type == SOCK_STREAM) 1643 goto new_alloc; 1644 return NULL; 1645 } 1646 1647 next = (u32)atomic_read(&sk->sk_zckey); 1648 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { 1649 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) 1650 return NULL; 1651 uarg_zc->len++; 1652 uarg_zc->bytelen = bytelen; 1653 atomic_set(&sk->sk_zckey, ++next); 1654 1655 /* no extra ref when appending to datagram (MSG_MORE) */ 1656 if (sk->sk_type == SOCK_STREAM) 1657 net_zcopy_get(uarg); 1658 1659 return uarg; 1660 } 1661 } 1662 1663 new_alloc: 1664 return msg_zerocopy_alloc(sk, size); 1665 } 1666 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 1667 1668 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 1669 { 1670 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 1671 u32 old_lo, old_hi; 1672 u64 sum_len; 1673 1674 old_lo = serr->ee.ee_info; 1675 old_hi = serr->ee.ee_data; 1676 sum_len = old_hi - old_lo + 1ULL + len; 1677 1678 if (sum_len >= (1ULL << 32)) 1679 return false; 1680 1681 if (lo != old_hi + 1) 1682 return false; 1683 1684 serr->ee.ee_data 
+= len; 1685 return true; 1686 } 1687 1688 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) 1689 { 1690 struct sk_buff *tail, *skb = skb_from_uarg(uarg); 1691 struct sock_exterr_skb *serr; 1692 struct sock *sk = skb->sk; 1693 struct sk_buff_head *q; 1694 unsigned long flags; 1695 bool is_zerocopy; 1696 u32 lo, hi; 1697 u16 len; 1698 1699 mm_unaccount_pinned_pages(&uarg->mmp); 1700 1701 /* if !len, there was only 1 call, and it was aborted 1702 * so do not queue a completion notification 1703 */ 1704 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 1705 goto release; 1706 1707 len = uarg->len; 1708 lo = uarg->id; 1709 hi = uarg->id + len - 1; 1710 is_zerocopy = uarg->zerocopy; 1711 1712 serr = SKB_EXT_ERR(skb); 1713 memset(serr, 0, sizeof(*serr)); 1714 serr->ee.ee_errno = 0; 1715 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 1716 serr->ee.ee_data = hi; 1717 serr->ee.ee_info = lo; 1718 if (!is_zerocopy) 1719 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 1720 1721 q = &sk->sk_error_queue; 1722 spin_lock_irqsave(&q->lock, flags); 1723 tail = skb_peek_tail(q); 1724 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 1725 !skb_zerocopy_notify_extend(tail, lo, len)) { 1726 __skb_queue_tail(q, skb); 1727 skb = NULL; 1728 } 1729 spin_unlock_irqrestore(&q->lock, flags); 1730 1731 sk_error_report(sk); 1732 1733 release: 1734 consume_skb(skb); 1735 sock_put(sk); 1736 } 1737 1738 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 1739 bool success) 1740 { 1741 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); 1742 1743 uarg_zc->zerocopy = uarg_zc->zerocopy & success; 1744 1745 if (refcount_dec_and_test(&uarg->refcnt)) 1746 __msg_zerocopy_callback(uarg_zc); 1747 } 1748 EXPORT_SYMBOL_GPL(msg_zerocopy_callback); 1749 1750 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1751 { 1752 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; 1753 1754 atomic_dec(&sk->sk_zckey); 1755 uarg_to_msgzc(uarg)->len--; 1756 1757 if (have_uref) 1758 msg_zerocopy_callback(NULL, uarg, true); 1759 } 1760 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 1761 1762 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1763 struct msghdr *msg, int len, 1764 struct ubuf_info *uarg) 1765 { 1766 struct ubuf_info *orig_uarg = skb_zcopy(skb); 1767 int err, orig_len = skb->len; 1768 1769 /* An skb can only point to one uarg. This edge case happens when 1770 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. 1771 */ 1772 if (orig_uarg && uarg != orig_uarg) 1773 return -EEXIST; 1774 1775 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); 1776 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1777 struct sock *save_sk = skb->sk; 1778 1779 /* Streams do not free skb on error. Reset to prev state. 
*/ 1780 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); 1781 skb->sk = sk; 1782 ___pskb_trim(skb, orig_len); 1783 skb->sk = save_sk; 1784 return err; 1785 } 1786 1787 skb_zcopy_set(skb, uarg, NULL); 1788 return skb->len - orig_len; 1789 } 1790 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 1791 1792 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) 1793 { 1794 int i; 1795 1796 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; 1797 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1798 skb_frag_ref(skb, i); 1799 } 1800 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); 1801 1802 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 1803 gfp_t gfp_mask) 1804 { 1805 if (skb_zcopy(orig)) { 1806 if (skb_zcopy(nskb)) { 1807 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 1808 if (!gfp_mask) { 1809 WARN_ON_ONCE(1); 1810 return -ENOMEM; 1811 } 1812 if (skb_uarg(nskb) == skb_uarg(orig)) 1813 return 0; 1814 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 1815 return -EIO; 1816 } 1817 skb_zcopy_set(nskb, skb_uarg(orig), NULL); 1818 } 1819 return 0; 1820 } 1821 1822 /** 1823 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 1824 * @skb: the skb to modify 1825 * @gfp_mask: allocation priority 1826 * 1827 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 1828 * It will copy all frags into kernel and drop the reference 1829 * to userspace pages. 1830 * 1831 * If this function is called from an interrupt gfp_mask() must be 1832 * %GFP_ATOMIC. 1833 * 1834 * Returns 0 on success or a negative error code on failure 1835 * to allocate kernel memory to copy to. 1836 */ 1837 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1838 { 1839 int num_frags = skb_shinfo(skb)->nr_frags; 1840 struct page *page, *head = NULL; 1841 int i, order, psize, new_frags; 1842 u32 d_off; 1843 1844 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1845 return -EINVAL; 1846 1847 if (!num_frags) 1848 goto release; 1849 1850 /* We might have to allocate high order pages, so compute what minimum 1851 * page order is needed. 
1852 */ 1853 order = 0; 1854 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1855 order++; 1856 psize = (PAGE_SIZE << order); 1857 1858 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1859 for (i = 0; i < new_frags; i++) { 1860 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1861 if (!page) { 1862 while (head) { 1863 struct page *next = (struct page *)page_private(head); 1864 put_page(head); 1865 head = next; 1866 } 1867 return -ENOMEM; 1868 } 1869 set_page_private(page, (unsigned long)head); 1870 head = page; 1871 } 1872 1873 page = head; 1874 d_off = 0; 1875 for (i = 0; i < num_frags; i++) { 1876 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1877 u32 p_off, p_len, copied; 1878 struct page *p; 1879 u8 *vaddr; 1880 1881 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1882 p, p_off, p_len, copied) { 1883 u32 copy, done = 0; 1884 vaddr = kmap_atomic(p); 1885 1886 while (done < p_len) { 1887 if (d_off == psize) { 1888 d_off = 0; 1889 page = (struct page *)page_private(page); 1890 } 1891 copy = min_t(u32, psize - d_off, p_len - done); 1892 memcpy(page_address(page) + d_off, 1893 vaddr + p_off + done, copy); 1894 done += copy; 1895 d_off += copy; 1896 } 1897 kunmap_atomic(vaddr); 1898 } 1899 } 1900 1901 /* skb frags release userspace buffers */ 1902 for (i = 0; i < num_frags; i++) 1903 skb_frag_unref(skb, i); 1904 1905 /* skb frags point to kernel buffers */ 1906 for (i = 0; i < new_frags - 1; i++) { 1907 __skb_fill_page_desc(skb, i, head, 0, psize); 1908 head = (struct page *)page_private(head); 1909 } 1910 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); 1911 skb_shinfo(skb)->nr_frags = new_frags; 1912 1913 release: 1914 skb_zcopy_clear(skb, false); 1915 return 0; 1916 } 1917 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 1918 1919 /** 1920 * skb_clone - duplicate an sk_buff 1921 * @skb: buffer to clone 1922 * @gfp_mask: allocation priority 1923 * 1924 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 1925 * copies share the same packet data but not structure. The new 1926 * buffer has a reference count of 1. If the allocation fails the 1927 * function returns %NULL otherwise the new buffer is returned. 1928 * 1929 * If this function is called from an interrupt gfp_mask() must be 1930 * %GFP_ATOMIC. 
1931 */ 1932 1933 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 1934 { 1935 struct sk_buff_fclones *fclones = container_of(skb, 1936 struct sk_buff_fclones, 1937 skb1); 1938 struct sk_buff *n; 1939 1940 if (skb_orphan_frags(skb, gfp_mask)) 1941 return NULL; 1942 1943 if (skb->fclone == SKB_FCLONE_ORIG && 1944 refcount_read(&fclones->fclone_ref) == 1) { 1945 n = &fclones->skb2; 1946 refcount_set(&fclones->fclone_ref, 2); 1947 n->fclone = SKB_FCLONE_CLONE; 1948 } else { 1949 if (skb_pfmemalloc(skb)) 1950 gfp_mask |= __GFP_MEMALLOC; 1951 1952 n = kmem_cache_alloc(skbuff_cache, gfp_mask); 1953 if (!n) 1954 return NULL; 1955 1956 n->fclone = SKB_FCLONE_UNAVAILABLE; 1957 } 1958 1959 return __skb_clone(n, skb); 1960 } 1961 EXPORT_SYMBOL(skb_clone); 1962 1963 void skb_headers_offset_update(struct sk_buff *skb, int off) 1964 { 1965 /* Only adjust this if it actually is csum_start rather than csum */ 1966 if (skb->ip_summed == CHECKSUM_PARTIAL) 1967 skb->csum_start += off; 1968 /* {transport,network,mac}_header and tail are relative to skb->head */ 1969 skb->transport_header += off; 1970 skb->network_header += off; 1971 if (skb_mac_header_was_set(skb)) 1972 skb->mac_header += off; 1973 skb->inner_transport_header += off; 1974 skb->inner_network_header += off; 1975 skb->inner_mac_header += off; 1976 } 1977 EXPORT_SYMBOL(skb_headers_offset_update); 1978 1979 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 1980 { 1981 __copy_skb_header(new, old); 1982 1983 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 1984 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 1985 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 1986 } 1987 EXPORT_SYMBOL(skb_copy_header); 1988 1989 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 1990 { 1991 if (skb_pfmemalloc(skb)) 1992 return SKB_ALLOC_RX; 1993 return 0; 1994 } 1995 1996 /** 1997 * skb_copy - create private copy of an sk_buff 1998 * @skb: buffer to copy 1999 * @gfp_mask: allocation priority 2000 * 2001 * Make a copy of both an &sk_buff and its data. This is used when the 2002 * caller wishes to modify the data and needs a private copy of the 2003 * data to alter. Returns %NULL on failure or the pointer to the buffer 2004 * on success. The returned buffer has a reference count of 1. 2005 * 2006 * As by-product this function converts non-linear &sk_buff to linear 2007 * one, so that &sk_buff becomes completely private and caller is allowed 2008 * to modify all the data of returned buffer. This means that this 2009 * function is not recommended for use in circumstances when only 2010 * header is going to be modified. Use pskb_copy() instead. 2011 */ 2012 2013 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2014 { 2015 int headerlen = skb_headroom(skb); 2016 unsigned int size = skb_end_offset(skb) + skb->data_len; 2017 struct sk_buff *n = __alloc_skb(size, gfp_mask, 2018 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2019 2020 if (!n) 2021 return NULL; 2022 2023 /* Set the data pointer */ 2024 skb_reserve(n, headerlen); 2025 /* Set the tail pointer and length */ 2026 skb_put(n, skb->len); 2027 2028 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2029 2030 skb_copy_header(n, skb); 2031 return n; 2032 } 2033 EXPORT_SYMBOL(skb_copy); 2034 2035 /** 2036 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2037 * @skb: buffer to copy 2038 * @headroom: headroom of new skb 2039 * @gfp_mask: allocation priority 2040 * @fclone: if true allocate the copy of the skb from the fclone 2041 * cache instead of the head cache; it is recommended to set this 2042 * to true for the cases where the copy will likely be cloned 2043 * 2044 * Make a copy of both an &sk_buff and part of its data, located 2045 * in header. Fragmented data remain shared. This is used when 2046 * the caller wishes to modify only header of &sk_buff and needs 2047 * private copy of the header to alter. Returns %NULL on failure 2048 * or the pointer to the buffer on success. 2049 * The returned buffer has a reference count of 1. 2050 */ 2051 2052 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2053 gfp_t gfp_mask, bool fclone) 2054 { 2055 unsigned int size = skb_headlen(skb) + headroom; 2056 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2057 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2058 2059 if (!n) 2060 goto out; 2061 2062 /* Set the data pointer */ 2063 skb_reserve(n, headroom); 2064 /* Set the tail pointer and length */ 2065 skb_put(n, skb_headlen(skb)); 2066 /* Copy the bytes */ 2067 skb_copy_from_linear_data(skb, n->data, n->len); 2068 2069 n->truesize += skb->data_len; 2070 n->data_len = skb->data_len; 2071 n->len = skb->len; 2072 2073 if (skb_shinfo(skb)->nr_frags) { 2074 int i; 2075 2076 if (skb_orphan_frags(skb, gfp_mask) || 2077 skb_zerocopy_clone(n, skb, gfp_mask)) { 2078 kfree_skb(n); 2079 n = NULL; 2080 goto out; 2081 } 2082 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2083 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2084 skb_frag_ref(skb, i); 2085 } 2086 skb_shinfo(n)->nr_frags = i; 2087 } 2088 2089 if (skb_has_frag_list(skb)) { 2090 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2091 skb_clone_fraglist(n); 2092 } 2093 2094 skb_copy_header(n, skb); 2095 out: 2096 return n; 2097 } 2098 EXPORT_SYMBOL(__pskb_copy_fclone); 2099 2100 /** 2101 * pskb_expand_head - reallocate header of &sk_buff 2102 * @skb: buffer to reallocate 2103 * @nhead: room to add at head 2104 * @ntail: room to add at tail 2105 * @gfp_mask: allocation priority 2106 * 2107 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2108 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2109 * reference count of 1. Returns zero in the case of success or error, 2110 * if expansion failed. In the last case, &sk_buff is not changed. 2111 * 2112 * All the pointers pointing into skb header may change and must be 2113 * reloaded after call to this function. 2114 */ 2115 2116 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2117 gfp_t gfp_mask) 2118 { 2119 unsigned int osize = skb_end_offset(skb); 2120 unsigned int size = osize + nhead + ntail; 2121 long off; 2122 u8 *data; 2123 int i; 2124 2125 BUG_ON(nhead < 0); 2126 2127 BUG_ON(skb_shared(skb)); 2128 2129 skb_zcopy_downgrade_managed(skb); 2130 2131 if (skb_pfmemalloc(skb)) 2132 gfp_mask |= __GFP_MEMALLOC; 2133 2134 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2135 if (!data) 2136 goto nodata; 2137 size = SKB_WITH_OVERHEAD(size); 2138 2139 /* Copy only real data... and, alas, header. This should be 2140 * optimized for the cases when header is void. 
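 *
 * Concretely, the first memcpy() below moves everything from skb->head up
 * to the tail pointer (the old headroom plus the used linear data) into
 * the new buffer shifted by nhead, and the second copies the
 * struct skb_shared_info together with the frag descriptors currently in
 * use to the end of the new buffer.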
2141 */ 2142 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2143 2144 memcpy((struct skb_shared_info *)(data + size), 2145 skb_shinfo(skb), 2146 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2147 2148 /* 2149 * if shinfo is shared we must drop the old head gracefully, but if it 2150 * is not we can just drop the old head and let the existing refcount 2151 * be since all we did is relocate the values 2152 */ 2153 if (skb_cloned(skb)) { 2154 if (skb_orphan_frags(skb, gfp_mask)) 2155 goto nofrags; 2156 if (skb_zcopy(skb)) 2157 refcount_inc(&skb_uarg(skb)->refcnt); 2158 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2159 skb_frag_ref(skb, i); 2160 2161 if (skb_has_frag_list(skb)) 2162 skb_clone_fraglist(skb); 2163 2164 skb_release_data(skb, SKB_CONSUMED, false); 2165 } else { 2166 skb_free_head(skb, false); 2167 } 2168 off = (data + nhead) - skb->head; 2169 2170 skb->head = data; 2171 skb->head_frag = 0; 2172 skb->data += off; 2173 2174 skb_set_end_offset(skb, size); 2175 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2176 off = nhead; 2177 #endif 2178 skb->tail += off; 2179 skb_headers_offset_update(skb, nhead); 2180 skb->cloned = 0; 2181 skb->hdr_len = 0; 2182 skb->nohdr = 0; 2183 atomic_set(&skb_shinfo(skb)->dataref, 1); 2184 2185 skb_metadata_clear(skb); 2186 2187 /* It is not generally safe to change skb->truesize. 2188 * For the moment, we really care of rx path, or 2189 * when skb is orphaned (not attached to a socket). 2190 */ 2191 if (!skb->sk || skb->destructor == sock_edemux) 2192 skb->truesize += size - osize; 2193 2194 return 0; 2195 2196 nofrags: 2197 skb_kfree_head(data, size); 2198 nodata: 2199 return -ENOMEM; 2200 } 2201 EXPORT_SYMBOL(pskb_expand_head); 2202 2203 /* Make private copy of skb with writable head and some headroom */ 2204 2205 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2206 { 2207 struct sk_buff *skb2; 2208 int delta = headroom - skb_headroom(skb); 2209 2210 if (delta <= 0) 2211 skb2 = pskb_copy(skb, GFP_ATOMIC); 2212 else { 2213 skb2 = skb_clone(skb, GFP_ATOMIC); 2214 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2215 GFP_ATOMIC)) { 2216 kfree_skb(skb2); 2217 skb2 = NULL; 2218 } 2219 } 2220 return skb2; 2221 } 2222 EXPORT_SYMBOL(skb_realloc_headroom); 2223 2224 /* Note: We plan to rework this in linux-6.4 */ 2225 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2226 { 2227 unsigned int saved_end_offset, saved_truesize; 2228 struct skb_shared_info *shinfo; 2229 int res; 2230 2231 saved_end_offset = skb_end_offset(skb); 2232 saved_truesize = skb->truesize; 2233 2234 res = pskb_expand_head(skb, 0, 0, pri); 2235 if (res) 2236 return res; 2237 2238 skb->truesize = saved_truesize; 2239 2240 if (likely(skb_end_offset(skb) == saved_end_offset)) 2241 return 0; 2242 2243 /* We can not change skb->end if the original or new value 2244 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2245 */ 2246 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2247 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2248 /* We think this path should not be taken. 2249 * Add a temporary trace to warn us just in case. 2250 */ 2251 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2252 saved_end_offset, skb_end_offset(skb)); 2253 WARN_ON_ONCE(1); 2254 return 0; 2255 } 2256 2257 shinfo = skb_shinfo(skb); 2258 2259 /* We are about to change back skb->end, 2260 * we need to move skb_shinfo() to its new location. 
2261 */ 2262 memmove(skb->head + saved_end_offset, 2263 shinfo, 2264 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2265 2266 skb_set_end_offset(skb, saved_end_offset); 2267 2268 return 0; 2269 } 2270 2271 /** 2272 * skb_expand_head - reallocate header of &sk_buff 2273 * @skb: buffer to reallocate 2274 * @headroom: needed headroom 2275 * 2276 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2277 * if possible; copies skb->sk to new skb as needed 2278 * and frees original skb in case of failures. 2279 * 2280 * It expect increased headroom and generates warning otherwise. 2281 */ 2282 2283 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2284 { 2285 int delta = headroom - skb_headroom(skb); 2286 int osize = skb_end_offset(skb); 2287 struct sock *sk = skb->sk; 2288 2289 if (WARN_ONCE(delta <= 0, 2290 "%s is expecting an increase in the headroom", __func__)) 2291 return skb; 2292 2293 delta = SKB_DATA_ALIGN(delta); 2294 /* pskb_expand_head() might crash, if skb is shared. */ 2295 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2296 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2297 2298 if (unlikely(!nskb)) 2299 goto fail; 2300 2301 if (sk) 2302 skb_set_owner_w(nskb, sk); 2303 consume_skb(skb); 2304 skb = nskb; 2305 } 2306 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2307 goto fail; 2308 2309 if (sk && is_skb_wmem(skb)) { 2310 delta = skb_end_offset(skb) - osize; 2311 refcount_add(delta, &sk->sk_wmem_alloc); 2312 skb->truesize += delta; 2313 } 2314 return skb; 2315 2316 fail: 2317 kfree_skb(skb); 2318 return NULL; 2319 } 2320 EXPORT_SYMBOL(skb_expand_head); 2321 2322 /** 2323 * skb_copy_expand - copy and expand sk_buff 2324 * @skb: buffer to copy 2325 * @newheadroom: new free bytes at head 2326 * @newtailroom: new free bytes at tail 2327 * @gfp_mask: allocation priority 2328 * 2329 * Make a copy of both an &sk_buff and its data and while doing so 2330 * allocate additional space. 2331 * 2332 * This is used when the caller wishes to modify the data and needs a 2333 * private copy of the data to alter as well as more space for new fields. 2334 * Returns %NULL on failure or the pointer to the buffer 2335 * on success. The returned buffer has a reference count of 1. 2336 * 2337 * You must pass %GFP_ATOMIC as the allocation priority if this function 2338 * is called from an interrupt. 2339 */ 2340 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2341 int newheadroom, int newtailroom, 2342 gfp_t gfp_mask) 2343 { 2344 /* 2345 * Allocate the copy buffer 2346 */ 2347 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 2348 gfp_mask, skb_alloc_rx_flag(skb), 2349 NUMA_NO_NODE); 2350 int oldheadroom = skb_headroom(skb); 2351 int head_copy_len, head_copy_off; 2352 2353 if (!n) 2354 return NULL; 2355 2356 skb_reserve(n, newheadroom); 2357 2358 /* Set the tail pointer and length */ 2359 skb_put(n, skb->len); 2360 2361 head_copy_len = oldheadroom; 2362 head_copy_off = 0; 2363 if (newheadroom <= head_copy_len) 2364 head_copy_len = newheadroom; 2365 else 2366 head_copy_off = newheadroom - head_copy_len; 2367 2368 /* Copy the linear header and data. 
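 *
 * Illustrative numbers (not from the original comment): with an old
 * headroom of 16 bytes and newheadroom of 64, head_copy_len stays 16 and
 * head_copy_off becomes 48, so the preserved headroom bytes land directly
 * below the new data area; with newheadroom of 8, only the 8 headroom
 * bytes closest to the data would be copied.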
*/ 2369 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2370 skb->len + head_copy_len)); 2371 2372 skb_copy_header(n, skb); 2373 2374 skb_headers_offset_update(n, newheadroom - oldheadroom); 2375 2376 return n; 2377 } 2378 EXPORT_SYMBOL(skb_copy_expand); 2379 2380 /** 2381 * __skb_pad - zero pad the tail of an skb 2382 * @skb: buffer to pad 2383 * @pad: space to pad 2384 * @free_on_error: free buffer on error 2385 * 2386 * Ensure that a buffer is followed by a padding area that is zero 2387 * filled. Used by network drivers which may DMA or transfer data 2388 * beyond the buffer end onto the wire. 2389 * 2390 * May return error in out of memory cases. The skb is freed on error 2391 * if @free_on_error is true. 2392 */ 2393 2394 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2395 { 2396 int err; 2397 int ntail; 2398 2399 /* If the skbuff is non linear tailroom is always zero.. */ 2400 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2401 memset(skb->data+skb->len, 0, pad); 2402 return 0; 2403 } 2404 2405 ntail = skb->data_len + pad - (skb->end - skb->tail); 2406 if (likely(skb_cloned(skb) || ntail > 0)) { 2407 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2408 if (unlikely(err)) 2409 goto free_skb; 2410 } 2411 2412 /* FIXME: The use of this function with non-linear skb's really needs 2413 * to be audited. 2414 */ 2415 err = skb_linearize(skb); 2416 if (unlikely(err)) 2417 goto free_skb; 2418 2419 memset(skb->data + skb->len, 0, pad); 2420 return 0; 2421 2422 free_skb: 2423 if (free_on_error) 2424 kfree_skb(skb); 2425 return err; 2426 } 2427 EXPORT_SYMBOL(__skb_pad); 2428 2429 /** 2430 * pskb_put - add data to the tail of a potentially fragmented buffer 2431 * @skb: start of the buffer to use 2432 * @tail: tail fragment of the buffer to use 2433 * @len: amount of data to add 2434 * 2435 * This function extends the used data area of the potentially 2436 * fragmented buffer. @tail must be the last fragment of @skb -- or 2437 * @skb itself. If this would exceed the total buffer size the kernel 2438 * will panic. A pointer to the first byte of the extra data is 2439 * returned. 2440 */ 2441 2442 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2443 { 2444 if (tail != skb) { 2445 skb->data_len += len; 2446 skb->len += len; 2447 } 2448 return skb_put(tail, len); 2449 } 2450 EXPORT_SYMBOL_GPL(pskb_put); 2451 2452 /** 2453 * skb_put - add data to a buffer 2454 * @skb: buffer to use 2455 * @len: amount of data to add 2456 * 2457 * This function extends the used data area of the buffer. If this would 2458 * exceed the total buffer size the kernel will panic. A pointer to the 2459 * first byte of the extra data is returned. 2460 */ 2461 void *skb_put(struct sk_buff *skb, unsigned int len) 2462 { 2463 void *tmp = skb_tail_pointer(skb); 2464 SKB_LINEAR_ASSERT(skb); 2465 skb->tail += len; 2466 skb->len += len; 2467 if (unlikely(skb->tail > skb->end)) 2468 skb_over_panic(skb, len, __builtin_return_address(0)); 2469 return tmp; 2470 } 2471 EXPORT_SYMBOL(skb_put); 2472 2473 /** 2474 * skb_push - add data to the start of a buffer 2475 * @skb: buffer to use 2476 * @len: amount of data to add 2477 * 2478 * This function extends the used data area of the buffer at the buffer 2479 * start. If this would exceed the total buffer headroom the kernel will 2480 * panic. A pointer to the first byte of the extra data is returned. 
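 *
 * Illustrative sketch (not part of the original kernel-doc): prepending a
 * link-layer header to a buffer allocated with sufficient headroom, where
 * dest and src are caller-provided MAC addresses.
 *
 *	struct ethhdr *eth;
 *
 *	eth = skb_push(skb, ETH_HLEN);
 *	ether_addr_copy(eth->h_dest, dest);
 *	ether_addr_copy(eth->h_source, src);
 *	eth->h_proto = htons(ETH_P_IP);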
2481 */ 2482 void *skb_push(struct sk_buff *skb, unsigned int len) 2483 { 2484 skb->data -= len; 2485 skb->len += len; 2486 if (unlikely(skb->data < skb->head)) 2487 skb_under_panic(skb, len, __builtin_return_address(0)); 2488 return skb->data; 2489 } 2490 EXPORT_SYMBOL(skb_push); 2491 2492 /** 2493 * skb_pull - remove data from the start of a buffer 2494 * @skb: buffer to use 2495 * @len: amount of data to remove 2496 * 2497 * This function removes data from the start of a buffer, returning 2498 * the memory to the headroom. A pointer to the next data in the buffer 2499 * is returned. Once the data has been pulled future pushes will overwrite 2500 * the old data. 2501 */ 2502 void *skb_pull(struct sk_buff *skb, unsigned int len) 2503 { 2504 return skb_pull_inline(skb, len); 2505 } 2506 EXPORT_SYMBOL(skb_pull); 2507 2508 /** 2509 * skb_pull_data - remove data from the start of a buffer returning its 2510 * original position. 2511 * @skb: buffer to use 2512 * @len: amount of data to remove 2513 * 2514 * This function removes data from the start of a buffer, returning 2515 * the memory to the headroom. A pointer to the original data in the buffer 2516 * is returned after checking if there is enough data to pull. Once the 2517 * data has been pulled future pushes will overwrite the old data. 2518 */ 2519 void *skb_pull_data(struct sk_buff *skb, size_t len) 2520 { 2521 void *data = skb->data; 2522 2523 if (skb->len < len) 2524 return NULL; 2525 2526 skb_pull(skb, len); 2527 2528 return data; 2529 } 2530 EXPORT_SYMBOL(skb_pull_data); 2531 2532 /** 2533 * skb_trim - remove end from a buffer 2534 * @skb: buffer to alter 2535 * @len: new length 2536 * 2537 * Cut the length of a buffer down by removing data from the tail. If 2538 * the buffer is already under the length specified it is not modified. 2539 * The skb must be linear. 2540 */ 2541 void skb_trim(struct sk_buff *skb, unsigned int len) 2542 { 2543 if (skb->len > len) 2544 __skb_trim(skb, len); 2545 } 2546 EXPORT_SYMBOL(skb_trim); 2547 2548 /* Trims skb to length len. It can change skb pointers. 
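 *
 * (Illustrative note, not part of the original comment.) Most callers do
 * not use this directly but go through the pskb_trim(), skb_trim() or
 * pskb_trim_rcsum() helpers, e.g. with new_len being the caller's target
 * length:
 *
 *	if (pskb_trim_rcsum(skb, new_len))
 *		goto drop;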
2549 */ 2550 2551 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2552 { 2553 struct sk_buff **fragp; 2554 struct sk_buff *frag; 2555 int offset = skb_headlen(skb); 2556 int nfrags = skb_shinfo(skb)->nr_frags; 2557 int i; 2558 int err; 2559 2560 if (skb_cloned(skb) && 2561 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2562 return err; 2563 2564 i = 0; 2565 if (offset >= len) 2566 goto drop_pages; 2567 2568 for (; i < nfrags; i++) { 2569 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2570 2571 if (end < len) { 2572 offset = end; 2573 continue; 2574 } 2575 2576 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2577 2578 drop_pages: 2579 skb_shinfo(skb)->nr_frags = i; 2580 2581 for (; i < nfrags; i++) 2582 skb_frag_unref(skb, i); 2583 2584 if (skb_has_frag_list(skb)) 2585 skb_drop_fraglist(skb); 2586 goto done; 2587 } 2588 2589 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2590 fragp = &frag->next) { 2591 int end = offset + frag->len; 2592 2593 if (skb_shared(frag)) { 2594 struct sk_buff *nfrag; 2595 2596 nfrag = skb_clone(frag, GFP_ATOMIC); 2597 if (unlikely(!nfrag)) 2598 return -ENOMEM; 2599 2600 nfrag->next = frag->next; 2601 consume_skb(frag); 2602 frag = nfrag; 2603 *fragp = frag; 2604 } 2605 2606 if (end < len) { 2607 offset = end; 2608 continue; 2609 } 2610 2611 if (end > len && 2612 unlikely((err = pskb_trim(frag, len - offset)))) 2613 return err; 2614 2615 if (frag->next) 2616 skb_drop_list(&frag->next); 2617 break; 2618 } 2619 2620 done: 2621 if (len > skb_headlen(skb)) { 2622 skb->data_len -= skb->len - len; 2623 skb->len = len; 2624 } else { 2625 skb->len = len; 2626 skb->data_len = 0; 2627 skb_set_tail_pointer(skb, len); 2628 } 2629 2630 if (!skb->sk || skb->destructor == sock_edemux) 2631 skb_condense(skb); 2632 return 0; 2633 } 2634 EXPORT_SYMBOL(___pskb_trim); 2635 2636 /* Note : use pskb_trim_rcsum() instead of calling this directly 2637 */ 2638 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2639 { 2640 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2641 int delta = skb->len - len; 2642 2643 skb->csum = csum_block_sub(skb->csum, 2644 skb_checksum(skb, len, delta, 0), 2645 len); 2646 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2647 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2648 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2649 2650 if (offset + sizeof(__sum16) > hdlen) 2651 return -EINVAL; 2652 } 2653 return __pskb_trim(skb, len); 2654 } 2655 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2656 2657 /** 2658 * __pskb_pull_tail - advance tail of skb header 2659 * @skb: buffer to reallocate 2660 * @delta: number of bytes to advance tail 2661 * 2662 * The function makes a sense only on a fragmented &sk_buff, 2663 * it expands header moving its tail forward and copying necessary 2664 * data from fragmented part. 2665 * 2666 * &sk_buff MUST have reference count of 1. 2667 * 2668 * Returns %NULL (and &sk_buff does not change) if pull failed 2669 * or value of new tail of skb in the case of success. 2670 * 2671 * All the pointers pointing into skb header may change and must be 2672 * reloaded after call to this function. 2673 */ 2674 2675 /* Moves tail of skb head forward, copying data from fragmented part, 2676 * when it is necessary. 2677 * 1. It may fail due to malloc failure. 2678 * 2. It may change skb pointers. 2679 * 2680 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
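 *
 * (Illustrative note, not from the original comment.) Most callers reach
 * this slow path through pskb_may_pull(), which only drops down here when
 * the requested bytes are not already in the linear area:
 *
 *	struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(*iph)))
 *		goto drop;
 *	iph = ip_hdr(skb);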
2681 */ 2682 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2683 { 2684 /* If skb has not enough free space at tail, get new one 2685 * plus 128 bytes for future expansions. If we have enough 2686 * room at tail, reallocate without expansion only if skb is cloned. 2687 */ 2688 int i, k, eat = (skb->tail + delta) - skb->end; 2689 2690 if (eat > 0 || skb_cloned(skb)) { 2691 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2692 GFP_ATOMIC)) 2693 return NULL; 2694 } 2695 2696 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2697 skb_tail_pointer(skb), delta)); 2698 2699 /* Optimization: no fragments, no reasons to preestimate 2700 * size of pulled pages. Superb. 2701 */ 2702 if (!skb_has_frag_list(skb)) 2703 goto pull_pages; 2704 2705 /* Estimate size of pulled pages. */ 2706 eat = delta; 2707 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2708 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2709 2710 if (size >= eat) 2711 goto pull_pages; 2712 eat -= size; 2713 } 2714 2715 /* If we need update frag list, we are in troubles. 2716 * Certainly, it is possible to add an offset to skb data, 2717 * but taking into account that pulling is expected to 2718 * be very rare operation, it is worth to fight against 2719 * further bloating skb head and crucify ourselves here instead. 2720 * Pure masohism, indeed. 8)8) 2721 */ 2722 if (eat) { 2723 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2724 struct sk_buff *clone = NULL; 2725 struct sk_buff *insp = NULL; 2726 2727 do { 2728 if (list->len <= eat) { 2729 /* Eaten as whole. */ 2730 eat -= list->len; 2731 list = list->next; 2732 insp = list; 2733 } else { 2734 /* Eaten partially. */ 2735 if (skb_is_gso(skb) && !list->head_frag && 2736 skb_headlen(list)) 2737 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2738 2739 if (skb_shared(list)) { 2740 /* Sucks! We need to fork list. :-( */ 2741 clone = skb_clone(list, GFP_ATOMIC); 2742 if (!clone) 2743 return NULL; 2744 insp = list->next; 2745 list = clone; 2746 } else { 2747 /* This may be pulled without 2748 * problems. */ 2749 insp = list; 2750 } 2751 if (!pskb_pull(list, eat)) { 2752 kfree_skb(clone); 2753 return NULL; 2754 } 2755 break; 2756 } 2757 } while (eat); 2758 2759 /* Free pulled out fragments. */ 2760 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2761 skb_shinfo(skb)->frag_list = list->next; 2762 consume_skb(list); 2763 } 2764 /* And insert new clone at head. */ 2765 if (clone) { 2766 clone->next = list; 2767 skb_shinfo(skb)->frag_list = clone; 2768 } 2769 } 2770 /* Success! Now we may commit changes to skb data. 
*/ 2771 2772 pull_pages: 2773 eat = delta; 2774 k = 0; 2775 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2776 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2777 2778 if (size <= eat) { 2779 skb_frag_unref(skb, i); 2780 eat -= size; 2781 } else { 2782 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2783 2784 *frag = skb_shinfo(skb)->frags[i]; 2785 if (eat) { 2786 skb_frag_off_add(frag, eat); 2787 skb_frag_size_sub(frag, eat); 2788 if (!i) 2789 goto end; 2790 eat = 0; 2791 } 2792 k++; 2793 } 2794 } 2795 skb_shinfo(skb)->nr_frags = k; 2796 2797 end: 2798 skb->tail += delta; 2799 skb->data_len -= delta; 2800 2801 if (!skb->data_len) 2802 skb_zcopy_clear(skb, false); 2803 2804 return skb_tail_pointer(skb); 2805 } 2806 EXPORT_SYMBOL(__pskb_pull_tail); 2807 2808 /** 2809 * skb_copy_bits - copy bits from skb to kernel buffer 2810 * @skb: source skb 2811 * @offset: offset in source 2812 * @to: destination buffer 2813 * @len: number of bytes to copy 2814 * 2815 * Copy the specified number of bytes from the source skb to the 2816 * destination buffer. 2817 * 2818 * CAUTION ! : 2819 * If its prototype is ever changed, 2820 * check arch/{*}/net/{*}.S files, 2821 * since it is called from BPF assembly code. 2822 */ 2823 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2824 { 2825 int start = skb_headlen(skb); 2826 struct sk_buff *frag_iter; 2827 int i, copy; 2828 2829 if (offset > (int)skb->len - len) 2830 goto fault; 2831 2832 /* Copy header. */ 2833 if ((copy = start - offset) > 0) { 2834 if (copy > len) 2835 copy = len; 2836 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2837 if ((len -= copy) == 0) 2838 return 0; 2839 offset += copy; 2840 to += copy; 2841 } 2842 2843 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2844 int end; 2845 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2846 2847 WARN_ON(start > offset + len); 2848 2849 end = start + skb_frag_size(f); 2850 if ((copy = end - offset) > 0) { 2851 u32 p_off, p_len, copied; 2852 struct page *p; 2853 u8 *vaddr; 2854 2855 if (copy > len) 2856 copy = len; 2857 2858 skb_frag_foreach_page(f, 2859 skb_frag_off(f) + offset - start, 2860 copy, p, p_off, p_len, copied) { 2861 vaddr = kmap_atomic(p); 2862 memcpy(to + copied, vaddr + p_off, p_len); 2863 kunmap_atomic(vaddr); 2864 } 2865 2866 if ((len -= copy) == 0) 2867 return 0; 2868 offset += copy; 2869 to += copy; 2870 } 2871 start = end; 2872 } 2873 2874 skb_walk_frags(skb, frag_iter) { 2875 int end; 2876 2877 WARN_ON(start > offset + len); 2878 2879 end = start + frag_iter->len; 2880 if ((copy = end - offset) > 0) { 2881 if (copy > len) 2882 copy = len; 2883 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2884 goto fault; 2885 if ((len -= copy) == 0) 2886 return 0; 2887 offset += copy; 2888 to += copy; 2889 } 2890 start = end; 2891 } 2892 2893 if (!len) 2894 return 0; 2895 2896 fault: 2897 return -EFAULT; 2898 } 2899 EXPORT_SYMBOL(skb_copy_bits); 2900 2901 /* 2902 * Callback from splice_to_pipe(), if we need to release some pages 2903 * at the end of the spd in case we error'ed out in filling the pipe. 
2904 */ 2905 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 2906 { 2907 put_page(spd->pages[i]); 2908 } 2909 2910 static struct page *linear_to_page(struct page *page, unsigned int *len, 2911 unsigned int *offset, 2912 struct sock *sk) 2913 { 2914 struct page_frag *pfrag = sk_page_frag(sk); 2915 2916 if (!sk_page_frag_refill(sk, pfrag)) 2917 return NULL; 2918 2919 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 2920 2921 memcpy(page_address(pfrag->page) + pfrag->offset, 2922 page_address(page) + *offset, *len); 2923 *offset = pfrag->offset; 2924 pfrag->offset += *len; 2925 2926 return pfrag->page; 2927 } 2928 2929 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 2930 struct page *page, 2931 unsigned int offset) 2932 { 2933 return spd->nr_pages && 2934 spd->pages[spd->nr_pages - 1] == page && 2935 (spd->partial[spd->nr_pages - 1].offset + 2936 spd->partial[spd->nr_pages - 1].len == offset); 2937 } 2938 2939 /* 2940 * Fill page/offset/length into spd, if it can hold more pages. 2941 */ 2942 static bool spd_fill_page(struct splice_pipe_desc *spd, 2943 struct pipe_inode_info *pipe, struct page *page, 2944 unsigned int *len, unsigned int offset, 2945 bool linear, 2946 struct sock *sk) 2947 { 2948 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2949 return true; 2950 2951 if (linear) { 2952 page = linear_to_page(page, len, &offset, sk); 2953 if (!page) 2954 return true; 2955 } 2956 if (spd_can_coalesce(spd, page, offset)) { 2957 spd->partial[spd->nr_pages - 1].len += *len; 2958 return false; 2959 } 2960 get_page(page); 2961 spd->pages[spd->nr_pages] = page; 2962 spd->partial[spd->nr_pages].len = *len; 2963 spd->partial[spd->nr_pages].offset = offset; 2964 spd->nr_pages++; 2965 2966 return false; 2967 } 2968 2969 static bool __splice_segment(struct page *page, unsigned int poff, 2970 unsigned int plen, unsigned int *off, 2971 unsigned int *len, 2972 struct splice_pipe_desc *spd, bool linear, 2973 struct sock *sk, 2974 struct pipe_inode_info *pipe) 2975 { 2976 if (!*len) 2977 return true; 2978 2979 /* skip this segment if already processed */ 2980 if (*off >= plen) { 2981 *off -= plen; 2982 return false; 2983 } 2984 2985 /* ignore any bits we already processed */ 2986 poff += *off; 2987 plen -= *off; 2988 *off = 0; 2989 2990 do { 2991 unsigned int flen = min(*len, plen); 2992 2993 if (spd_fill_page(spd, pipe, page, &flen, poff, 2994 linear, sk)) 2995 return true; 2996 poff += flen; 2997 plen -= flen; 2998 *len -= flen; 2999 } while (*len && plen); 3000 3001 return false; 3002 } 3003 3004 /* 3005 * Map linear and fragment data from the skb to spd. It reports true if the 3006 * pipe is full or if we already spliced the requested length. 3007 */ 3008 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3009 unsigned int *offset, unsigned int *len, 3010 struct splice_pipe_desc *spd, struct sock *sk) 3011 { 3012 int seg; 3013 struct sk_buff *iter; 3014 3015 /* map the linear part : 3016 * If skb->head_frag is set, this 'linear' part is backed by a 3017 * fragment, and if the head is not shared with any clones then 3018 * we can avoid a copy since we own the head portion of this page. 
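 *
 * The skb_head_is_locked() result passed below as the 'linear' argument
 * encodes exactly that: when the head is locked (not backed by a page
 * fragment, or shared with a clone), __splice_segment() bounces the bytes
 * through a private page via linear_to_page() instead of referencing
 * skb->head directly.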
3019 */ 3020 if (__splice_segment(virt_to_page(skb->data), 3021 (unsigned long) skb->data & (PAGE_SIZE - 1), 3022 skb_headlen(skb), 3023 offset, len, spd, 3024 skb_head_is_locked(skb), 3025 sk, pipe)) 3026 return true; 3027 3028 /* 3029 * then map the fragments 3030 */ 3031 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3032 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3033 3034 if (__splice_segment(skb_frag_page(f), 3035 skb_frag_off(f), skb_frag_size(f), 3036 offset, len, spd, false, sk, pipe)) 3037 return true; 3038 } 3039 3040 skb_walk_frags(skb, iter) { 3041 if (*offset >= iter->len) { 3042 *offset -= iter->len; 3043 continue; 3044 } 3045 /* __skb_splice_bits() only fails if the output has no room 3046 * left, so no point in going over the frag_list for the error 3047 * case. 3048 */ 3049 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3050 return true; 3051 } 3052 3053 return false; 3054 } 3055 3056 /* 3057 * Map data from the skb to a pipe. Should handle both the linear part, 3058 * the fragments, and the frag list. 3059 */ 3060 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3061 struct pipe_inode_info *pipe, unsigned int tlen, 3062 unsigned int flags) 3063 { 3064 struct partial_page partial[MAX_SKB_FRAGS]; 3065 struct page *pages[MAX_SKB_FRAGS]; 3066 struct splice_pipe_desc spd = { 3067 .pages = pages, 3068 .partial = partial, 3069 .nr_pages_max = MAX_SKB_FRAGS, 3070 .ops = &nosteal_pipe_buf_ops, 3071 .spd_release = sock_spd_release, 3072 }; 3073 int ret = 0; 3074 3075 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3076 3077 if (spd.nr_pages) 3078 ret = splice_to_pipe(pipe, &spd); 3079 3080 return ret; 3081 } 3082 EXPORT_SYMBOL_GPL(skb_splice_bits); 3083 3084 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3085 { 3086 struct socket *sock = sk->sk_socket; 3087 size_t size = msg_data_left(msg); 3088 3089 if (!sock) 3090 return -EINVAL; 3091 3092 if (!sock->ops->sendmsg_locked) 3093 return sock_no_sendmsg_locked(sk, msg, size); 3094 3095 return sock->ops->sendmsg_locked(sk, msg, size); 3096 } 3097 3098 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3099 { 3100 struct socket *sock = sk->sk_socket; 3101 3102 if (!sock) 3103 return -EINVAL; 3104 return sock_sendmsg(sock, msg); 3105 } 3106 3107 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3108 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3109 int len, sendmsg_func sendmsg) 3110 { 3111 unsigned int orig_len = len; 3112 struct sk_buff *head = skb; 3113 unsigned short fragidx; 3114 int slen, ret; 3115 3116 do_frag_list: 3117 3118 /* Deal with head data */ 3119 while (offset < skb_headlen(skb) && len) { 3120 struct kvec kv; 3121 struct msghdr msg; 3122 3123 slen = min_t(int, len, skb_headlen(skb) - offset); 3124 kv.iov_base = skb->data + offset; 3125 kv.iov_len = slen; 3126 memset(&msg, 0, sizeof(msg)); 3127 msg.msg_flags = MSG_DONTWAIT; 3128 3129 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3130 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3131 sendmsg_unlocked, sk, &msg); 3132 if (ret <= 0) 3133 goto error; 3134 3135 offset += ret; 3136 len -= ret; 3137 } 3138 3139 /* All the data was skb head? 
*/ 3140 if (!len) 3141 goto out; 3142 3143 /* Make offset relative to start of frags */ 3144 offset -= skb_headlen(skb); 3145 3146 /* Find where we are in frag list */ 3147 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3148 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3149 3150 if (offset < skb_frag_size(frag)) 3151 break; 3152 3153 offset -= skb_frag_size(frag); 3154 } 3155 3156 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3157 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3158 3159 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3160 3161 while (slen) { 3162 struct bio_vec bvec; 3163 struct msghdr msg = { 3164 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3165 }; 3166 3167 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3168 skb_frag_off(frag) + offset); 3169 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3170 slen); 3171 3172 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3173 sendmsg_unlocked, sk, &msg); 3174 if (ret <= 0) 3175 goto error; 3176 3177 len -= ret; 3178 offset += ret; 3179 slen -= ret; 3180 } 3181 3182 offset = 0; 3183 } 3184 3185 if (len) { 3186 /* Process any frag lists */ 3187 3188 if (skb == head) { 3189 if (skb_has_frag_list(skb)) { 3190 skb = skb_shinfo(skb)->frag_list; 3191 goto do_frag_list; 3192 } 3193 } else if (skb->next) { 3194 skb = skb->next; 3195 goto do_frag_list; 3196 } 3197 } 3198 3199 out: 3200 return orig_len - len; 3201 3202 error: 3203 return orig_len == len ? ret : orig_len - len; 3204 } 3205 3206 /* Send skb data on a socket. Socket must be locked. */ 3207 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3208 int len) 3209 { 3210 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3211 } 3212 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3213 3214 /* Send skb data on a socket. Socket must be unlocked. */ 3215 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3216 { 3217 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3218 } 3219 3220 /** 3221 * skb_store_bits - store bits from kernel buffer to skb 3222 * @skb: destination buffer 3223 * @offset: offset in destination 3224 * @from: source buffer 3225 * @len: number of bytes to copy 3226 * 3227 * Copy the specified number of bytes from the source buffer to the 3228 * destination skb. This function handles all the messy bits of 3229 * traversing fragment lists and such. 
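 *
 * Illustrative sketch (not part of the original kernel-doc): overwrite a
 * few bytes at a given offset, regardless of whether they live in the
 * linear area, in a page fragment or in the frag list.
 *
 *	u8 zeros[4] = { 0 };
 *
 *	if (skb_store_bits(skb, offset, zeros, sizeof(zeros)))
 *		return -EFAULT;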
3230 */ 3231 3232 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3233 { 3234 int start = skb_headlen(skb); 3235 struct sk_buff *frag_iter; 3236 int i, copy; 3237 3238 if (offset > (int)skb->len - len) 3239 goto fault; 3240 3241 if ((copy = start - offset) > 0) { 3242 if (copy > len) 3243 copy = len; 3244 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3245 if ((len -= copy) == 0) 3246 return 0; 3247 offset += copy; 3248 from += copy; 3249 } 3250 3251 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3252 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3253 int end; 3254 3255 WARN_ON(start > offset + len); 3256 3257 end = start + skb_frag_size(frag); 3258 if ((copy = end - offset) > 0) { 3259 u32 p_off, p_len, copied; 3260 struct page *p; 3261 u8 *vaddr; 3262 3263 if (copy > len) 3264 copy = len; 3265 3266 skb_frag_foreach_page(frag, 3267 skb_frag_off(frag) + offset - start, 3268 copy, p, p_off, p_len, copied) { 3269 vaddr = kmap_atomic(p); 3270 memcpy(vaddr + p_off, from + copied, p_len); 3271 kunmap_atomic(vaddr); 3272 } 3273 3274 if ((len -= copy) == 0) 3275 return 0; 3276 offset += copy; 3277 from += copy; 3278 } 3279 start = end; 3280 } 3281 3282 skb_walk_frags(skb, frag_iter) { 3283 int end; 3284 3285 WARN_ON(start > offset + len); 3286 3287 end = start + frag_iter->len; 3288 if ((copy = end - offset) > 0) { 3289 if (copy > len) 3290 copy = len; 3291 if (skb_store_bits(frag_iter, offset - start, 3292 from, copy)) 3293 goto fault; 3294 if ((len -= copy) == 0) 3295 return 0; 3296 offset += copy; 3297 from += copy; 3298 } 3299 start = end; 3300 } 3301 if (!len) 3302 return 0; 3303 3304 fault: 3305 return -EFAULT; 3306 } 3307 EXPORT_SYMBOL(skb_store_bits); 3308 3309 /* Checksum skb data. */ 3310 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3311 __wsum csum, const struct skb_checksum_ops *ops) 3312 { 3313 int start = skb_headlen(skb); 3314 int i, copy = start - offset; 3315 struct sk_buff *frag_iter; 3316 int pos = 0; 3317 3318 /* Checksum header. 
*/ 3319 if (copy > 0) { 3320 if (copy > len) 3321 copy = len; 3322 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3323 skb->data + offset, copy, csum); 3324 if ((len -= copy) == 0) 3325 return csum; 3326 offset += copy; 3327 pos = copy; 3328 } 3329 3330 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3331 int end; 3332 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3333 3334 WARN_ON(start > offset + len); 3335 3336 end = start + skb_frag_size(frag); 3337 if ((copy = end - offset) > 0) { 3338 u32 p_off, p_len, copied; 3339 struct page *p; 3340 __wsum csum2; 3341 u8 *vaddr; 3342 3343 if (copy > len) 3344 copy = len; 3345 3346 skb_frag_foreach_page(frag, 3347 skb_frag_off(frag) + offset - start, 3348 copy, p, p_off, p_len, copied) { 3349 vaddr = kmap_atomic(p); 3350 csum2 = INDIRECT_CALL_1(ops->update, 3351 csum_partial_ext, 3352 vaddr + p_off, p_len, 0); 3353 kunmap_atomic(vaddr); 3354 csum = INDIRECT_CALL_1(ops->combine, 3355 csum_block_add_ext, csum, 3356 csum2, pos, p_len); 3357 pos += p_len; 3358 } 3359 3360 if (!(len -= copy)) 3361 return csum; 3362 offset += copy; 3363 } 3364 start = end; 3365 } 3366 3367 skb_walk_frags(skb, frag_iter) { 3368 int end; 3369 3370 WARN_ON(start > offset + len); 3371 3372 end = start + frag_iter->len; 3373 if ((copy = end - offset) > 0) { 3374 __wsum csum2; 3375 if (copy > len) 3376 copy = len; 3377 csum2 = __skb_checksum(frag_iter, offset - start, 3378 copy, 0, ops); 3379 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3380 csum, csum2, pos, copy); 3381 if ((len -= copy) == 0) 3382 return csum; 3383 offset += copy; 3384 pos += copy; 3385 } 3386 start = end; 3387 } 3388 BUG_ON(len); 3389 3390 return csum; 3391 } 3392 EXPORT_SYMBOL(__skb_checksum); 3393 3394 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3395 int len, __wsum csum) 3396 { 3397 const struct skb_checksum_ops ops = { 3398 .update = csum_partial_ext, 3399 .combine = csum_block_add_ext, 3400 }; 3401 3402 return __skb_checksum(skb, offset, len, csum, &ops); 3403 } 3404 EXPORT_SYMBOL(skb_checksum); 3405 3406 /* Both of above in one bottle. */ 3407 3408 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3409 u8 *to, int len) 3410 { 3411 int start = skb_headlen(skb); 3412 int i, copy = start - offset; 3413 struct sk_buff *frag_iter; 3414 int pos = 0; 3415 __wsum csum = 0; 3416 3417 /* Copy header. 
*/ 3418 if (copy > 0) { 3419 if (copy > len) 3420 copy = len; 3421 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3422 copy); 3423 if ((len -= copy) == 0) 3424 return csum; 3425 offset += copy; 3426 to += copy; 3427 pos = copy; 3428 } 3429 3430 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3431 int end; 3432 3433 WARN_ON(start > offset + len); 3434 3435 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3436 if ((copy = end - offset) > 0) { 3437 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3438 u32 p_off, p_len, copied; 3439 struct page *p; 3440 __wsum csum2; 3441 u8 *vaddr; 3442 3443 if (copy > len) 3444 copy = len; 3445 3446 skb_frag_foreach_page(frag, 3447 skb_frag_off(frag) + offset - start, 3448 copy, p, p_off, p_len, copied) { 3449 vaddr = kmap_atomic(p); 3450 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3451 to + copied, 3452 p_len); 3453 kunmap_atomic(vaddr); 3454 csum = csum_block_add(csum, csum2, pos); 3455 pos += p_len; 3456 } 3457 3458 if (!(len -= copy)) 3459 return csum; 3460 offset += copy; 3461 to += copy; 3462 } 3463 start = end; 3464 } 3465 3466 skb_walk_frags(skb, frag_iter) { 3467 __wsum csum2; 3468 int end; 3469 3470 WARN_ON(start > offset + len); 3471 3472 end = start + frag_iter->len; 3473 if ((copy = end - offset) > 0) { 3474 if (copy > len) 3475 copy = len; 3476 csum2 = skb_copy_and_csum_bits(frag_iter, 3477 offset - start, 3478 to, copy); 3479 csum = csum_block_add(csum, csum2, pos); 3480 if ((len -= copy) == 0) 3481 return csum; 3482 offset += copy; 3483 to += copy; 3484 pos += copy; 3485 } 3486 start = end; 3487 } 3488 BUG_ON(len); 3489 return csum; 3490 } 3491 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3492 3493 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3494 { 3495 __sum16 sum; 3496 3497 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3498 /* See comments in __skb_checksum_complete(). */ 3499 if (likely(!sum)) { 3500 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3501 !skb->csum_complete_sw) 3502 netdev_rx_csum_fault(skb->dev, skb); 3503 } 3504 if (!skb_shared(skb)) 3505 skb->csum_valid = !sum; 3506 return sum; 3507 } 3508 EXPORT_SYMBOL(__skb_checksum_complete_head); 3509 3510 /* This function assumes skb->csum already holds pseudo header's checksum, 3511 * which has been changed from the hardware checksum, for example, by 3512 * __skb_checksum_validate_complete(). And, the original skb->csum must 3513 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3514 * 3515 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3516 * zero. The new checksum is stored back into skb->csum unless the skb is 3517 * shared. 3518 */ 3519 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3520 { 3521 __wsum csum; 3522 __sum16 sum; 3523 3524 csum = skb_checksum(skb, 0, skb->len, 0); 3525 3526 sum = csum_fold(csum_add(skb->csum, csum)); 3527 /* This check is inverted, because we already knew the hardware 3528 * checksum is invalid before calling this function. So, if the 3529 * re-computed checksum is valid instead, then we have a mismatch 3530 * between the original skb->csum and skb_checksum(). This means either 3531 * the original hardware checksum is incorrect or we screw up skb->csum 3532 * when moving skb->data around. 
3533 */ 3534 if (likely(!sum)) { 3535 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3536 !skb->csum_complete_sw) 3537 netdev_rx_csum_fault(skb->dev, skb); 3538 } 3539 3540 if (!skb_shared(skb)) { 3541 /* Save full packet checksum */ 3542 skb->csum = csum; 3543 skb->ip_summed = CHECKSUM_COMPLETE; 3544 skb->csum_complete_sw = 1; 3545 skb->csum_valid = !sum; 3546 } 3547 3548 return sum; 3549 } 3550 EXPORT_SYMBOL(__skb_checksum_complete); 3551 3552 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3553 { 3554 net_warn_ratelimited( 3555 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3556 __func__); 3557 return 0; 3558 } 3559 3560 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3561 int offset, int len) 3562 { 3563 net_warn_ratelimited( 3564 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3565 __func__); 3566 return 0; 3567 } 3568 3569 static const struct skb_checksum_ops default_crc32c_ops = { 3570 .update = warn_crc32c_csum_update, 3571 .combine = warn_crc32c_csum_combine, 3572 }; 3573 3574 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3575 &default_crc32c_ops; 3576 EXPORT_SYMBOL(crc32c_csum_stub); 3577 3578 /** 3579 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3580 * @from: source buffer 3581 * 3582 * Calculates the amount of linear headroom needed in the 'to' skb passed 3583 * into skb_zerocopy(). 3584 */ 3585 unsigned int 3586 skb_zerocopy_headlen(const struct sk_buff *from) 3587 { 3588 unsigned int hlen = 0; 3589 3590 if (!from->head_frag || 3591 skb_headlen(from) < L1_CACHE_BYTES || 3592 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3593 hlen = skb_headlen(from); 3594 if (!hlen) 3595 hlen = from->len; 3596 } 3597 3598 if (skb_has_frag_list(from)) 3599 hlen = from->len; 3600 3601 return hlen; 3602 } 3603 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3604 3605 /** 3606 * skb_zerocopy - Zero copy skb to skb 3607 * @to: destination buffer 3608 * @from: source buffer 3609 * @len: number of bytes to copy from source buffer 3610 * @hlen: size of linear headroom in destination buffer 3611 * 3612 * Copies up to `len` bytes from `from` to `to` by creating references 3613 * to the frags in the source buffer. 3614 * 3615 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3616 * headroom in the `to` buffer. 
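 *
 * A typical call sequence looks roughly like this (illustrative sketch;
 * 'tailroom' and 'len' are caller-specific and the destination allocation
 * is simplified):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen + tailroom, GFP_ATOMIC);
 *	if (!to)
 *		return -ENOMEM;
 *	err = skb_zerocopy(to, from, len, hlen);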
3617 * 3618 * Return value: 3619 * 0: everything is OK 3620 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3621 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3622 */ 3623 int 3624 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3625 { 3626 int i, j = 0; 3627 int plen = 0; /* length of skb->head fragment */ 3628 int ret; 3629 struct page *page; 3630 unsigned int offset; 3631 3632 BUG_ON(!from->head_frag && !hlen); 3633 3634 /* dont bother with small payloads */ 3635 if (len <= skb_tailroom(to)) 3636 return skb_copy_bits(from, 0, skb_put(to, len), len); 3637 3638 if (hlen) { 3639 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3640 if (unlikely(ret)) 3641 return ret; 3642 len -= hlen; 3643 } else { 3644 plen = min_t(int, skb_headlen(from), len); 3645 if (plen) { 3646 page = virt_to_head_page(from->head); 3647 offset = from->data - (unsigned char *)page_address(page); 3648 __skb_fill_page_desc(to, 0, page, offset, plen); 3649 get_page(page); 3650 j = 1; 3651 len -= plen; 3652 } 3653 } 3654 3655 skb_len_add(to, len + plen); 3656 3657 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3658 skb_tx_error(from); 3659 return -ENOMEM; 3660 } 3661 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3662 3663 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3664 int size; 3665 3666 if (!len) 3667 break; 3668 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3669 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3670 len); 3671 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3672 len -= size; 3673 skb_frag_ref(to, j); 3674 j++; 3675 } 3676 skb_shinfo(to)->nr_frags = j; 3677 3678 return 0; 3679 } 3680 EXPORT_SYMBOL_GPL(skb_zerocopy); 3681 3682 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3683 { 3684 __wsum csum; 3685 long csstart; 3686 3687 if (skb->ip_summed == CHECKSUM_PARTIAL) 3688 csstart = skb_checksum_start_offset(skb); 3689 else 3690 csstart = skb_headlen(skb); 3691 3692 BUG_ON(csstart > skb_headlen(skb)); 3693 3694 skb_copy_from_linear_data(skb, to, csstart); 3695 3696 csum = 0; 3697 if (csstart != skb->len) 3698 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3699 skb->len - csstart); 3700 3701 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3702 long csstuff = csstart + skb->csum_offset; 3703 3704 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3705 } 3706 } 3707 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3708 3709 /** 3710 * skb_dequeue - remove from the head of the queue 3711 * @list: list to dequeue from 3712 * 3713 * Remove the head of the list. The list lock is taken so the function 3714 * may be used safely with other locking list functions. The head item is 3715 * returned or %NULL if the list is empty. 3716 */ 3717 3718 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3719 { 3720 unsigned long flags; 3721 struct sk_buff *result; 3722 3723 spin_lock_irqsave(&list->lock, flags); 3724 result = __skb_dequeue(list); 3725 spin_unlock_irqrestore(&list->lock, flags); 3726 return result; 3727 } 3728 EXPORT_SYMBOL(skb_dequeue); 3729 3730 /** 3731 * skb_dequeue_tail - remove from the tail of the queue 3732 * @list: list to dequeue from 3733 * 3734 * Remove the tail of the list. The list lock is taken so the function 3735 * may be used safely with other locking list functions. The tail item is 3736 * returned or %NULL if the list is empty. 
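 *
 * Illustrative sketch (not part of the original kernel-doc), where 'queue'
 * is the caller's struct sk_buff_head: drop the newest buffers first by
 * draining from the tail.
 *
 *	while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *		kfree_skb(skb);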
3737 */ 3738 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3739 { 3740 unsigned long flags; 3741 struct sk_buff *result; 3742 3743 spin_lock_irqsave(&list->lock, flags); 3744 result = __skb_dequeue_tail(list); 3745 spin_unlock_irqrestore(&list->lock, flags); 3746 return result; 3747 } 3748 EXPORT_SYMBOL(skb_dequeue_tail); 3749 3750 /** 3751 * skb_queue_purge_reason - empty a list 3752 * @list: list to empty 3753 * @reason: drop reason 3754 * 3755 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3756 * the list and one reference dropped. This function takes the list 3757 * lock and is atomic with respect to other list locking functions. 3758 */ 3759 void skb_queue_purge_reason(struct sk_buff_head *list, 3760 enum skb_drop_reason reason) 3761 { 3762 struct sk_buff_head tmp; 3763 unsigned long flags; 3764 3765 if (skb_queue_empty_lockless(list)) 3766 return; 3767 3768 __skb_queue_head_init(&tmp); 3769 3770 spin_lock_irqsave(&list->lock, flags); 3771 skb_queue_splice_init(list, &tmp); 3772 spin_unlock_irqrestore(&list->lock, flags); 3773 3774 __skb_queue_purge_reason(&tmp, reason); 3775 } 3776 EXPORT_SYMBOL(skb_queue_purge_reason); 3777 3778 /** 3779 * skb_rbtree_purge - empty a skb rbtree 3780 * @root: root of the rbtree to empty 3781 * Return value: the sum of truesizes of all purged skbs. 3782 * 3783 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3784 * the list and one reference dropped. This function does not take 3785 * any lock. Synchronization should be handled by the caller (e.g., TCP 3786 * out-of-order queue is protected by the socket lock). 3787 */ 3788 unsigned int skb_rbtree_purge(struct rb_root *root) 3789 { 3790 struct rb_node *p = rb_first(root); 3791 unsigned int sum = 0; 3792 3793 while (p) { 3794 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3795 3796 p = rb_next(p); 3797 rb_erase(&skb->rbnode, root); 3798 sum += skb->truesize; 3799 kfree_skb(skb); 3800 } 3801 return sum; 3802 } 3803 3804 void skb_errqueue_purge(struct sk_buff_head *list) 3805 { 3806 struct sk_buff *skb, *next; 3807 struct sk_buff_head kill; 3808 unsigned long flags; 3809 3810 __skb_queue_head_init(&kill); 3811 3812 spin_lock_irqsave(&list->lock, flags); 3813 skb_queue_walk_safe(list, skb, next) { 3814 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3815 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3816 continue; 3817 __skb_unlink(skb, list); 3818 __skb_queue_tail(&kill, skb); 3819 } 3820 spin_unlock_irqrestore(&list->lock, flags); 3821 __skb_queue_purge(&kill); 3822 } 3823 EXPORT_SYMBOL(skb_errqueue_purge); 3824 3825 /** 3826 * skb_queue_head - queue a buffer at the list head 3827 * @list: list to use 3828 * @newsk: buffer to queue 3829 * 3830 * Queue a buffer at the start of the list. This function takes the 3831 * list lock and can be used safely with other locking &sk_buff functions 3832 * safely. 3833 * 3834 * A buffer cannot be placed on two lists at the same time. 3835 */ 3836 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3837 { 3838 unsigned long flags; 3839 3840 spin_lock_irqsave(&list->lock, flags); 3841 __skb_queue_head(list, newsk); 3842 spin_unlock_irqrestore(&list->lock, flags); 3843 } 3844 EXPORT_SYMBOL(skb_queue_head); 3845 3846 /** 3847 * skb_queue_tail - queue a buffer at the list tail 3848 * @list: list to use 3849 * @newsk: buffer to queue 3850 * 3851 * Queue a buffer at the tail of the list. 
This function takes the 3852 * list lock and can be used safely with other locking &sk_buff functions 3853 * safely. 3854 * 3855 * A buffer cannot be placed on two lists at the same time. 3856 */ 3857 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3858 { 3859 unsigned long flags; 3860 3861 spin_lock_irqsave(&list->lock, flags); 3862 __skb_queue_tail(list, newsk); 3863 spin_unlock_irqrestore(&list->lock, flags); 3864 } 3865 EXPORT_SYMBOL(skb_queue_tail); 3866 3867 /** 3868 * skb_unlink - remove a buffer from a list 3869 * @skb: buffer to remove 3870 * @list: list to use 3871 * 3872 * Remove a packet from a list. The list locks are taken and this 3873 * function is atomic with respect to other list locked calls 3874 * 3875 * You must know what list the SKB is on. 3876 */ 3877 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3878 { 3879 unsigned long flags; 3880 3881 spin_lock_irqsave(&list->lock, flags); 3882 __skb_unlink(skb, list); 3883 spin_unlock_irqrestore(&list->lock, flags); 3884 } 3885 EXPORT_SYMBOL(skb_unlink); 3886 3887 /** 3888 * skb_append - append a buffer 3889 * @old: buffer to insert after 3890 * @newsk: buffer to insert 3891 * @list: list to use 3892 * 3893 * Place a packet after a given packet in a list. The list locks are taken 3894 * and this function is atomic with respect to other list locked calls. 3895 * A buffer cannot be placed on two lists at the same time. 3896 */ 3897 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 3898 { 3899 unsigned long flags; 3900 3901 spin_lock_irqsave(&list->lock, flags); 3902 __skb_queue_after(list, old, newsk); 3903 spin_unlock_irqrestore(&list->lock, flags); 3904 } 3905 EXPORT_SYMBOL(skb_append); 3906 3907 static inline void skb_split_inside_header(struct sk_buff *skb, 3908 struct sk_buff* skb1, 3909 const u32 len, const int pos) 3910 { 3911 int i; 3912 3913 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3914 pos - len); 3915 /* And move data appendix as is. */ 3916 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 3917 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 3918 3919 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 3920 skb_shinfo(skb)->nr_frags = 0; 3921 skb1->data_len = skb->data_len; 3922 skb1->len += skb1->data_len; 3923 skb->data_len = 0; 3924 skb->len = len; 3925 skb_set_tail_pointer(skb, len); 3926 } 3927 3928 static inline void skb_split_no_header(struct sk_buff *skb, 3929 struct sk_buff* skb1, 3930 const u32 len, int pos) 3931 { 3932 int i, k = 0; 3933 const int nfrags = skb_shinfo(skb)->nr_frags; 3934 3935 skb_shinfo(skb)->nr_frags = 0; 3936 skb1->len = skb1->data_len = skb->len - len; 3937 skb->len = len; 3938 skb->data_len = len - pos; 3939 3940 for (i = 0; i < nfrags; i++) { 3941 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 3942 3943 if (pos + size > len) { 3944 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 3945 3946 if (pos < len) { 3947 /* Split frag. 3948 * We have two variants in this case: 3949 * 1. Move all the frag to the second 3950 * part, if it is possible. F.e. 3951 * this approach is mandatory for TUX, 3952 * where splitting is expensive. 3953 * 2. Split is accurately. We make this. 
3954 */ 3955 skb_frag_ref(skb, i); 3956 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 3957 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 3958 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 3959 skb_shinfo(skb)->nr_frags++; 3960 } 3961 k++; 3962 } else 3963 skb_shinfo(skb)->nr_frags++; 3964 pos += size; 3965 } 3966 skb_shinfo(skb1)->nr_frags = k; 3967 } 3968 3969 /** 3970 * skb_split - Split fragmented skb to two parts at length len. 3971 * @skb: the buffer to split 3972 * @skb1: the buffer to receive the second part 3973 * @len: new length for skb 3974 */ 3975 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 3976 { 3977 int pos = skb_headlen(skb); 3978 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 3979 3980 skb_zcopy_downgrade_managed(skb); 3981 3982 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 3983 skb_zerocopy_clone(skb1, skb, 0); 3984 if (len < pos) /* Split line is inside header. */ 3985 skb_split_inside_header(skb, skb1, len, pos); 3986 else /* Second chunk has no header, nothing to copy. */ 3987 skb_split_no_header(skb, skb1, len, pos); 3988 } 3989 EXPORT_SYMBOL(skb_split); 3990 3991 /* Shifting from/to a cloned skb is a no-go. 3992 * 3993 * Caller cannot keep skb_shinfo related pointers past calling here! 3994 */ 3995 static int skb_prepare_for_shift(struct sk_buff *skb) 3996 { 3997 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 3998 } 3999 4000 /** 4001 * skb_shift - Shifts paged data partially from skb to another 4002 * @tgt: buffer into which tail data gets added 4003 * @skb: buffer from which the paged data comes from 4004 * @shiftlen: shift up to this many bytes 4005 * 4006 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4007 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4008 * It's up to caller to free skb if everything was shifted. 4009 * 4010 * If @tgt runs out of frags, the whole operation is aborted. 4011 * 4012 * Skb cannot include anything else but paged data while tgt is allowed 4013 * to have non-paged data as well. 4014 * 4015 * TODO: full sized shift could be optimized but that would need 4016 * specialized skb free'er to handle frags without up-to-date nr_frags. 4017 */ 4018 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4019 { 4020 int from, to, merge, todo; 4021 skb_frag_t *fragfrom, *fragto; 4022 4023 BUG_ON(shiftlen > skb->len); 4024 4025 if (skb_headlen(skb)) 4026 return 0; 4027 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4028 return 0; 4029 4030 todo = shiftlen; 4031 from = 0; 4032 to = skb_shinfo(tgt)->nr_frags; 4033 fragfrom = &skb_shinfo(skb)->frags[from]; 4034 4035 /* Actual merge is delayed until the point when we know we can 4036 * commit all, so that we don't have to undo partial changes 4037 */ 4038 if (!to || 4039 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4040 skb_frag_off(fragfrom))) { 4041 merge = -1; 4042 } else { 4043 merge = to - 1; 4044 4045 todo -= skb_frag_size(fragfrom); 4046 if (todo < 0) { 4047 if (skb_prepare_for_shift(skb) || 4048 skb_prepare_for_shift(tgt)) 4049 return 0; 4050 4051 /* All previous frag pointers might be stale! 
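 * skb_prepare_for_shift() may have reallocated the head via
 * skb_unclone_keeptruesize(), so fragfrom and fragto are reloaded
 * below before being used again.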
*/ 4052 fragfrom = &skb_shinfo(skb)->frags[from]; 4053 fragto = &skb_shinfo(tgt)->frags[merge]; 4054 4055 skb_frag_size_add(fragto, shiftlen); 4056 skb_frag_size_sub(fragfrom, shiftlen); 4057 skb_frag_off_add(fragfrom, shiftlen); 4058 4059 goto onlymerged; 4060 } 4061 4062 from++; 4063 } 4064 4065 /* Skip full, not-fitting skb to avoid expensive operations */ 4066 if ((shiftlen == skb->len) && 4067 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4068 return 0; 4069 4070 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4071 return 0; 4072 4073 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4074 if (to == MAX_SKB_FRAGS) 4075 return 0; 4076 4077 fragfrom = &skb_shinfo(skb)->frags[from]; 4078 fragto = &skb_shinfo(tgt)->frags[to]; 4079 4080 if (todo >= skb_frag_size(fragfrom)) { 4081 *fragto = *fragfrom; 4082 todo -= skb_frag_size(fragfrom); 4083 from++; 4084 to++; 4085 4086 } else { 4087 __skb_frag_ref(fragfrom); 4088 skb_frag_page_copy(fragto, fragfrom); 4089 skb_frag_off_copy(fragto, fragfrom); 4090 skb_frag_size_set(fragto, todo); 4091 4092 skb_frag_off_add(fragfrom, todo); 4093 skb_frag_size_sub(fragfrom, todo); 4094 todo = 0; 4095 4096 to++; 4097 break; 4098 } 4099 } 4100 4101 /* Ready to "commit" this state change to tgt */ 4102 skb_shinfo(tgt)->nr_frags = to; 4103 4104 if (merge >= 0) { 4105 fragfrom = &skb_shinfo(skb)->frags[0]; 4106 fragto = &skb_shinfo(tgt)->frags[merge]; 4107 4108 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4109 __skb_frag_unref(fragfrom, skb->pp_recycle); 4110 } 4111 4112 /* Reposition in the original skb */ 4113 to = 0; 4114 while (from < skb_shinfo(skb)->nr_frags) 4115 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4116 skb_shinfo(skb)->nr_frags = to; 4117 4118 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4119 4120 onlymerged: 4121 /* Most likely the tgt won't ever need its checksum anymore, skb on 4122 * the other hand might need it if it needs to be resent 4123 */ 4124 tgt->ip_summed = CHECKSUM_PARTIAL; 4125 skb->ip_summed = CHECKSUM_PARTIAL; 4126 4127 skb_len_add(skb, -shiftlen); 4128 skb_len_add(tgt, shiftlen); 4129 4130 return shiftlen; 4131 } 4132 4133 /** 4134 * skb_prepare_seq_read - Prepare a sequential read of skb data 4135 * @skb: the buffer to read 4136 * @from: lower offset of data to be read 4137 * @to: upper offset of data to be read 4138 * @st: state variable 4139 * 4140 * Initializes the specified state variable. Must be called before 4141 * invoking skb_seq_read() for the first time. 4142 */ 4143 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4144 unsigned int to, struct skb_seq_state *st) 4145 { 4146 st->lower_offset = from; 4147 st->upper_offset = to; 4148 st->root_skb = st->cur_skb = skb; 4149 st->frag_idx = st->stepped_offset = 0; 4150 st->frag_data = NULL; 4151 st->frag_off = 0; 4152 } 4153 EXPORT_SYMBOL(skb_prepare_seq_read); 4154 4155 /** 4156 * skb_seq_read - Sequentially read skb data 4157 * @consumed: number of bytes consumed by the caller so far 4158 * @data: destination pointer for data to be returned 4159 * @st: state variable 4160 * 4161 * Reads a block of skb data at @consumed relative to the 4162 * lower offset specified to skb_prepare_seq_read(). Assigns 4163 * the head of the data block to @data and returns the length 4164 * of the block or 0 if the end of the skb data or the upper 4165 * offset has been reached. 4166 * 4167 * The caller is not required to consume all of the data 4168 * returned, i.e. 
@consumed is typically set to the number 4169 * of bytes already consumed and the next call to 4170 * skb_seq_read() will return the remaining part of the block. 4171 * 4172 * Note 1: The size of each block of data returned can be arbitrary, 4173 * this limitation is the cost for zerocopy sequential 4174 * reads of potentially non linear data. 4175 * 4176 * Note 2: Fragment lists within fragments are not implemented 4177 * at the moment, state->root_skb could be replaced with 4178 * a stack for this purpose. 4179 */ 4180 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4181 struct skb_seq_state *st) 4182 { 4183 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4184 skb_frag_t *frag; 4185 4186 if (unlikely(abs_offset >= st->upper_offset)) { 4187 if (st->frag_data) { 4188 kunmap_atomic(st->frag_data); 4189 st->frag_data = NULL; 4190 } 4191 return 0; 4192 } 4193 4194 next_skb: 4195 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4196 4197 if (abs_offset < block_limit && !st->frag_data) { 4198 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4199 return block_limit - abs_offset; 4200 } 4201 4202 if (st->frag_idx == 0 && !st->frag_data) 4203 st->stepped_offset += skb_headlen(st->cur_skb); 4204 4205 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4206 unsigned int pg_idx, pg_off, pg_sz; 4207 4208 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4209 4210 pg_idx = 0; 4211 pg_off = skb_frag_off(frag); 4212 pg_sz = skb_frag_size(frag); 4213 4214 if (skb_frag_must_loop(skb_frag_page(frag))) { 4215 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4216 pg_off = offset_in_page(pg_off + st->frag_off); 4217 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4218 PAGE_SIZE - pg_off); 4219 } 4220 4221 block_limit = pg_sz + st->stepped_offset; 4222 if (abs_offset < block_limit) { 4223 if (!st->frag_data) 4224 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4225 4226 *data = (u8 *)st->frag_data + pg_off + 4227 (abs_offset - st->stepped_offset); 4228 4229 return block_limit - abs_offset; 4230 } 4231 4232 if (st->frag_data) { 4233 kunmap_atomic(st->frag_data); 4234 st->frag_data = NULL; 4235 } 4236 4237 st->stepped_offset += pg_sz; 4238 st->frag_off += pg_sz; 4239 if (st->frag_off == skb_frag_size(frag)) { 4240 st->frag_off = 0; 4241 st->frag_idx++; 4242 } 4243 } 4244 4245 if (st->frag_data) { 4246 kunmap_atomic(st->frag_data); 4247 st->frag_data = NULL; 4248 } 4249 4250 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4251 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4252 st->frag_idx = 0; 4253 goto next_skb; 4254 } else if (st->cur_skb->next) { 4255 st->cur_skb = st->cur_skb->next; 4256 st->frag_idx = 0; 4257 goto next_skb; 4258 } 4259 4260 return 0; 4261 } 4262 EXPORT_SYMBOL(skb_seq_read); 4263 4264 /** 4265 * skb_abort_seq_read - Abort a sequential read of skb data 4266 * @st: state variable 4267 * 4268 * Must be called if skb_seq_read() was not called until it 4269 * returned 0. 
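 *
 * A minimal sketch of the whole prepare/read/abort sequence
 * (illustrative only; process() is a hypothetical consumer of each
 * returned block):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (process(data, len) < 0) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}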
4270 */ 4271 void skb_abort_seq_read(struct skb_seq_state *st) 4272 { 4273 if (st->frag_data) 4274 kunmap_atomic(st->frag_data); 4275 } 4276 EXPORT_SYMBOL(skb_abort_seq_read); 4277 4278 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4279 4280 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4281 struct ts_config *conf, 4282 struct ts_state *state) 4283 { 4284 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4285 } 4286 4287 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4288 { 4289 skb_abort_seq_read(TS_SKB_CB(state)); 4290 } 4291 4292 /** 4293 * skb_find_text - Find a text pattern in skb data 4294 * @skb: the buffer to look in 4295 * @from: search offset 4296 * @to: search limit 4297 * @config: textsearch configuration 4298 * 4299 * Finds a pattern in the skb data according to the specified 4300 * textsearch configuration. Use textsearch_next() to retrieve 4301 * subsequent occurrences of the pattern. Returns the offset 4302 * to the first occurrence or UINT_MAX if no match was found. 4303 */ 4304 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4305 unsigned int to, struct ts_config *config) 4306 { 4307 unsigned int patlen = config->ops->get_pattern_len(config); 4308 struct ts_state state; 4309 unsigned int ret; 4310 4311 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4312 4313 config->get_next_block = skb_ts_get_next_block; 4314 config->finish = skb_ts_finish; 4315 4316 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4317 4318 ret = textsearch_find(config, &state); 4319 return (ret + patlen <= to - from ? ret : UINT_MAX); 4320 } 4321 EXPORT_SYMBOL(skb_find_text); 4322 4323 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4324 int offset, size_t size, size_t max_frags) 4325 { 4326 int i = skb_shinfo(skb)->nr_frags; 4327 4328 if (skb_can_coalesce(skb, i, page, offset)) { 4329 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4330 } else if (i < max_frags) { 4331 skb_zcopy_downgrade_managed(skb); 4332 get_page(page); 4333 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4334 } else { 4335 return -EMSGSIZE; 4336 } 4337 4338 return 0; 4339 } 4340 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4341 4342 /** 4343 * skb_pull_rcsum - pull skb and update receive checksum 4344 * @skb: buffer to update 4345 * @len: length of data pulled 4346 * 4347 * This function performs an skb_pull on the packet and updates 4348 * the CHECKSUM_COMPLETE checksum. It should be used on 4349 * receive path processing instead of skb_pull unless you know 4350 * that the checksum difference is zero (e.g., a valid IP header) 4351 * or you are setting ip_summed to CHECKSUM_NONE. 
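 *
 * A minimal receive-path sketch (illustrative only; hdr_len stands for
 * a hypothetical encapsulation header length, it is not a parameter of
 * this function):
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdr_len);
 *
 * After the call skb->data points past the stripped header and a
 * CHECKSUM_COMPLETE value in skb->csum still covers the remaining data.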
4352 */ 4353 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4354 { 4355 unsigned char *data = skb->data; 4356 4357 BUG_ON(len > skb->len); 4358 __skb_pull(skb, len); 4359 skb_postpull_rcsum(skb, data, len); 4360 return skb->data; 4361 } 4362 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4363 4364 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4365 { 4366 skb_frag_t head_frag; 4367 struct page *page; 4368 4369 page = virt_to_head_page(frag_skb->head); 4370 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4371 (unsigned char *)page_address(page), 4372 skb_headlen(frag_skb)); 4373 return head_frag; 4374 } 4375 4376 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4377 netdev_features_t features, 4378 unsigned int offset) 4379 { 4380 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4381 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4382 unsigned int delta_truesize = 0; 4383 unsigned int delta_len = 0; 4384 struct sk_buff *tail = NULL; 4385 struct sk_buff *nskb, *tmp; 4386 int len_diff, err; 4387 4388 skb_push(skb, -skb_network_offset(skb) + offset); 4389 4390 /* Ensure the head is writeable before touching the shared info */ 4391 err = skb_unclone(skb, GFP_ATOMIC); 4392 if (err) 4393 goto err_linearize; 4394 4395 skb_shinfo(skb)->frag_list = NULL; 4396 4397 while (list_skb) { 4398 nskb = list_skb; 4399 list_skb = list_skb->next; 4400 4401 err = 0; 4402 delta_truesize += nskb->truesize; 4403 if (skb_shared(nskb)) { 4404 tmp = skb_clone(nskb, GFP_ATOMIC); 4405 if (tmp) { 4406 consume_skb(nskb); 4407 nskb = tmp; 4408 err = skb_unclone(nskb, GFP_ATOMIC); 4409 } else { 4410 err = -ENOMEM; 4411 } 4412 } 4413 4414 if (!tail) 4415 skb->next = nskb; 4416 else 4417 tail->next = nskb; 4418 4419 if (unlikely(err)) { 4420 nskb->next = list_skb; 4421 goto err_linearize; 4422 } 4423 4424 tail = nskb; 4425 4426 delta_len += nskb->len; 4427 4428 skb_push(nskb, -skb_network_offset(nskb) + offset); 4429 4430 skb_release_head_state(nskb); 4431 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4432 __copy_skb_header(nskb, skb); 4433 4434 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4435 nskb->transport_header += len_diff; 4436 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4437 nskb->data - tnl_hlen, 4438 offset + tnl_hlen); 4439 4440 if (skb_needs_linearize(nskb, features) && 4441 __skb_linearize(nskb)) 4442 goto err_linearize; 4443 } 4444 4445 skb->truesize = skb->truesize - delta_truesize; 4446 skb->data_len = skb->data_len - delta_len; 4447 skb->len = skb->len - delta_len; 4448 4449 skb_gso_reset(skb); 4450 4451 skb->prev = tail; 4452 4453 if (skb_needs_linearize(skb, features) && 4454 __skb_linearize(skb)) 4455 goto err_linearize; 4456 4457 skb_get(skb); 4458 4459 return skb; 4460 4461 err_linearize: 4462 kfree_skb_list(skb->next); 4463 skb->next = NULL; 4464 return ERR_PTR(-ENOMEM); 4465 } 4466 EXPORT_SYMBOL_GPL(skb_segment_list); 4467 4468 /** 4469 * skb_segment - Perform protocol segmentation on skb. 4470 * @head_skb: buffer to segment 4471 * @features: features for the output path (see dev->features) 4472 * 4473 * This function performs segmentation on the given skb. It returns 4474 * a pointer to the first in a list of new skbs for the segments. 4475 * In case of error it returns ERR_PTR(err). 
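 *
 * A minimal caller-side sketch (illustrative only; what is done with
 * the segments afterwards is up to the caller):
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *
 * The returned segments are linked through skb->next (with segs->prev
 * pointing at the last one), and the original buffer is typically
 * consumed by the caller once segmentation has succeeded.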
4476 */ 4477 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4478 netdev_features_t features) 4479 { 4480 struct sk_buff *segs = NULL; 4481 struct sk_buff *tail = NULL; 4482 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4483 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4484 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4485 unsigned int offset = doffset; 4486 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4487 unsigned int partial_segs = 0; 4488 unsigned int headroom; 4489 unsigned int len = head_skb->len; 4490 struct sk_buff *frag_skb; 4491 skb_frag_t *frag; 4492 __be16 proto; 4493 bool csum, sg; 4494 int err = -ENOMEM; 4495 int i = 0; 4496 int nfrags, pos; 4497 4498 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4499 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4500 struct sk_buff *check_skb; 4501 4502 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4503 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4504 /* gso_size is untrusted, and we have a frag_list with 4505 * a linear non head_frag item. 4506 * 4507 * If head_skb's headlen does not fit requested gso_size, 4508 * it means that the frag_list members do NOT terminate 4509 * on exact gso_size boundaries. Hence we cannot perform 4510 * skb_frag_t page sharing. Therefore we must fallback to 4511 * copying the frag_list skbs; we do so by disabling SG. 4512 */ 4513 features &= ~NETIF_F_SG; 4514 break; 4515 } 4516 } 4517 } 4518 4519 __skb_push(head_skb, doffset); 4520 proto = skb_network_protocol(head_skb, NULL); 4521 if (unlikely(!proto)) 4522 return ERR_PTR(-EINVAL); 4523 4524 sg = !!(features & NETIF_F_SG); 4525 csum = !!can_checksum_protocol(features, proto); 4526 4527 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4528 if (!(features & NETIF_F_GSO_PARTIAL)) { 4529 struct sk_buff *iter; 4530 unsigned int frag_len; 4531 4532 if (!list_skb || 4533 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4534 goto normal; 4535 4536 /* If we get here then all the required 4537 * GSO features except frag_list are supported. 4538 * Try to split the SKB to multiple GSO SKBs 4539 * with no frag_list. 4540 * Currently we can do that only when the buffers don't 4541 * have a linear part and all the buffers except 4542 * the last are of the same length. 4543 */ 4544 frag_len = list_skb->len; 4545 skb_walk_frags(head_skb, iter) { 4546 if (frag_len != iter->len && iter->next) 4547 goto normal; 4548 if (skb_headlen(iter) && !iter->head_frag) 4549 goto normal; 4550 4551 len -= iter->len; 4552 } 4553 4554 if (len != frag_len) 4555 goto normal; 4556 } 4557 4558 /* GSO partial only requires that we trim off any excess that 4559 * doesn't fit into an MSS sized block, so take care of that 4560 * now. 4561 * Cap len to not accidentally hit GSO_BY_FRAGS. 
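	 * As a worked example (numbers purely illustrative): with
	 * len = 65000 and mss = 1448, partial_segs below becomes 44, so
	 * mss is scaled up to 63712 and the segmentation loop emits one
	 * 63712 byte chunk plus a 1288 byte trailer instead of 45
	 * MSS-sized segments.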
4562 */ 4563 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4564 if (partial_segs > 1) 4565 mss *= partial_segs; 4566 else 4567 partial_segs = 0; 4568 } 4569 4570 normal: 4571 headroom = skb_headroom(head_skb); 4572 pos = skb_headlen(head_skb); 4573 4574 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4575 return ERR_PTR(-ENOMEM); 4576 4577 nfrags = skb_shinfo(head_skb)->nr_frags; 4578 frag = skb_shinfo(head_skb)->frags; 4579 frag_skb = head_skb; 4580 4581 do { 4582 struct sk_buff *nskb; 4583 skb_frag_t *nskb_frag; 4584 int hsize; 4585 int size; 4586 4587 if (unlikely(mss == GSO_BY_FRAGS)) { 4588 len = list_skb->len; 4589 } else { 4590 len = head_skb->len - offset; 4591 if (len > mss) 4592 len = mss; 4593 } 4594 4595 hsize = skb_headlen(head_skb) - offset; 4596 4597 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4598 (skb_headlen(list_skb) == len || sg)) { 4599 BUG_ON(skb_headlen(list_skb) > len); 4600 4601 nskb = skb_clone(list_skb, GFP_ATOMIC); 4602 if (unlikely(!nskb)) 4603 goto err; 4604 4605 i = 0; 4606 nfrags = skb_shinfo(list_skb)->nr_frags; 4607 frag = skb_shinfo(list_skb)->frags; 4608 frag_skb = list_skb; 4609 pos += skb_headlen(list_skb); 4610 4611 while (pos < offset + len) { 4612 BUG_ON(i >= nfrags); 4613 4614 size = skb_frag_size(frag); 4615 if (pos + size > offset + len) 4616 break; 4617 4618 i++; 4619 pos += size; 4620 frag++; 4621 } 4622 4623 list_skb = list_skb->next; 4624 4625 if (unlikely(pskb_trim(nskb, len))) { 4626 kfree_skb(nskb); 4627 goto err; 4628 } 4629 4630 hsize = skb_end_offset(nskb); 4631 if (skb_cow_head(nskb, doffset + headroom)) { 4632 kfree_skb(nskb); 4633 goto err; 4634 } 4635 4636 nskb->truesize += skb_end_offset(nskb) - hsize; 4637 skb_release_head_state(nskb); 4638 __skb_push(nskb, doffset); 4639 } else { 4640 if (hsize < 0) 4641 hsize = 0; 4642 if (hsize > len || !sg) 4643 hsize = len; 4644 4645 nskb = __alloc_skb(hsize + doffset + headroom, 4646 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4647 NUMA_NO_NODE); 4648 4649 if (unlikely(!nskb)) 4650 goto err; 4651 4652 skb_reserve(nskb, headroom); 4653 __skb_put(nskb, doffset); 4654 } 4655 4656 if (segs) 4657 tail->next = nskb; 4658 else 4659 segs = nskb; 4660 tail = nskb; 4661 4662 __copy_skb_header(nskb, head_skb); 4663 4664 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4665 skb_reset_mac_len(nskb); 4666 4667 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4668 nskb->data - tnl_hlen, 4669 doffset + tnl_hlen); 4670 4671 if (nskb->len == len + doffset) 4672 goto perform_csum_check; 4673 4674 if (!sg) { 4675 if (!csum) { 4676 if (!nskb->remcsum_offload) 4677 nskb->ip_summed = CHECKSUM_NONE; 4678 SKB_GSO_CB(nskb)->csum = 4679 skb_copy_and_csum_bits(head_skb, offset, 4680 skb_put(nskb, 4681 len), 4682 len); 4683 SKB_GSO_CB(nskb)->csum_start = 4684 skb_headroom(nskb) + doffset; 4685 } else { 4686 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4687 goto err; 4688 } 4689 continue; 4690 } 4691 4692 nskb_frag = skb_shinfo(nskb)->frags; 4693 4694 skb_copy_from_linear_data_offset(head_skb, offset, 4695 skb_put(nskb, hsize), hsize); 4696 4697 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4698 SKBFL_SHARED_FRAG; 4699 4700 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4701 goto err; 4702 4703 while (pos < offset + len) { 4704 if (i >= nfrags) { 4705 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4706 skb_zerocopy_clone(nskb, list_skb, 4707 GFP_ATOMIC)) 4708 goto err; 4709 4710 i = 0; 4711 nfrags = skb_shinfo(list_skb)->nr_frags; 4712 frag = 
skb_shinfo(list_skb)->frags; 4713 frag_skb = list_skb; 4714 if (!skb_headlen(list_skb)) { 4715 BUG_ON(!nfrags); 4716 } else { 4717 BUG_ON(!list_skb->head_frag); 4718 4719 /* to make room for head_frag. */ 4720 i--; 4721 frag--; 4722 } 4723 4724 list_skb = list_skb->next; 4725 } 4726 4727 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4728 MAX_SKB_FRAGS)) { 4729 net_warn_ratelimited( 4730 "skb_segment: too many frags: %u %u\n", 4731 pos, mss); 4732 err = -EINVAL; 4733 goto err; 4734 } 4735 4736 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4737 __skb_frag_ref(nskb_frag); 4738 size = skb_frag_size(nskb_frag); 4739 4740 if (pos < offset) { 4741 skb_frag_off_add(nskb_frag, offset - pos); 4742 skb_frag_size_sub(nskb_frag, offset - pos); 4743 } 4744 4745 skb_shinfo(nskb)->nr_frags++; 4746 4747 if (pos + size <= offset + len) { 4748 i++; 4749 frag++; 4750 pos += size; 4751 } else { 4752 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4753 goto skip_fraglist; 4754 } 4755 4756 nskb_frag++; 4757 } 4758 4759 skip_fraglist: 4760 nskb->data_len = len - hsize; 4761 nskb->len += nskb->data_len; 4762 nskb->truesize += nskb->data_len; 4763 4764 perform_csum_check: 4765 if (!csum) { 4766 if (skb_has_shared_frag(nskb) && 4767 __skb_linearize(nskb)) 4768 goto err; 4769 4770 if (!nskb->remcsum_offload) 4771 nskb->ip_summed = CHECKSUM_NONE; 4772 SKB_GSO_CB(nskb)->csum = 4773 skb_checksum(nskb, doffset, 4774 nskb->len - doffset, 0); 4775 SKB_GSO_CB(nskb)->csum_start = 4776 skb_headroom(nskb) + doffset; 4777 } 4778 } while ((offset += len) < head_skb->len); 4779 4780 /* Some callers want to get the end of the list. 4781 * Put it in segs->prev to avoid walking the list. 4782 * (see validate_xmit_skb_list() for example) 4783 */ 4784 segs->prev = tail; 4785 4786 if (partial_segs) { 4787 struct sk_buff *iter; 4788 int type = skb_shinfo(head_skb)->gso_type; 4789 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4790 4791 /* Update type to add partial and then remove dodgy if set */ 4792 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4793 type &= ~SKB_GSO_DODGY; 4794 4795 /* Update GSO info and prepare to start updating headers on 4796 * our way back down the stack of protocols. 4797 */ 4798 for (iter = segs; iter; iter = iter->next) { 4799 skb_shinfo(iter)->gso_size = gso_size; 4800 skb_shinfo(iter)->gso_segs = partial_segs; 4801 skb_shinfo(iter)->gso_type = type; 4802 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4803 } 4804 4805 if (tail->len - doffset <= gso_size) 4806 skb_shinfo(tail)->gso_size = 0; 4807 else if (tail != segs) 4808 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4809 } 4810 4811 /* Following permits correct backpressure, for protocols 4812 * using skb_set_owner_w(). 4813 * Idea is to tranfert ownership from head_skb to last segment. 
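	 * The last segment is normally the last one freed on the
	 * transmit path, so keeping the ownership there keeps
	 * sk_wmem_alloc charged until the whole train of segments is
	 * gone.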
4814 */ 4815 if (head_skb->destructor == sock_wfree) { 4816 swap(tail->truesize, head_skb->truesize); 4817 swap(tail->destructor, head_skb->destructor); 4818 swap(tail->sk, head_skb->sk); 4819 } 4820 return segs; 4821 4822 err: 4823 kfree_skb_list(segs); 4824 return ERR_PTR(err); 4825 } 4826 EXPORT_SYMBOL_GPL(skb_segment); 4827 4828 #ifdef CONFIG_SKB_EXTENSIONS 4829 #define SKB_EXT_ALIGN_VALUE 8 4830 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4831 4832 static const u8 skb_ext_type_len[] = { 4833 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4834 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4835 #endif 4836 #ifdef CONFIG_XFRM 4837 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4838 #endif 4839 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4840 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4841 #endif 4842 #if IS_ENABLED(CONFIG_MPTCP) 4843 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4844 #endif 4845 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4846 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 4847 #endif 4848 }; 4849 4850 static __always_inline unsigned int skb_ext_total_length(void) 4851 { 4852 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 4853 int i; 4854 4855 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 4856 l += skb_ext_type_len[i]; 4857 4858 return l; 4859 } 4860 4861 static void skb_extensions_init(void) 4862 { 4863 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4864 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4865 BUILD_BUG_ON(skb_ext_total_length() > 255); 4866 #endif 4867 4868 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4869 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4870 0, 4871 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4872 NULL); 4873 } 4874 #else 4875 static void skb_extensions_init(void) {} 4876 #endif 4877 4878 /* The SKB kmem_cache slab is critical for network performance. Never 4879 * merge/alias the slab with similar sized objects. This avoids fragmentation 4880 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 4881 */ 4882 #ifndef CONFIG_SLUB_TINY 4883 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 4884 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 4885 #define FLAG_SKB_NO_MERGE 0 4886 #endif 4887 4888 void __init skb_init(void) 4889 { 4890 skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 4891 sizeof(struct sk_buff), 4892 0, 4893 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 4894 FLAG_SKB_NO_MERGE, 4895 offsetof(struct sk_buff, cb), 4896 sizeof_field(struct sk_buff, cb), 4897 NULL); 4898 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4899 sizeof(struct sk_buff_fclones), 4900 0, 4901 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4902 NULL); 4903 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 4904 * struct skb_shared_info is located at the end of skb->head, 4905 * and should not be copied to/from user. 
4906 */ 4907 skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 4908 SKB_SMALL_HEAD_CACHE_SIZE, 4909 0, 4910 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 4911 0, 4912 SKB_SMALL_HEAD_HEADROOM, 4913 NULL); 4914 skb_extensions_init(); 4915 } 4916 4917 static int 4918 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 4919 unsigned int recursion_level) 4920 { 4921 int start = skb_headlen(skb); 4922 int i, copy = start - offset; 4923 struct sk_buff *frag_iter; 4924 int elt = 0; 4925 4926 if (unlikely(recursion_level >= 24)) 4927 return -EMSGSIZE; 4928 4929 if (copy > 0) { 4930 if (copy > len) 4931 copy = len; 4932 sg_set_buf(sg, skb->data + offset, copy); 4933 elt++; 4934 if ((len -= copy) == 0) 4935 return elt; 4936 offset += copy; 4937 } 4938 4939 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4940 int end; 4941 4942 WARN_ON(start > offset + len); 4943 4944 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4945 if ((copy = end - offset) > 0) { 4946 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4947 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4948 return -EMSGSIZE; 4949 4950 if (copy > len) 4951 copy = len; 4952 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4953 skb_frag_off(frag) + offset - start); 4954 elt++; 4955 if (!(len -= copy)) 4956 return elt; 4957 offset += copy; 4958 } 4959 start = end; 4960 } 4961 4962 skb_walk_frags(skb, frag_iter) { 4963 int end, ret; 4964 4965 WARN_ON(start > offset + len); 4966 4967 end = start + frag_iter->len; 4968 if ((copy = end - offset) > 0) { 4969 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4970 return -EMSGSIZE; 4971 4972 if (copy > len) 4973 copy = len; 4974 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 4975 copy, recursion_level + 1); 4976 if (unlikely(ret < 0)) 4977 return ret; 4978 elt += ret; 4979 if ((len -= copy) == 0) 4980 return elt; 4981 offset += copy; 4982 } 4983 start = end; 4984 } 4985 BUG_ON(len); 4986 return elt; 4987 } 4988 4989 /** 4990 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 4991 * @skb: Socket buffer containing the buffers to be mapped 4992 * @sg: The scatter-gather list to map into 4993 * @offset: The offset into the buffer's contents to start mapping 4994 * @len: Length of buffer space to be mapped 4995 * 4996 * Fill the specified scatter-gather list with mappings/pointers into a 4997 * region of the buffer space attached to a socket buffer. Returns either 4998 * the number of scatterlist items used, or -EMSGSIZE if the contents 4999 * could not fit. 5000 */ 5001 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5002 { 5003 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5004 5005 if (nsg <= 0) 5006 return nsg; 5007 5008 sg_mark_end(&sg[nsg - 1]); 5009 5010 return nsg; 5011 } 5012 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5013 5014 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 5015 * sglist without mark the sg which contain last skb data as the end. 5016 * So the caller can mannipulate sg list as will when padding new data after 5017 * the first call without calling sg_unmark_end to expend sg list. 5018 * 5019 * Scenario to use skb_to_sgvec_nomark: 5020 * 1. sg_init_table 5021 * 2. skb_to_sgvec_nomark(payload1) 5022 * 3. skb_to_sgvec_nomark(payload2) 5023 * 5024 * This is equivalent to: 5025 * 1. sg_init_table 5026 * 2. skb_to_sgvec(payload1) 5027 * 3. sg_unmark_end 5028 * 4. 
skb_to_sgvec(payload2) 5029 * 5030 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 5031 * is more preferable. 5032 */ 5033 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5034 int offset, int len) 5035 { 5036 return __skb_to_sgvec(skb, sg, offset, len, 0); 5037 } 5038 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5039 5040 5041 5042 /** 5043 * skb_cow_data - Check that a socket buffer's data buffers are writable 5044 * @skb: The socket buffer to check. 5045 * @tailbits: Amount of trailing space to be added 5046 * @trailer: Returned pointer to the skb where the @tailbits space begins 5047 * 5048 * Make sure that the data buffers attached to a socket buffer are 5049 * writable. If they are not, private copies are made of the data buffers 5050 * and the socket buffer is set to use these instead. 5051 * 5052 * If @tailbits is given, make sure that there is space to write @tailbits 5053 * bytes of data beyond current end of socket buffer. @trailer will be 5054 * set to point to the skb in which this space begins. 5055 * 5056 * The number of scatterlist elements required to completely map the 5057 * COW'd and extended socket buffer will be returned. 5058 */ 5059 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5060 { 5061 int copyflag; 5062 int elt; 5063 struct sk_buff *skb1, **skb_p; 5064 5065 /* If skb is cloned or its head is paged, reallocate 5066 * head pulling out all the pages (pages are considered not writable 5067 * at the moment even if they are anonymous). 5068 */ 5069 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5070 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5071 return -ENOMEM; 5072 5073 /* Easy case. Most of packets will go this way. */ 5074 if (!skb_has_frag_list(skb)) { 5075 /* A little of trouble, not enough of space for trailer. 5076 * This should not happen, when stack is tuned to generate 5077 * good frames. OK, on miss we reallocate and reserve even more 5078 * space, 128 bytes is fair. */ 5079 5080 if (skb_tailroom(skb) < tailbits && 5081 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5082 return -ENOMEM; 5083 5084 /* Voila! */ 5085 *trailer = skb; 5086 return 1; 5087 } 5088 5089 /* Misery. We are in troubles, going to mincer fragments... */ 5090 5091 elt = 1; 5092 skb_p = &skb_shinfo(skb)->frag_list; 5093 copyflag = 0; 5094 5095 while ((skb1 = *skb_p) != NULL) { 5096 int ntail = 0; 5097 5098 /* The fragment is partially pulled by someone, 5099 * this can happen on input. Copy it and everything 5100 * after it. */ 5101 5102 if (skb_shared(skb1)) 5103 copyflag = 1; 5104 5105 /* If the skb is the last, worry about trailer. */ 5106 5107 if (skb1->next == NULL && tailbits) { 5108 if (skb_shinfo(skb1)->nr_frags || 5109 skb_has_frag_list(skb1) || 5110 skb_tailroom(skb1) < tailbits) 5111 ntail = tailbits + 128; 5112 } 5113 5114 if (copyflag || 5115 skb_cloned(skb1) || 5116 ntail || 5117 skb_shinfo(skb1)->nr_frags || 5118 skb_has_frag_list(skb1)) { 5119 struct sk_buff *skb2; 5120 5121 /* Fuck, we are miserable poor guys... */ 5122 if (ntail == 0) 5123 skb2 = skb_copy(skb1, GFP_ATOMIC); 5124 else 5125 skb2 = skb_copy_expand(skb1, 5126 skb_headroom(skb1), 5127 ntail, 5128 GFP_ATOMIC); 5129 if (unlikely(skb2 == NULL)) 5130 return -ENOMEM; 5131 5132 if (skb1->sk) 5133 skb_set_owner_w(skb2, skb1->sk); 5134 5135 /* Looking around. Are we still alive? 
5136 * OK, link new skb, drop old one */ 5137 5138 skb2->next = skb1->next; 5139 *skb_p = skb2; 5140 kfree_skb(skb1); 5141 skb1 = skb2; 5142 } 5143 elt++; 5144 *trailer = skb1; 5145 skb_p = &skb1->next; 5146 } 5147 5148 return elt; 5149 } 5150 EXPORT_SYMBOL_GPL(skb_cow_data); 5151 5152 static void sock_rmem_free(struct sk_buff *skb) 5153 { 5154 struct sock *sk = skb->sk; 5155 5156 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5157 } 5158 5159 static void skb_set_err_queue(struct sk_buff *skb) 5160 { 5161 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5162 * So, it is safe to (mis)use it to mark skbs on the error queue. 5163 */ 5164 skb->pkt_type = PACKET_OUTGOING; 5165 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5166 } 5167 5168 /* 5169 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5170 */ 5171 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5172 { 5173 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5174 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5175 return -ENOMEM; 5176 5177 skb_orphan(skb); 5178 skb->sk = sk; 5179 skb->destructor = sock_rmem_free; 5180 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5181 skb_set_err_queue(skb); 5182 5183 /* before exiting rcu section, make sure dst is refcounted */ 5184 skb_dst_force(skb); 5185 5186 skb_queue_tail(&sk->sk_error_queue, skb); 5187 if (!sock_flag(sk, SOCK_DEAD)) 5188 sk_error_report(sk); 5189 return 0; 5190 } 5191 EXPORT_SYMBOL(sock_queue_err_skb); 5192 5193 static bool is_icmp_err_skb(const struct sk_buff *skb) 5194 { 5195 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5196 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5197 } 5198 5199 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5200 { 5201 struct sk_buff_head *q = &sk->sk_error_queue; 5202 struct sk_buff *skb, *skb_next = NULL; 5203 bool icmp_next = false; 5204 unsigned long flags; 5205 5206 if (skb_queue_empty_lockless(q)) 5207 return NULL; 5208 5209 spin_lock_irqsave(&q->lock, flags); 5210 skb = __skb_dequeue(q); 5211 if (skb && (skb_next = skb_peek(q))) { 5212 icmp_next = is_icmp_err_skb(skb_next); 5213 if (icmp_next) 5214 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5215 } 5216 spin_unlock_irqrestore(&q->lock, flags); 5217 5218 if (is_icmp_err_skb(skb) && !icmp_next) 5219 sk->sk_err = 0; 5220 5221 if (skb_next) 5222 sk_error_report(sk); 5223 5224 return skb; 5225 } 5226 EXPORT_SYMBOL(sock_dequeue_err_skb); 5227 5228 /** 5229 * skb_clone_sk - create clone of skb, and take reference to socket 5230 * @skb: the skb to clone 5231 * 5232 * This function creates a clone of a buffer that holds a reference on 5233 * sk_refcnt. Buffers created via this function are meant to be 5234 * returned using sock_queue_err_skb, or free via kfree_skb. 5235 * 5236 * When passing buffers allocated with this function to sock_queue_err_skb 5237 * it is necessary to wrap the call with sock_hold/sock_put in order to 5238 * prevent the socket from being released prior to being enqueued on 5239 * the sk_error_queue. 
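 *
 * A minimal sketch of that pattern (illustrative only; error handling
 * is reduced to freeing the clone):
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone) {
 *		struct sock *sk = clone->sk;
 *
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}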
5240 */ 5241 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5242 { 5243 struct sock *sk = skb->sk; 5244 struct sk_buff *clone; 5245 5246 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5247 return NULL; 5248 5249 clone = skb_clone(skb, GFP_ATOMIC); 5250 if (!clone) { 5251 sock_put(sk); 5252 return NULL; 5253 } 5254 5255 clone->sk = sk; 5256 clone->destructor = sock_efree; 5257 5258 return clone; 5259 } 5260 EXPORT_SYMBOL(skb_clone_sk); 5261 5262 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5263 struct sock *sk, 5264 int tstype, 5265 bool opt_stats) 5266 { 5267 struct sock_exterr_skb *serr; 5268 int err; 5269 5270 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5271 5272 serr = SKB_EXT_ERR(skb); 5273 memset(serr, 0, sizeof(*serr)); 5274 serr->ee.ee_errno = ENOMSG; 5275 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5276 serr->ee.ee_info = tstype; 5277 serr->opt_stats = opt_stats; 5278 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5279 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5280 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5281 if (sk_is_tcp(sk)) 5282 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5283 } 5284 5285 err = sock_queue_err_skb(sk, skb); 5286 5287 if (err) 5288 kfree_skb(skb); 5289 } 5290 5291 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5292 { 5293 bool ret; 5294 5295 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) 5296 return true; 5297 5298 read_lock_bh(&sk->sk_callback_lock); 5299 ret = sk->sk_socket && sk->sk_socket->file && 5300 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5301 read_unlock_bh(&sk->sk_callback_lock); 5302 return ret; 5303 } 5304 5305 void skb_complete_tx_timestamp(struct sk_buff *skb, 5306 struct skb_shared_hwtstamps *hwtstamps) 5307 { 5308 struct sock *sk = skb->sk; 5309 5310 if (!skb_may_tx_timestamp(sk, false)) 5311 goto err; 5312 5313 /* Take a reference to prevent skb_orphan() from freeing the socket, 5314 * but only if the socket refcount is not zero. 
5315 */ 5316 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5317 *skb_hwtstamps(skb) = *hwtstamps; 5318 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5319 sock_put(sk); 5320 return; 5321 } 5322 5323 err: 5324 kfree_skb(skb); 5325 } 5326 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5327 5328 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5329 const struct sk_buff *ack_skb, 5330 struct skb_shared_hwtstamps *hwtstamps, 5331 struct sock *sk, int tstype) 5332 { 5333 struct sk_buff *skb; 5334 bool tsonly, opt_stats = false; 5335 u32 tsflags; 5336 5337 if (!sk) 5338 return; 5339 5340 tsflags = READ_ONCE(sk->sk_tsflags); 5341 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5342 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5343 return; 5344 5345 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5346 if (!skb_may_tx_timestamp(sk, tsonly)) 5347 return; 5348 5349 if (tsonly) { 5350 #ifdef CONFIG_INET 5351 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5352 sk_is_tcp(sk)) { 5353 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5354 ack_skb); 5355 opt_stats = true; 5356 } else 5357 #endif 5358 skb = alloc_skb(0, GFP_ATOMIC); 5359 } else { 5360 skb = skb_clone(orig_skb, GFP_ATOMIC); 5361 5362 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5363 kfree_skb(skb); 5364 return; 5365 } 5366 } 5367 if (!skb) 5368 return; 5369 5370 if (tsonly) { 5371 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5372 SKBTX_ANY_TSTAMP; 5373 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5374 } 5375 5376 if (hwtstamps) 5377 *skb_hwtstamps(skb) = *hwtstamps; 5378 else 5379 __net_timestamp(skb); 5380 5381 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5382 } 5383 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5384 5385 void skb_tstamp_tx(struct sk_buff *orig_skb, 5386 struct skb_shared_hwtstamps *hwtstamps) 5387 { 5388 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5389 SCM_TSTAMP_SND); 5390 } 5391 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5392 5393 #ifdef CONFIG_WIRELESS 5394 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5395 { 5396 struct sock *sk = skb->sk; 5397 struct sock_exterr_skb *serr; 5398 int err = 1; 5399 5400 skb->wifi_acked_valid = 1; 5401 skb->wifi_acked = acked; 5402 5403 serr = SKB_EXT_ERR(skb); 5404 memset(serr, 0, sizeof(*serr)); 5405 serr->ee.ee_errno = ENOMSG; 5406 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5407 5408 /* Take a reference to prevent skb_orphan() from freeing the socket, 5409 * but only if the socket refcount is not zero. 5410 */ 5411 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5412 err = sock_queue_err_skb(sk, skb); 5413 sock_put(sk); 5414 } 5415 if (err) 5416 kfree_skb(skb); 5417 } 5418 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5419 #endif /* CONFIG_WIRELESS */ 5420 5421 /** 5422 * skb_partial_csum_set - set up and verify partial csum values for packet 5423 * @skb: the skb to set 5424 * @start: the number of bytes after skb->data to start checksumming. 5425 * @off: the offset from start to place the checksum. 5426 * 5427 * For untrusted partially-checksummed packets, we need to make sure the values 5428 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5429 * 5430 * This function checks and sets those values and skb->ip_summed: if this 5431 * returns false you should drop the packet. 
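 *
 * A minimal sketch of the intended use (illustrative only; csum_start
 * and csum_offset stand for values parsed from an untrusted metadata
 * header):
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}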
5432 */ 5433 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5434 { 5435 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5436 u32 csum_start = skb_headroom(skb) + (u32)start; 5437 5438 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5439 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5440 start, off, skb_headroom(skb), skb_headlen(skb)); 5441 return false; 5442 } 5443 skb->ip_summed = CHECKSUM_PARTIAL; 5444 skb->csum_start = csum_start; 5445 skb->csum_offset = off; 5446 skb->transport_header = csum_start; 5447 return true; 5448 } 5449 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5450 5451 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5452 unsigned int max) 5453 { 5454 if (skb_headlen(skb) >= len) 5455 return 0; 5456 5457 /* If we need to pullup then pullup to the max, so we 5458 * won't need to do it again. 5459 */ 5460 if (max > skb->len) 5461 max = skb->len; 5462 5463 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5464 return -ENOMEM; 5465 5466 if (skb_headlen(skb) < len) 5467 return -EPROTO; 5468 5469 return 0; 5470 } 5471 5472 #define MAX_TCP_HDR_LEN (15 * 4) 5473 5474 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5475 typeof(IPPROTO_IP) proto, 5476 unsigned int off) 5477 { 5478 int err; 5479 5480 switch (proto) { 5481 case IPPROTO_TCP: 5482 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5483 off + MAX_TCP_HDR_LEN); 5484 if (!err && !skb_partial_csum_set(skb, off, 5485 offsetof(struct tcphdr, 5486 check))) 5487 err = -EPROTO; 5488 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5489 5490 case IPPROTO_UDP: 5491 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5492 off + sizeof(struct udphdr)); 5493 if (!err && !skb_partial_csum_set(skb, off, 5494 offsetof(struct udphdr, 5495 check))) 5496 err = -EPROTO; 5497 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5498 } 5499 5500 return ERR_PTR(-EPROTO); 5501 } 5502 5503 /* This value should be large enough to cover a tagged ethernet header plus 5504 * maximally sized IP and TCP or UDP headers. 5505 */ 5506 #define MAX_IP_HDR_LEN 128 5507 5508 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5509 { 5510 unsigned int off; 5511 bool fragment; 5512 __sum16 *csum; 5513 int err; 5514 5515 fragment = false; 5516 5517 err = skb_maybe_pull_tail(skb, 5518 sizeof(struct iphdr), 5519 MAX_IP_HDR_LEN); 5520 if (err < 0) 5521 goto out; 5522 5523 if (ip_is_fragment(ip_hdr(skb))) 5524 fragment = true; 5525 5526 off = ip_hdrlen(skb); 5527 5528 err = -EPROTO; 5529 5530 if (fragment) 5531 goto out; 5532 5533 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5534 if (IS_ERR(csum)) 5535 return PTR_ERR(csum); 5536 5537 if (recalculate) 5538 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5539 ip_hdr(skb)->daddr, 5540 skb->len - off, 5541 ip_hdr(skb)->protocol, 0); 5542 err = 0; 5543 5544 out: 5545 return err; 5546 } 5547 5548 /* This value should be large enough to cover a tagged ethernet header plus 5549 * an IPv6 header, all options, and a maximal TCP or UDP header. 
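 * As a rough, illustrative budget: 18 bytes of VLAN-tagged ethernet
 * header plus 40 bytes of fixed IPv6 header plus a maximal 60 byte TCP
 * header add up to 118 bytes, which leaves well over a hundred bytes
 * of the 256 byte limit below for extension headers.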
5550 */ 5551 #define MAX_IPV6_HDR_LEN 256 5552 5553 #define OPT_HDR(type, skb, off) \ 5554 (type *)(skb_network_header(skb) + (off)) 5555 5556 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5557 { 5558 int err; 5559 u8 nexthdr; 5560 unsigned int off; 5561 unsigned int len; 5562 bool fragment; 5563 bool done; 5564 __sum16 *csum; 5565 5566 fragment = false; 5567 done = false; 5568 5569 off = sizeof(struct ipv6hdr); 5570 5571 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5572 if (err < 0) 5573 goto out; 5574 5575 nexthdr = ipv6_hdr(skb)->nexthdr; 5576 5577 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5578 while (off <= len && !done) { 5579 switch (nexthdr) { 5580 case IPPROTO_DSTOPTS: 5581 case IPPROTO_HOPOPTS: 5582 case IPPROTO_ROUTING: { 5583 struct ipv6_opt_hdr *hp; 5584 5585 err = skb_maybe_pull_tail(skb, 5586 off + 5587 sizeof(struct ipv6_opt_hdr), 5588 MAX_IPV6_HDR_LEN); 5589 if (err < 0) 5590 goto out; 5591 5592 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5593 nexthdr = hp->nexthdr; 5594 off += ipv6_optlen(hp); 5595 break; 5596 } 5597 case IPPROTO_AH: { 5598 struct ip_auth_hdr *hp; 5599 5600 err = skb_maybe_pull_tail(skb, 5601 off + 5602 sizeof(struct ip_auth_hdr), 5603 MAX_IPV6_HDR_LEN); 5604 if (err < 0) 5605 goto out; 5606 5607 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5608 nexthdr = hp->nexthdr; 5609 off += ipv6_authlen(hp); 5610 break; 5611 } 5612 case IPPROTO_FRAGMENT: { 5613 struct frag_hdr *hp; 5614 5615 err = skb_maybe_pull_tail(skb, 5616 off + 5617 sizeof(struct frag_hdr), 5618 MAX_IPV6_HDR_LEN); 5619 if (err < 0) 5620 goto out; 5621 5622 hp = OPT_HDR(struct frag_hdr, skb, off); 5623 5624 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5625 fragment = true; 5626 5627 nexthdr = hp->nexthdr; 5628 off += sizeof(struct frag_hdr); 5629 break; 5630 } 5631 default: 5632 done = true; 5633 break; 5634 } 5635 } 5636 5637 err = -EPROTO; 5638 5639 if (!done || fragment) 5640 goto out; 5641 5642 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5643 if (IS_ERR(csum)) 5644 return PTR_ERR(csum); 5645 5646 if (recalculate) 5647 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5648 &ipv6_hdr(skb)->daddr, 5649 skb->len - off, nexthdr, 0); 5650 err = 0; 5651 5652 out: 5653 return err; 5654 } 5655 5656 /** 5657 * skb_checksum_setup - set up partial checksum offset 5658 * @skb: the skb to set up 5659 * @recalculate: if true the pseudo-header checksum will be recalculated 5660 */ 5661 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5662 { 5663 int err; 5664 5665 switch (skb->protocol) { 5666 case htons(ETH_P_IP): 5667 err = skb_checksum_setup_ipv4(skb, recalculate); 5668 break; 5669 5670 case htons(ETH_P_IPV6): 5671 err = skb_checksum_setup_ipv6(skb, recalculate); 5672 break; 5673 5674 default: 5675 err = -EPROTO; 5676 break; 5677 } 5678 5679 return err; 5680 } 5681 EXPORT_SYMBOL(skb_checksum_setup); 5682 5683 /** 5684 * skb_checksum_maybe_trim - maybe trims the given skb 5685 * @skb: the skb to check 5686 * @transport_len: the data length beyond the network header 5687 * 5688 * Checks whether the given skb has data beyond the given transport length. 5689 * If so, returns a cloned skb trimmed to this transport length. 5690 * Otherwise returns the provided skb. Returns NULL in error cases 5691 * (e.g. transport_len exceeds skb length or out-of-memory). 5692 * 5693 * Caller needs to set the skb transport header and free any returned skb if it 5694 * differs from the provided skb. 
5695 */ 5696 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5697 unsigned int transport_len) 5698 { 5699 struct sk_buff *skb_chk; 5700 unsigned int len = skb_transport_offset(skb) + transport_len; 5701 int ret; 5702 5703 if (skb->len < len) 5704 return NULL; 5705 else if (skb->len == len) 5706 return skb; 5707 5708 skb_chk = skb_clone(skb, GFP_ATOMIC); 5709 if (!skb_chk) 5710 return NULL; 5711 5712 ret = pskb_trim_rcsum(skb_chk, len); 5713 if (ret) { 5714 kfree_skb(skb_chk); 5715 return NULL; 5716 } 5717 5718 return skb_chk; 5719 } 5720 5721 /** 5722 * skb_checksum_trimmed - validate checksum of an skb 5723 * @skb: the skb to check 5724 * @transport_len: the data length beyond the network header 5725 * @skb_chkf: checksum function to use 5726 * 5727 * Applies the given checksum function skb_chkf to the provided skb. 5728 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5729 * 5730 * If the skb has data beyond the given transport length, then a 5731 * trimmed & cloned skb is checked and returned. 5732 * 5733 * Caller needs to set the skb transport header and free any returned skb if it 5734 * differs from the provided skb. 5735 */ 5736 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5737 unsigned int transport_len, 5738 __sum16(*skb_chkf)(struct sk_buff *skb)) 5739 { 5740 struct sk_buff *skb_chk; 5741 unsigned int offset = skb_transport_offset(skb); 5742 __sum16 ret; 5743 5744 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5745 if (!skb_chk) 5746 goto err; 5747 5748 if (!pskb_may_pull(skb_chk, offset)) 5749 goto err; 5750 5751 skb_pull_rcsum(skb_chk, offset); 5752 ret = skb_chkf(skb_chk); 5753 skb_push_rcsum(skb_chk, offset); 5754 5755 if (ret) 5756 goto err; 5757 5758 return skb_chk; 5759 5760 err: 5761 if (skb_chk && skb_chk != skb) 5762 kfree_skb(skb_chk); 5763 5764 return NULL; 5765 5766 } 5767 EXPORT_SYMBOL(skb_checksum_trimmed); 5768 5769 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5770 { 5771 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5772 skb->dev->name); 5773 } 5774 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5775 5776 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5777 { 5778 if (head_stolen) { 5779 skb_release_head_state(skb); 5780 kmem_cache_free(skbuff_cache, skb); 5781 } else { 5782 __kfree_skb(skb); 5783 } 5784 } 5785 EXPORT_SYMBOL(kfree_skb_partial); 5786 5787 /** 5788 * skb_try_coalesce - try to merge skb to prior one 5789 * @to: prior buffer 5790 * @from: buffer to add 5791 * @fragstolen: pointer to boolean 5792 * @delta_truesize: how much more was allocated than was requested 5793 */ 5794 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5795 bool *fragstolen, int *delta_truesize) 5796 { 5797 struct skb_shared_info *to_shinfo, *from_shinfo; 5798 int i, delta, len = from->len; 5799 5800 *fragstolen = false; 5801 5802 if (skb_cloned(to)) 5803 return false; 5804 5805 /* In general, avoid mixing page_pool and non-page_pool allocated 5806 * pages within the same SKB. In theory we could take full 5807 * references if @from is cloned and !@to->pp_recycle but its 5808 * tricky (due to potential race with the clone disappearing) and 5809 * rare, so not worth dealing with. 
5810 */ 5811 if (to->pp_recycle != from->pp_recycle) 5812 return false; 5813 5814 if (len <= skb_tailroom(to)) { 5815 if (len) 5816 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5817 *delta_truesize = 0; 5818 return true; 5819 } 5820 5821 to_shinfo = skb_shinfo(to); 5822 from_shinfo = skb_shinfo(from); 5823 if (to_shinfo->frag_list || from_shinfo->frag_list) 5824 return false; 5825 if (skb_zcopy(to) || skb_zcopy(from)) 5826 return false; 5827 5828 if (skb_headlen(from) != 0) { 5829 struct page *page; 5830 unsigned int offset; 5831 5832 if (to_shinfo->nr_frags + 5833 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5834 return false; 5835 5836 if (skb_head_is_locked(from)) 5837 return false; 5838 5839 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5840 5841 page = virt_to_head_page(from->head); 5842 offset = from->data - (unsigned char *)page_address(page); 5843 5844 skb_fill_page_desc(to, to_shinfo->nr_frags, 5845 page, offset, skb_headlen(from)); 5846 *fragstolen = true; 5847 } else { 5848 if (to_shinfo->nr_frags + 5849 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5850 return false; 5851 5852 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5853 } 5854 5855 WARN_ON_ONCE(delta < len); 5856 5857 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5858 from_shinfo->frags, 5859 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5860 to_shinfo->nr_frags += from_shinfo->nr_frags; 5861 5862 if (!skb_cloned(from)) 5863 from_shinfo->nr_frags = 0; 5864 5865 /* if the skb is not cloned this does nothing 5866 * since we set nr_frags to 0. 5867 */ 5868 if (skb_pp_frag_ref(from)) { 5869 for (i = 0; i < from_shinfo->nr_frags; i++) 5870 __skb_frag_ref(&from_shinfo->frags[i]); 5871 } 5872 5873 to->truesize += delta; 5874 to->len += len; 5875 to->data_len += len; 5876 5877 *delta_truesize = delta; 5878 return true; 5879 } 5880 EXPORT_SYMBOL(skb_try_coalesce); 5881 5882 /** 5883 * skb_scrub_packet - scrub an skb 5884 * 5885 * @skb: buffer to clean 5886 * @xnet: packet is crossing netns 5887 * 5888 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 5889 * into/from a tunnel. Some information have to be cleared during these 5890 * operations. 5891 * skb_scrub_packet can also be used to clean a skb before injecting it in 5892 * another namespace (@xnet == true). We have to clear all information in the 5893 * skb that could impact namespace isolation. 
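 *
 * A minimal forwarding-style sketch (illustrative only; new_dev is a
 * hypothetical destination device):
 *
 *	bool xnet = !net_eq(dev_net(skb->dev), dev_net(new_dev));
 *
 *	skb_scrub_packet(skb, xnet);
 *	skb->dev = new_dev;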
5894 */ 5895 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5896 { 5897 skb->pkt_type = PACKET_HOST; 5898 skb->skb_iif = 0; 5899 skb->ignore_df = 0; 5900 skb_dst_drop(skb); 5901 skb_ext_reset(skb); 5902 nf_reset_ct(skb); 5903 nf_reset_trace(skb); 5904 5905 #ifdef CONFIG_NET_SWITCHDEV 5906 skb->offload_fwd_mark = 0; 5907 skb->offload_l3_fwd_mark = 0; 5908 #endif 5909 5910 if (!xnet) 5911 return; 5912 5913 ipvs_reset(skb); 5914 skb->mark = 0; 5915 skb_clear_tstamp(skb); 5916 } 5917 EXPORT_SYMBOL_GPL(skb_scrub_packet); 5918 5919 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5920 { 5921 int mac_len, meta_len; 5922 void *meta; 5923 5924 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5925 kfree_skb(skb); 5926 return NULL; 5927 } 5928 5929 mac_len = skb->data - skb_mac_header(skb); 5930 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 5931 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 5932 mac_len - VLAN_HLEN - ETH_TLEN); 5933 } 5934 5935 meta_len = skb_metadata_len(skb); 5936 if (meta_len) { 5937 meta = skb_metadata_end(skb) - meta_len; 5938 memmove(meta + VLAN_HLEN, meta, meta_len); 5939 } 5940 5941 skb->mac_header += VLAN_HLEN; 5942 return skb; 5943 } 5944 5945 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 5946 { 5947 struct vlan_hdr *vhdr; 5948 u16 vlan_tci; 5949 5950 if (unlikely(skb_vlan_tag_present(skb))) { 5951 /* vlan_tci is already set-up so leave this for another time */ 5952 return skb; 5953 } 5954 5955 skb = skb_share_check(skb, GFP_ATOMIC); 5956 if (unlikely(!skb)) 5957 goto err_free; 5958 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 5959 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 5960 goto err_free; 5961 5962 vhdr = (struct vlan_hdr *)skb->data; 5963 vlan_tci = ntohs(vhdr->h_vlan_TCI); 5964 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 5965 5966 skb_pull_rcsum(skb, VLAN_HLEN); 5967 vlan_set_encap_proto(skb, vhdr); 5968 5969 skb = skb_reorder_vlan_header(skb); 5970 if (unlikely(!skb)) 5971 goto err_free; 5972 5973 skb_reset_network_header(skb); 5974 if (!skb_transport_header_was_set(skb)) 5975 skb_reset_transport_header(skb); 5976 skb_reset_mac_len(skb); 5977 5978 return skb; 5979 5980 err_free: 5981 kfree_skb(skb); 5982 return NULL; 5983 } 5984 EXPORT_SYMBOL(skb_vlan_untag); 5985 5986 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 5987 { 5988 if (!pskb_may_pull(skb, write_len)) 5989 return -ENOMEM; 5990 5991 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5992 return 0; 5993 5994 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5995 } 5996 EXPORT_SYMBOL(skb_ensure_writable); 5997 5998 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 5999 { 6000 int needed_headroom = dev->needed_headroom; 6001 int needed_tailroom = dev->needed_tailroom; 6002 6003 /* For tail taggers, we need to pad short frames ourselves, to ensure 6004 * that the tail tag does not fail at its role of being at the end of 6005 * the packet, once the conduit interface pads the frame. Account for 6006 * that pad length here, and pad later. 6007 */ 6008 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6009 needed_tailroom += ETH_ZLEN - skb->len; 6010 /* skb_headroom() returns unsigned int... 
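	 * so clamp the result via max_t(int, ...) below instead of letting
	 * an unsigned subtraction wrap around to a huge bogus value.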
*/ 6011 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); 6012 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); 6013 6014 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) 6015 /* No reallocation needed, yay! */ 6016 return 0; 6017 6018 return pskb_expand_head(skb, needed_headroom, needed_tailroom, 6019 GFP_ATOMIC); 6020 } 6021 EXPORT_SYMBOL(skb_ensure_writable_head_tail); 6022 6023 /* remove VLAN header from packet and update csum accordingly. 6024 * expects a non skb_vlan_tag_present skb with a vlan tag payload 6025 */ 6026 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 6027 { 6028 int offset = skb->data - skb_mac_header(skb); 6029 int err; 6030 6031 if (WARN_ONCE(offset, 6032 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 6033 offset)) { 6034 return -EINVAL; 6035 } 6036 6037 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 6038 if (unlikely(err)) 6039 return err; 6040 6041 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6042 6043 vlan_remove_tag(skb, vlan_tci); 6044 6045 skb->mac_header += VLAN_HLEN; 6046 6047 if (skb_network_offset(skb) < ETH_HLEN) 6048 skb_set_network_header(skb, ETH_HLEN); 6049 6050 skb_reset_mac_len(skb); 6051 6052 return err; 6053 } 6054 EXPORT_SYMBOL(__skb_vlan_pop); 6055 6056 /* Pop a vlan tag either from hwaccel or from payload. 6057 * Expects skb->data at mac header. 6058 */ 6059 int skb_vlan_pop(struct sk_buff *skb) 6060 { 6061 u16 vlan_tci; 6062 __be16 vlan_proto; 6063 int err; 6064 6065 if (likely(skb_vlan_tag_present(skb))) { 6066 __vlan_hwaccel_clear_tag(skb); 6067 } else { 6068 if (unlikely(!eth_type_vlan(skb->protocol))) 6069 return 0; 6070 6071 err = __skb_vlan_pop(skb, &vlan_tci); 6072 if (err) 6073 return err; 6074 } 6075 /* move next vlan tag to hw accel tag */ 6076 if (likely(!eth_type_vlan(skb->protocol))) 6077 return 0; 6078 6079 vlan_proto = skb->protocol; 6080 err = __skb_vlan_pop(skb, &vlan_tci); 6081 if (unlikely(err)) 6082 return err; 6083 6084 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6085 return 0; 6086 } 6087 EXPORT_SYMBOL(skb_vlan_pop); 6088 6089 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 6090 * Expects skb->data at mac header. 6091 */ 6092 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 6093 { 6094 if (skb_vlan_tag_present(skb)) { 6095 int offset = skb->data - skb_mac_header(skb); 6096 int err; 6097 6098 if (WARN_ONCE(offset, 6099 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 6100 offset)) { 6101 return -EINVAL; 6102 } 6103 6104 err = __vlan_insert_tag(skb, skb->vlan_proto, 6105 skb_vlan_tag_get(skb)); 6106 if (err) 6107 return err; 6108 6109 skb->protocol = skb->vlan_proto; 6110 skb->mac_len += VLAN_HLEN; 6111 6112 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6113 } 6114 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6115 return 0; 6116 } 6117 EXPORT_SYMBOL(skb_vlan_push); 6118 6119 /** 6120 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 6121 * 6122 * @skb: Socket buffer to modify 6123 * 6124 * Drop the Ethernet header of @skb. 6125 * 6126 * Expects that skb->data points to the mac header and that no VLAN tags are 6127 * present. 6128 * 6129 * Returns 0 on success, -errno otherwise. 
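 *
 * Illustrative sketch (not part of the original file; the caller name and
 * the inner_proto argument are assumptions): decapsulate to a bare L3
 * packet and let the caller choose the inner protocol.
 *
 *	static int example_pop_outer_eth(struct sk_buff *skb, __be16 inner_proto)
 *	{
 *		int err = skb_eth_pop(skb);
 *
 *		if (err)
 *			return err;
 *		skb->protocol = inner_proto;
 *		return 0;
 *	}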
6130 */ 6131 int skb_eth_pop(struct sk_buff *skb) 6132 { 6133 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6134 skb_network_offset(skb) < ETH_HLEN) 6135 return -EPROTO; 6136 6137 skb_pull_rcsum(skb, ETH_HLEN); 6138 skb_reset_mac_header(skb); 6139 skb_reset_mac_len(skb); 6140 6141 return 0; 6142 } 6143 EXPORT_SYMBOL(skb_eth_pop); 6144 6145 /** 6146 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6147 * 6148 * @skb: Socket buffer to modify 6149 * @dst: Destination MAC address of the new header 6150 * @src: Source MAC address of the new header 6151 * 6152 * Prepend @skb with a new Ethernet header. 6153 * 6154 * Expects that skb->data points to the mac header, which must be empty. 6155 * 6156 * Returns 0 on success, -errno otherwise. 6157 */ 6158 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6159 const unsigned char *src) 6160 { 6161 struct ethhdr *eth; 6162 int err; 6163 6164 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6165 return -EPROTO; 6166 6167 err = skb_cow_head(skb, sizeof(*eth)); 6168 if (err < 0) 6169 return err; 6170 6171 skb_push(skb, sizeof(*eth)); 6172 skb_reset_mac_header(skb); 6173 skb_reset_mac_len(skb); 6174 6175 eth = eth_hdr(skb); 6176 ether_addr_copy(eth->h_dest, dst); 6177 ether_addr_copy(eth->h_source, src); 6178 eth->h_proto = skb->protocol; 6179 6180 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6181 6182 return 0; 6183 } 6184 EXPORT_SYMBOL(skb_eth_push); 6185 6186 /* Update the ethertype of hdr and the skb csum value if required. */ 6187 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6188 __be16 ethertype) 6189 { 6190 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6191 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6192 6193 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6194 } 6195 6196 hdr->h_proto = ethertype; 6197 } 6198 6199 /** 6200 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6201 * the packet 6202 * 6203 * @skb: buffer 6204 * @mpls_lse: MPLS label stack entry to push 6205 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6206 * @mac_len: length of the MAC header 6207 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6208 * ethernet 6209 * 6210 * Expects skb->data at mac header. 6211 * 6212 * Returns 0 on success, -errno otherwise. 6213 */ 6214 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6215 int mac_len, bool ethernet) 6216 { 6217 struct mpls_shim_hdr *lse; 6218 int err; 6219 6220 if (unlikely(!eth_p_mpls(mpls_proto))) 6221 return -EINVAL; 6222 6223 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
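 * so an skb that is already marked as encapsulated is rejected below.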
*/ 6224 if (skb->encapsulation) 6225 return -EINVAL; 6226 6227 err = skb_cow_head(skb, MPLS_HLEN); 6228 if (unlikely(err)) 6229 return err; 6230 6231 if (!skb->inner_protocol) { 6232 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6233 skb_set_inner_protocol(skb, skb->protocol); 6234 } 6235 6236 skb_push(skb, MPLS_HLEN); 6237 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6238 mac_len); 6239 skb_reset_mac_header(skb); 6240 skb_set_network_header(skb, mac_len); 6241 skb_reset_mac_len(skb); 6242 6243 lse = mpls_hdr(skb); 6244 lse->label_stack_entry = mpls_lse; 6245 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6246 6247 if (ethernet && mac_len >= ETH_HLEN) 6248 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6249 skb->protocol = mpls_proto; 6250 6251 return 0; 6252 } 6253 EXPORT_SYMBOL_GPL(skb_mpls_push); 6254 6255 /** 6256 * skb_mpls_pop() - pop the outermost MPLS header 6257 * 6258 * @skb: buffer 6259 * @next_proto: ethertype of header after popped MPLS header 6260 * @mac_len: length of the MAC header 6261 * @ethernet: flag to indicate if the packet is ethernet 6262 * 6263 * Expects skb->data at mac header. 6264 * 6265 * Returns 0 on success, -errno otherwise. 6266 */ 6267 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6268 bool ethernet) 6269 { 6270 int err; 6271 6272 if (unlikely(!eth_p_mpls(skb->protocol))) 6273 return 0; 6274 6275 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6276 if (unlikely(err)) 6277 return err; 6278 6279 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6280 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6281 mac_len); 6282 6283 __skb_pull(skb, MPLS_HLEN); 6284 skb_reset_mac_header(skb); 6285 skb_set_network_header(skb, mac_len); 6286 6287 if (ethernet && mac_len >= ETH_HLEN) { 6288 struct ethhdr *hdr; 6289 6290 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6291 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6292 skb_mod_eth_type(skb, hdr, next_proto); 6293 } 6294 skb->protocol = next_proto; 6295 6296 return 0; 6297 } 6298 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6299 6300 /** 6301 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6302 * 6303 * @skb: buffer 6304 * @mpls_lse: new MPLS label stack entry to update to 6305 * 6306 * Expects skb->data at mac header. 6307 * 6308 * Returns 0 on success, -errno otherwise. 6309 */ 6310 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6311 { 6312 int err; 6313 6314 if (unlikely(!eth_p_mpls(skb->protocol))) 6315 return -EINVAL; 6316 6317 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6318 if (unlikely(err)) 6319 return err; 6320 6321 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6322 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6323 6324 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6325 } 6326 6327 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6328 6329 return 0; 6330 } 6331 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6332 6333 /** 6334 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6335 * 6336 * @skb: buffer 6337 * 6338 * Expects skb->data at mac header. 6339 * 6340 * Returns 0 on success, -errno otherwise. 
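 *
 * Illustrative sketch (not part of the original file; the caller and the
 * output device handling are assumptions): a forwarding action that drops
 * the packet once the TTL is exhausted.
 *
 *	static int example_mpls_forward(struct sk_buff *skb, struct net_device *out)
 *	{
 *		int err = skb_mpls_dec_ttl(skb);
 *
 *		if (err) {
 *			kfree_skb(skb);
 *			return err;
 *		}
 *		skb->dev = out;
 *		return dev_queue_xmit(skb);
 *	}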
6341 */ 6342 int skb_mpls_dec_ttl(struct sk_buff *skb) 6343 { 6344 u32 lse; 6345 u8 ttl; 6346 6347 if (unlikely(!eth_p_mpls(skb->protocol))) 6348 return -EINVAL; 6349 6350 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6351 return -ENOMEM; 6352 6353 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6354 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6355 if (!--ttl) 6356 return -EINVAL; 6357 6358 lse &= ~MPLS_LS_TTL_MASK; 6359 lse |= ttl << MPLS_LS_TTL_SHIFT; 6360 6361 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6362 } 6363 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6364 6365 /** 6366 * alloc_skb_with_frags - allocate skb with page frags 6367 * 6368 * @header_len: size of linear part 6369 * @data_len: needed length in frags 6370 * @order: max page order desired. 6371 * @errcode: pointer to error code if any 6372 * @gfp_mask: allocation mask 6373 * 6374 * This can be used to allocate a paged skb, given a maximal order for frags. 6375 */ 6376 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6377 unsigned long data_len, 6378 int order, 6379 int *errcode, 6380 gfp_t gfp_mask) 6381 { 6382 unsigned long chunk; 6383 struct sk_buff *skb; 6384 struct page *page; 6385 int nr_frags = 0; 6386 6387 *errcode = -EMSGSIZE; 6388 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6389 return NULL; 6390 6391 *errcode = -ENOBUFS; 6392 skb = alloc_skb(header_len, gfp_mask); 6393 if (!skb) 6394 return NULL; 6395 6396 while (data_len) { 6397 if (nr_frags == MAX_SKB_FRAGS - 1) 6398 goto failure; 6399 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6400 order--; 6401 6402 if (order) { 6403 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6404 __GFP_COMP | 6405 __GFP_NOWARN, 6406 order); 6407 if (!page) { 6408 order--; 6409 continue; 6410 } 6411 } else { 6412 page = alloc_page(gfp_mask); 6413 if (!page) 6414 goto failure; 6415 } 6416 chunk = min_t(unsigned long, data_len, 6417 PAGE_SIZE << order); 6418 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6419 nr_frags++; 6420 skb->truesize += (PAGE_SIZE << order); 6421 data_len -= chunk; 6422 } 6423 return skb; 6424 6425 failure: 6426 kfree_skb(skb); 6427 return NULL; 6428 } 6429 EXPORT_SYMBOL(alloc_skb_with_frags); 6430 6431 /* carve out the first off bytes from skb when off < headlen */ 6432 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6433 const int headlen, gfp_t gfp_mask) 6434 { 6435 int i; 6436 unsigned int size = skb_end_offset(skb); 6437 int new_hlen = headlen - off; 6438 u8 *data; 6439 6440 if (skb_pfmemalloc(skb)) 6441 gfp_mask |= __GFP_MEMALLOC; 6442 6443 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6444 if (!data) 6445 return -ENOMEM; 6446 size = SKB_WITH_OVERHEAD(size); 6447 6448 /* Copy real data, and all frags */ 6449 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6450 skb->len -= off; 6451 6452 memcpy((struct skb_shared_info *)(data + size), 6453 skb_shinfo(skb), 6454 offsetof(struct skb_shared_info, 6455 frags[skb_shinfo(skb)->nr_frags])); 6456 if (skb_cloned(skb)) { 6457 /* drop the old head gracefully */ 6458 if (skb_orphan_frags(skb, gfp_mask)) { 6459 skb_kfree_head(data, size); 6460 return -ENOMEM; 6461 } 6462 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6463 skb_frag_ref(skb, i); 6464 if (skb_has_frag_list(skb)) 6465 skb_clone_fraglist(skb); 6466 skb_release_data(skb, SKB_CONSUMED, false); 6467 } else { 6468 /* we can reuse existing recount- all we did was 6469 * relocate values 6470 */ 6471 skb_free_head(skb, false); 
6472 } 6473 6474 skb->head = data; 6475 skb->data = data; 6476 skb->head_frag = 0; 6477 skb_set_end_offset(skb, size); 6478 skb_set_tail_pointer(skb, skb_headlen(skb)); 6479 skb_headers_offset_update(skb, 0); 6480 skb->cloned = 0; 6481 skb->hdr_len = 0; 6482 skb->nohdr = 0; 6483 atomic_set(&skb_shinfo(skb)->dataref, 1); 6484 6485 return 0; 6486 } 6487 6488 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6489 6490 /* carve out the first eat bytes from skb's frag_list. May recurse into 6491 * pskb_carve() 6492 */ 6493 static int pskb_carve_frag_list(struct sk_buff *skb, 6494 struct skb_shared_info *shinfo, int eat, 6495 gfp_t gfp_mask) 6496 { 6497 struct sk_buff *list = shinfo->frag_list; 6498 struct sk_buff *clone = NULL; 6499 struct sk_buff *insp = NULL; 6500 6501 do { 6502 if (!list) { 6503 pr_err("Not enough bytes to eat. Want %d\n", eat); 6504 return -EFAULT; 6505 } 6506 if (list->len <= eat) { 6507 /* Eaten as whole. */ 6508 eat -= list->len; 6509 list = list->next; 6510 insp = list; 6511 } else { 6512 /* Eaten partially. */ 6513 if (skb_shared(list)) { 6514 clone = skb_clone(list, gfp_mask); 6515 if (!clone) 6516 return -ENOMEM; 6517 insp = list->next; 6518 list = clone; 6519 } else { 6520 /* This may be pulled without problems. */ 6521 insp = list; 6522 } 6523 if (pskb_carve(list, eat, gfp_mask) < 0) { 6524 kfree_skb(clone); 6525 return -ENOMEM; 6526 } 6527 break; 6528 } 6529 } while (eat); 6530 6531 /* Free pulled out fragments. */ 6532 while ((list = shinfo->frag_list) != insp) { 6533 shinfo->frag_list = list->next; 6534 consume_skb(list); 6535 } 6536 /* And insert new clone at head. */ 6537 if (clone) { 6538 clone->next = list; 6539 shinfo->frag_list = clone; 6540 } 6541 return 0; 6542 } 6543 6544 /* carve off first len bytes from skb. Split line (off) is in the 6545 * non-linear part of skb 6546 */ 6547 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6548 int pos, gfp_t gfp_mask) 6549 { 6550 int i, k = 0; 6551 unsigned int size = skb_end_offset(skb); 6552 u8 *data; 6553 const int nfrags = skb_shinfo(skb)->nr_frags; 6554 struct skb_shared_info *shinfo; 6555 6556 if (skb_pfmemalloc(skb)) 6557 gfp_mask |= __GFP_MEMALLOC; 6558 6559 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6560 if (!data) 6561 return -ENOMEM; 6562 size = SKB_WITH_OVERHEAD(size); 6563 6564 memcpy((struct skb_shared_info *)(data + size), 6565 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6566 if (skb_orphan_frags(skb, gfp_mask)) { 6567 skb_kfree_head(data, size); 6568 return -ENOMEM; 6569 } 6570 shinfo = (struct skb_shared_info *)(data + size); 6571 for (i = 0; i < nfrags; i++) { 6572 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6573 6574 if (pos + fsize > off) { 6575 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6576 6577 if (pos < off) { 6578 /* Split frag. 6579 * We have two variants in this case: 6580 * 1. Move all the frag to the second 6581 * part, if it is possible. F.e. 6582 * this approach is mandatory for TUX, 6583 * where splitting is expensive. 6584 * 2. Split is accurately. We make this. 
6585 */ 6586 skb_frag_off_add(&shinfo->frags[0], off - pos); 6587 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6588 } 6589 skb_frag_ref(skb, i); 6590 k++; 6591 } 6592 pos += fsize; 6593 } 6594 shinfo->nr_frags = k; 6595 if (skb_has_frag_list(skb)) 6596 skb_clone_fraglist(skb); 6597 6598 /* split line is in frag list */ 6599 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6600 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6601 if (skb_has_frag_list(skb)) 6602 kfree_skb_list(skb_shinfo(skb)->frag_list); 6603 skb_kfree_head(data, size); 6604 return -ENOMEM; 6605 } 6606 skb_release_data(skb, SKB_CONSUMED, false); 6607 6608 skb->head = data; 6609 skb->head_frag = 0; 6610 skb->data = data; 6611 skb_set_end_offset(skb, size); 6612 skb_reset_tail_pointer(skb); 6613 skb_headers_offset_update(skb, 0); 6614 skb->cloned = 0; 6615 skb->hdr_len = 0; 6616 skb->nohdr = 0; 6617 skb->len -= off; 6618 skb->data_len = skb->len; 6619 atomic_set(&skb_shinfo(skb)->dataref, 1); 6620 return 0; 6621 } 6622 6623 /* remove len bytes from the beginning of the skb */ 6624 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6625 { 6626 int headlen = skb_headlen(skb); 6627 6628 if (len < headlen) 6629 return pskb_carve_inside_header(skb, len, headlen, gfp); 6630 else 6631 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6632 } 6633 6634 /* Extract to_copy bytes starting at off from skb, and return this in 6635 * a new skb 6636 */ 6637 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6638 int to_copy, gfp_t gfp) 6639 { 6640 struct sk_buff *clone = skb_clone(skb, gfp); 6641 6642 if (!clone) 6643 return NULL; 6644 6645 if (pskb_carve(clone, off, gfp) < 0 || 6646 pskb_trim(clone, to_copy)) { 6647 kfree_skb(clone); 6648 return NULL; 6649 } 6650 return clone; 6651 } 6652 EXPORT_SYMBOL(pskb_extract); 6653 6654 /** 6655 * skb_condense - try to get rid of fragments/frag_list if possible 6656 * @skb: buffer 6657 * 6658 * Can be used to save memory before skb is added to a busy queue. 6659 * If packet has bytes in frags and enough tail room in skb->head, 6660 * pull all of them, so that we can free the frags right now and adjust 6661 * truesize. 6662 * Notes: 6663 * We do not reallocate skb->head thus can not fail. 6664 * Caller must re-evaluate skb->truesize if needed. 6665 */ 6666 void skb_condense(struct sk_buff *skb) 6667 { 6668 if (skb->data_len) { 6669 if (skb->data_len > skb->end - skb->tail || 6670 skb_cloned(skb)) 6671 return; 6672 6673 /* Nice, we can free page frag(s) right now */ 6674 __pskb_pull_tail(skb, skb->data_len); 6675 } 6676 /* At this point, skb->truesize might be over estimated, 6677 * because skb had a fragment, and fragments do not tell 6678 * their truesize. 6679 * When we pulled its content into skb->head, fragment 6680 * was freed, but __pskb_pull_tail() could not possibly 6681 * adjust skb->truesize, not knowing the frag truesize. 6682 */ 6683 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6684 } 6685 EXPORT_SYMBOL(skb_condense); 6686 6687 #ifdef CONFIG_SKB_EXTENSIONS 6688 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6689 { 6690 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6691 } 6692 6693 /** 6694 * __skb_ext_alloc - allocate a new skb extensions storage 6695 * 6696 * @flags: See kmalloc(). 6697 * 6698 * Returns the newly allocated pointer. The pointer can later attached to a 6699 * skb via __skb_ext_set(). 6700 * Note: caller must handle the skb_ext as an opaque data. 
6701 */ 6702 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6703 { 6704 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6705 6706 if (new) { 6707 memset(new->offset, 0, sizeof(new->offset)); 6708 refcount_set(&new->refcnt, 1); 6709 } 6710 6711 return new; 6712 } 6713 6714 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6715 unsigned int old_active) 6716 { 6717 struct skb_ext *new; 6718 6719 if (refcount_read(&old->refcnt) == 1) 6720 return old; 6721 6722 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6723 if (!new) 6724 return NULL; 6725 6726 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6727 refcount_set(&new->refcnt, 1); 6728 6729 #ifdef CONFIG_XFRM 6730 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6731 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6732 unsigned int i; 6733 6734 for (i = 0; i < sp->len; i++) 6735 xfrm_state_hold(sp->xvec[i]); 6736 } 6737 #endif 6738 __skb_ext_put(old); 6739 return new; 6740 } 6741 6742 /** 6743 * __skb_ext_set - attach the specified extension storage to this skb 6744 * @skb: buffer 6745 * @id: extension id 6746 * @ext: extension storage previously allocated via __skb_ext_alloc() 6747 * 6748 * Existing extensions, if any, are cleared. 6749 * 6750 * Returns the pointer to the extension. 6751 */ 6752 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6753 struct skb_ext *ext) 6754 { 6755 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6756 6757 skb_ext_put(skb); 6758 newlen = newoff + skb_ext_type_len[id]; 6759 ext->chunks = newlen; 6760 ext->offset[id] = newoff; 6761 skb->extensions = ext; 6762 skb->active_extensions = 1 << id; 6763 return skb_ext_get_ptr(ext, id); 6764 } 6765 6766 /** 6767 * skb_ext_add - allocate space for given extension, COW if needed 6768 * @skb: buffer 6769 * @id: extension to allocate space for 6770 * 6771 * Allocates enough space for the given extension. 6772 * If the extension is already present, a pointer to that extension 6773 * is returned. 6774 * 6775 * If the skb was cloned, COW applies and the returned memory can be 6776 * modified without changing the extension space of clones buffers. 6777 * 6778 * Returns pointer to the extension or NULL on allocation failure. 
6779 */ 6780 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6781 { 6782 struct skb_ext *new, *old = NULL; 6783 unsigned int newlen, newoff; 6784 6785 if (skb->active_extensions) { 6786 old = skb->extensions; 6787 6788 new = skb_ext_maybe_cow(old, skb->active_extensions); 6789 if (!new) 6790 return NULL; 6791 6792 if (__skb_ext_exist(new, id)) 6793 goto set_active; 6794 6795 newoff = new->chunks; 6796 } else { 6797 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6798 6799 new = __skb_ext_alloc(GFP_ATOMIC); 6800 if (!new) 6801 return NULL; 6802 } 6803 6804 newlen = newoff + skb_ext_type_len[id]; 6805 new->chunks = newlen; 6806 new->offset[id] = newoff; 6807 set_active: 6808 skb->slow_gro = 1; 6809 skb->extensions = new; 6810 skb->active_extensions |= 1 << id; 6811 return skb_ext_get_ptr(new, id); 6812 } 6813 EXPORT_SYMBOL(skb_ext_add); 6814 6815 #ifdef CONFIG_XFRM 6816 static void skb_ext_put_sp(struct sec_path *sp) 6817 { 6818 unsigned int i; 6819 6820 for (i = 0; i < sp->len; i++) 6821 xfrm_state_put(sp->xvec[i]); 6822 } 6823 #endif 6824 6825 #ifdef CONFIG_MCTP_FLOWS 6826 static void skb_ext_put_mctp(struct mctp_flow *flow) 6827 { 6828 if (flow->key) 6829 mctp_key_unref(flow->key); 6830 } 6831 #endif 6832 6833 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6834 { 6835 struct skb_ext *ext = skb->extensions; 6836 6837 skb->active_extensions &= ~(1 << id); 6838 if (skb->active_extensions == 0) { 6839 skb->extensions = NULL; 6840 __skb_ext_put(ext); 6841 #ifdef CONFIG_XFRM 6842 } else if (id == SKB_EXT_SEC_PATH && 6843 refcount_read(&ext->refcnt) == 1) { 6844 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6845 6846 skb_ext_put_sp(sp); 6847 sp->len = 0; 6848 #endif 6849 } 6850 } 6851 EXPORT_SYMBOL(__skb_ext_del); 6852 6853 void __skb_ext_put(struct skb_ext *ext) 6854 { 6855 /* If this is last clone, nothing can increment 6856 * it after check passes. Avoids one atomic op. 6857 */ 6858 if (refcount_read(&ext->refcnt) == 1) 6859 goto free_now; 6860 6861 if (!refcount_dec_and_test(&ext->refcnt)) 6862 return; 6863 free_now: 6864 #ifdef CONFIG_XFRM 6865 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6866 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6867 #endif 6868 #ifdef CONFIG_MCTP_FLOWS 6869 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 6870 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 6871 #endif 6872 6873 kmem_cache_free(skbuff_ext_cache, ext); 6874 } 6875 EXPORT_SYMBOL(__skb_ext_put); 6876 #endif /* CONFIG_SKB_EXTENSIONS */ 6877 6878 /** 6879 * skb_attempt_defer_free - queue skb for remote freeing 6880 * @skb: buffer 6881 * 6882 * Put @skb in a per-cpu list, using the cpu which 6883 * allocated the skb/pages to reduce false sharing 6884 * and memory zone spinlock contention. 6885 */ 6886 void skb_attempt_defer_free(struct sk_buff *skb) 6887 { 6888 int cpu = skb->alloc_cpu; 6889 struct softnet_data *sd; 6890 unsigned int defer_max; 6891 bool kick; 6892 6893 if (WARN_ON_ONCE(cpu >= nr_cpu_ids) || 6894 !cpu_online(cpu) || 6895 cpu == raw_smp_processor_id()) { 6896 nodefer: __kfree_skb(skb); 6897 return; 6898 } 6899 6900 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 6901 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 6902 6903 sd = &per_cpu(softnet_data, cpu); 6904 defer_max = READ_ONCE(sysctl_skb_defer_max); 6905 if (READ_ONCE(sd->defer_count) >= defer_max) 6906 goto nodefer; 6907 6908 spin_lock_bh(&sd->defer_lock); 6909 /* Send an IPI every time queue reaches half capacity. 
*/ 6910 kick = sd->defer_count == (defer_max >> 1); 6911 /* Paired with the READ_ONCE() few lines above */ 6912 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 6913 6914 skb->next = sd->defer_list; 6915 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 6916 WRITE_ONCE(sd->defer_list, skb); 6917 spin_unlock_bh(&sd->defer_lock); 6918 6919 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 6920 * if we are unlucky enough (this seems very unlikely). 6921 */ 6922 if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) 6923 smp_call_function_single_async(cpu, &sd->defer_csd); 6924 } 6925 6926 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 6927 size_t offset, size_t len) 6928 { 6929 const char *kaddr; 6930 __wsum csum; 6931 6932 kaddr = kmap_local_page(page); 6933 csum = csum_partial(kaddr + offset, len, 0); 6934 kunmap_local(kaddr); 6935 skb->csum = csum_block_add(skb->csum, csum, skb->len); 6936 } 6937 6938 /** 6939 * skb_splice_from_iter - Splice (or copy) pages to skbuff 6940 * @skb: The buffer to add pages to 6941 * @iter: Iterator representing the pages to be added 6942 * @maxsize: Maximum amount of pages to be added 6943 * @gfp: Allocation flags 6944 * 6945 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 6946 * extracts pages from an iterator and adds them to the socket buffer if 6947 * possible, copying them to fragments if not possible (such as if they're slab 6948 * pages). 6949 * 6950 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 6951 * insufficient space in the buffer to transfer anything. 6952 */ 6953 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 6954 ssize_t maxsize, gfp_t gfp) 6955 { 6956 size_t frag_limit = READ_ONCE(sysctl_max_skb_frags); 6957 struct page *pages[8], **ppages = pages; 6958 ssize_t spliced = 0, ret = 0; 6959 unsigned int i; 6960 6961 while (iter->count > 0) { 6962 ssize_t space, nr, len; 6963 size_t off; 6964 6965 ret = -EMSGSIZE; 6966 space = frag_limit - skb_shinfo(skb)->nr_frags; 6967 if (space < 0) 6968 break; 6969 6970 /* We might be able to coalesce without increasing nr_frags */ 6971 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 6972 6973 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 6974 if (len <= 0) { 6975 ret = len ?: -EIO; 6976 break; 6977 } 6978 6979 i = 0; 6980 do { 6981 struct page *page = pages[i++]; 6982 size_t part = min_t(size_t, PAGE_SIZE - off, len); 6983 6984 ret = -EIO; 6985 if (WARN_ON_ONCE(!sendpage_ok(page))) 6986 goto out; 6987 6988 ret = skb_append_pagefrags(skb, page, off, part, 6989 frag_limit); 6990 if (ret < 0) { 6991 iov_iter_revert(iter, len); 6992 goto out; 6993 } 6994 6995 if (skb->ip_summed == CHECKSUM_NONE) 6996 skb_splice_csum_page(skb, page, off, part); 6997 6998 off = 0; 6999 spliced += part; 7000 maxsize -= part; 7001 len -= part; 7002 } while (len > 0); 7003 7004 if (maxsize <= 0) 7005 break; 7006 } 7007 7008 out: 7009 skb_len_add(skb, spliced); 7010 return spliced ?: ret; 7011 } 7012 EXPORT_SYMBOL(skb_splice_from_iter); 7013 7014 static __always_inline 7015 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7016 size_t len, void *to, void *priv2) 7017 { 7018 __wsum *csum = priv2; 7019 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7020 7021 *csum = csum_block_add(*csum, next, progress); 7022 return 0; 7023 } 7024 7025 static __always_inline 7026 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7027 size_t len, void 
*to, void *priv2) 7028 { 7029 __wsum next, *csum = priv2; 7030 7031 next = csum_and_copy_from_user(iter_from, to + progress, len); 7032 *csum = csum_block_add(*csum, next, progress); 7033 return next ? 0 : len; 7034 } 7035 7036 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7037 __wsum *csum, struct iov_iter *i) 7038 { 7039 size_t copied; 7040 7041 if (WARN_ON_ONCE(!i->data_source)) 7042 return false; 7043 copied = iterate_and_advance2(i, bytes, addr, csum, 7044 copy_from_user_iter_csum, 7045 memcpy_from_iter_csum); 7046 if (likely(copied == bytes)) 7047 return true; 7048 iov_iter_revert(i, copied); 7049 return false; 7050 } 7051 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7052
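
/*
 * Illustrative sketch (not part of the original file): a getfrag()-style
 * callback in the spirit of ip_generic_getfrag(), using
 * csum_and_copy_from_iter_full() to copy user data into an skb while
 * folding the payload checksum into skb->csum.  The function name and the
 * unconditional checksumming are assumptions made for the example.
 *
 *	static int example_getfrag(void *from, char *to, int offset,
 *				   int len, int odd, struct sk_buff *skb)
 *	{
 *		struct msghdr *msg = from;
 *		__wsum csum = 0;
 *
 *		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 *			return -EFAULT;
 *		skb->csum = csum_block_add(skb->csum, csum, odd);
 *		return 0;
 *	}
 */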