1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Routines having to do with the 'struct sk_buff' memory handlers. 4 * 5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 6 * Florian La Roche <rzsfl@rz.uni-sb.de> 7 * 8 * Fixes: 9 * Alan Cox : Fixed the worst of the load 10 * balancer bugs. 11 * Dave Platt : Interrupt stacking fix. 12 * Richard Kooijman : Timestamp fixes. 13 * Alan Cox : Changed buffer format. 14 * Alan Cox : destructor hook for AF_UNIX etc. 15 * Linus Torvalds : Better skb_clone. 16 * Alan Cox : Added skb_copy. 17 * Alan Cox : Added all the changed routines Linus 18 * only put in the headers 19 * Ray VanTassle : Fixed --skb->lock in free 20 * Alan Cox : skb_copy copy arp field 21 * Andi Kleen : slabified it. 22 * Robert Olsson : Removed skb_head_pool 23 * 24 * NOTE: 25 * The __skb_ routines should be called with interrupts 26 * disabled, or you better be *real* sure that the operation is atomic 27 * with respect to whatever list is being frobbed (e.g. via lock_sock() 28 * or via disabling bottom half handlers, etc). 29 */ 30 31 /* 32 * The functions in this file will not compile correctly with gcc 2.4.x 33 */ 34 35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 36 37 #include <linux/module.h> 38 #include <linux/types.h> 39 #include <linux/kernel.h> 40 #include <linux/mm.h> 41 #include <linux/interrupt.h> 42 #include <linux/in.h> 43 #include <linux/inet.h> 44 #include <linux/slab.h> 45 #include <linux/tcp.h> 46 #include <linux/udp.h> 47 #include <linux/sctp.h> 48 #include <linux/netdevice.h> 49 #ifdef CONFIG_NET_CLS_ACT 50 #include <net/pkt_sched.h> 51 #endif 52 #include <linux/string.h> 53 #include <linux/skbuff.h> 54 #include <linux/skbuff_ref.h> 55 #include <linux/splice.h> 56 #include <linux/cache.h> 57 #include <linux/rtnetlink.h> 58 #include <linux/init.h> 59 #include <linux/scatterlist.h> 60 #include <linux/errqueue.h> 61 #include <linux/prefetch.h> 62 #include <linux/bitfield.h> 63 #include <linux/if_vlan.h> 64 #include <linux/mpls.h> 65 #include <linux/kcov.h> 66 #include <linux/iov_iter.h> 67 68 #include <net/protocol.h> 69 #include <net/dst.h> 70 #include <net/sock.h> 71 #include <net/checksum.h> 72 #include <net/gso.h> 73 #include <net/hotdata.h> 74 #include <net/ip6_checksum.h> 75 #include <net/xfrm.h> 76 #include <net/mpls.h> 77 #include <net/mptcp.h> 78 #include <net/mctp.h> 79 #include <net/page_pool/helpers.h> 80 #include <net/dropreason.h> 81 82 #include <linux/uaccess.h> 83 #include <trace/events/skb.h> 84 #include <linux/highmem.h> 85 #include <linux/capability.h> 86 #include <linux/user_namespace.h> 87 #include <linux/indirect_call_wrapper.h> 88 #include <linux/textsearch.h> 89 90 #include "dev.h" 91 #include "netmem_priv.h" 92 #include "sock_destructor.h" 93 94 #ifdef CONFIG_SKB_EXTENSIONS 95 static struct kmem_cache *skbuff_ext_cache __ro_after_init; 96 #endif 97 98 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) 99 100 /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two. 101 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique 102 * size, and we can differentiate heads from skb_small_head_cache 103 * vs system slabs by looking at their size (skb_end_offset()). 104 */ 105 #define SKB_SMALL_HEAD_CACHE_SIZE \ 106 (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \ 107 (SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \ 108 SKB_SMALL_HEAD_SIZE) 109 110 #define SKB_SMALL_HEAD_HEADROOM \ 111 SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) 112 113 /* kcm_write_msgs() relies on casting paged frags to bio_vec to use 114 * iov_iter_bvec(). 
 * These static asserts ensure the cast is valid as long as the
 * netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragments. Constrained to systems
 * with a very limited number of 1K fragments fitting a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif

struct napi_alloc_cache {
	local_lock_t bh_lock;
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure the bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
298 */ 299 void napi_get_frags_check(struct napi_struct *napi) 300 { 301 struct sk_buff *skb; 302 303 local_bh_disable(); 304 skb = napi_get_frags(napi); 305 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); 306 napi_free_frags(napi); 307 local_bh_enable(); 308 } 309 310 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 311 { 312 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 313 void *data; 314 315 fragsz = SKB_DATA_ALIGN(fragsz); 316 317 local_lock_nested_bh(&napi_alloc_cache.bh_lock); 318 data = __page_frag_alloc_align(&nc->page, fragsz, 319 GFP_ATOMIC | __GFP_NOWARN, align_mask); 320 local_unlock_nested_bh(&napi_alloc_cache.bh_lock); 321 return data; 322 323 } 324 EXPORT_SYMBOL(__napi_alloc_frag_align); 325 326 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 327 { 328 void *data; 329 330 if (in_hardirq() || irqs_disabled()) { 331 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); 332 333 fragsz = SKB_DATA_ALIGN(fragsz); 334 data = __page_frag_alloc_align(nc, fragsz, 335 GFP_ATOMIC | __GFP_NOWARN, 336 align_mask); 337 } else { 338 local_bh_disable(); 339 data = __napi_alloc_frag_align(fragsz, align_mask); 340 local_bh_enable(); 341 } 342 return data; 343 } 344 EXPORT_SYMBOL(__netdev_alloc_frag_align); 345 346 static struct sk_buff *napi_skb_cache_get(void) 347 { 348 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 349 struct sk_buff *skb; 350 351 local_lock_nested_bh(&napi_alloc_cache.bh_lock); 352 if (unlikely(!nc->skb_count)) { 353 nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, 354 GFP_ATOMIC | __GFP_NOWARN, 355 NAPI_SKB_CACHE_BULK, 356 nc->skb_cache); 357 if (unlikely(!nc->skb_count)) { 358 local_unlock_nested_bh(&napi_alloc_cache.bh_lock); 359 return NULL; 360 } 361 } 362 363 skb = nc->skb_cache[--nc->skb_count]; 364 local_unlock_nested_bh(&napi_alloc_cache.bh_lock); 365 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); 366 367 return skb; 368 } 369 370 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, 371 unsigned int size) 372 { 373 struct skb_shared_info *shinfo; 374 375 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 376 377 /* Assumes caller memset cleared SKB */ 378 skb->truesize = SKB_TRUESIZE(size); 379 refcount_set(&skb->users, 1); 380 skb->head = data; 381 skb->data = data; 382 skb_reset_tail_pointer(skb); 383 skb_set_end_offset(skb, size); 384 skb->mac_header = (typeof(skb->mac_header))~0U; 385 skb->transport_header = (typeof(skb->transport_header))~0U; 386 skb->alloc_cpu = raw_smp_processor_id(); 387 /* make sure we initialize shinfo sequentially */ 388 shinfo = skb_shinfo(skb); 389 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 390 atomic_set(&shinfo->dataref, 1); 391 392 skb_set_kcov_handle(skb, kcov_common_handle()); 393 } 394 395 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, 396 unsigned int *size) 397 { 398 void *resized; 399 400 /* Must find the allocation size (and grow it to match). */ 401 *size = ksize(data); 402 /* krealloc() will immediately return "data" when 403 * "ksize(data)" is requested: it is the existing upper 404 * bounds. As a result, GFP_ATOMIC will be ignored. Note 405 * that this "new" pointer needs to be passed back to the 406 * caller for use so the __alloc_size hinting will be 407 * tracked correctly. 
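	 *
	 * Net effect (informal sketch): the skb head ends up covering the
	 * whole slab object that ksize() reports, so any rounding slack from
	 * the original kmalloc() is made available to the skb (as tailroom
	 * plus the trailing struct skb_shared_info).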
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffers should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return value is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 * Before IO, the driver allocates only the data buffer where the NIC
 * puts the incoming frame.
 * The driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings only contain data buffers, not full skbs.
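 *
 * Illustrative RX-side sketch (buffer names and sizes below are made up,
 * not part of this API):
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = napi_alloc_frag(truesize);
 *
 *	... NIC DMA-fills buf + NET_SKB_PAD with frame_len bytes ...
 *
 *	skb = build_skb(buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}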
470 */ 471 struct sk_buff *__build_skb(void *data, unsigned int frag_size) 472 { 473 struct sk_buff *skb; 474 475 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, 476 GFP_ATOMIC | __GFP_NOWARN); 477 if (unlikely(!skb)) 478 return NULL; 479 480 memset(skb, 0, offsetof(struct sk_buff, tail)); 481 __build_skb_around(skb, data, frag_size); 482 483 return skb; 484 } 485 486 /* build_skb() is wrapper over __build_skb(), that specifically 487 * takes care of skb->head and skb->pfmemalloc 488 */ 489 struct sk_buff *build_skb(void *data, unsigned int frag_size) 490 { 491 struct sk_buff *skb = __build_skb(data, frag_size); 492 493 if (likely(skb && frag_size)) { 494 skb->head_frag = 1; 495 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 496 } 497 return skb; 498 } 499 EXPORT_SYMBOL(build_skb); 500 501 /** 502 * build_skb_around - build a network buffer around provided skb 503 * @skb: sk_buff provide by caller, must be memset cleared 504 * @data: data buffer provided by caller 505 * @frag_size: size of data 506 */ 507 struct sk_buff *build_skb_around(struct sk_buff *skb, 508 void *data, unsigned int frag_size) 509 { 510 if (unlikely(!skb)) 511 return NULL; 512 513 __build_skb_around(skb, data, frag_size); 514 515 if (frag_size) { 516 skb->head_frag = 1; 517 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 518 } 519 return skb; 520 } 521 EXPORT_SYMBOL(build_skb_around); 522 523 /** 524 * __napi_build_skb - build a network buffer 525 * @data: data buffer provided by caller 526 * @frag_size: size of data 527 * 528 * Version of __build_skb() that uses NAPI percpu caches to obtain 529 * skbuff_head instead of inplace allocation. 530 * 531 * Returns a new &sk_buff on success, %NULL on allocation failure. 532 */ 533 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size) 534 { 535 struct sk_buff *skb; 536 537 skb = napi_skb_cache_get(); 538 if (unlikely(!skb)) 539 return NULL; 540 541 memset(skb, 0, offsetof(struct sk_buff, tail)); 542 __build_skb_around(skb, data, frag_size); 543 544 return skb; 545 } 546 547 /** 548 * napi_build_skb - build a network buffer 549 * @data: data buffer provided by caller 550 * @frag_size: size of data 551 * 552 * Version of __napi_build_skb() that takes care of skb->head_frag 553 * and skb->pfmemalloc when the data is a page or page fragment. 554 * 555 * Returns a new &sk_buff on success, %NULL on allocation failure. 556 */ 557 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) 558 { 559 struct sk_buff *skb = __napi_build_skb(data, frag_size); 560 561 if (likely(skb) && frag_size) { 562 skb->head_frag = 1; 563 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 564 } 565 566 return skb; 567 } 568 EXPORT_SYMBOL(napi_build_skb); 569 570 /* 571 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 572 * the caller if emergency pfmemalloc reserves are being used. If it is and 573 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 574 * may be used. 
Otherwise, the packet data may be discarded until enough 575 * memory is free 576 */ 577 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, 578 bool *pfmemalloc) 579 { 580 bool ret_pfmemalloc = false; 581 size_t obj_size; 582 void *obj; 583 584 obj_size = SKB_HEAD_ALIGN(*size); 585 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && 586 !(flags & KMALLOC_NOT_NORMAL_BITS)) { 587 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, 588 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 589 node); 590 *size = SKB_SMALL_HEAD_CACHE_SIZE; 591 if (obj || !(gfp_pfmemalloc_allowed(flags))) 592 goto out; 593 /* Try again but now we are using pfmemalloc reserves */ 594 ret_pfmemalloc = true; 595 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node); 596 goto out; 597 } 598 599 obj_size = kmalloc_size_roundup(obj_size); 600 /* The following cast might truncate high-order bits of obj_size, this 601 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway. 602 */ 603 *size = (unsigned int)obj_size; 604 605 /* 606 * Try a regular allocation, when that fails and we're not entitled 607 * to the reserves, fail. 608 */ 609 obj = kmalloc_node_track_caller(obj_size, 610 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 611 node); 612 if (obj || !(gfp_pfmemalloc_allowed(flags))) 613 goto out; 614 615 /* Try again but now we are using pfmemalloc reserves */ 616 ret_pfmemalloc = true; 617 obj = kmalloc_node_track_caller(obj_size, flags, node); 618 619 out: 620 if (pfmemalloc) 621 *pfmemalloc = ret_pfmemalloc; 622 623 return obj; 624 } 625 626 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 627 * 'private' fields and also do memory statistics to find all the 628 * [BEEP] leaks. 629 * 630 */ 631 632 /** 633 * __alloc_skb - allocate a network buffer 634 * @size: size to allocate 635 * @gfp_mask: allocation mask 636 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 637 * instead of head cache and allocate a cloned (child) skb. 638 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 639 * allocations in case the data is required for writeback 640 * @node: numa node to allocate memory on 641 * 642 * Allocate a new &sk_buff. The returned buffer has no headroom and a 643 * tail room of at least size bytes. The object has a reference count 644 * of one. The return is the buffer. On a failure the return is %NULL. 645 * 646 * Buffers may only be allocated from interrupts using a @gfp_mask of 647 * %GFP_ATOMIC. 648 */ 649 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 650 int flags, int node) 651 { 652 struct kmem_cache *cache; 653 struct sk_buff *skb; 654 bool pfmemalloc; 655 u8 *data; 656 657 cache = (flags & SKB_ALLOC_FCLONE) 658 ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache; 659 660 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 661 gfp_mask |= __GFP_MEMALLOC; 662 663 /* Get the HEAD */ 664 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI && 665 likely(node == NUMA_NO_NODE || node == numa_mem_id())) 666 skb = napi_skb_cache_get(); 667 else 668 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); 669 if (unlikely(!skb)) 670 return NULL; 671 prefetchw(skb); 672 673 /* We do our best to align skb_shared_info on a separate cache 674 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 675 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 676 * Both skb->head and skb_shared_info are cache line aligned. 
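	 *
	 * Rough layout of the buffer set up below (sketch):
	 *
	 *	head == data == tail                       end
	 *	|<--- skb_end_offset(), all tailroom --->|<- struct skb_shared_info ->|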
677 */ 678 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc); 679 if (unlikely(!data)) 680 goto nodata; 681 /* kmalloc_size_roundup() might give us more room than requested. 682 * Put skb_shared_info exactly at the end of allocated zone, 683 * to allow max possible filling before reallocation. 684 */ 685 prefetchw(data + SKB_WITH_OVERHEAD(size)); 686 687 /* 688 * Only clear those fields we need to clear, not those that we will 689 * actually initialise below. Hence, don't put any more fields after 690 * the tail pointer in struct sk_buff! 691 */ 692 memset(skb, 0, offsetof(struct sk_buff, tail)); 693 __build_skb_around(skb, data, size); 694 skb->pfmemalloc = pfmemalloc; 695 696 if (flags & SKB_ALLOC_FCLONE) { 697 struct sk_buff_fclones *fclones; 698 699 fclones = container_of(skb, struct sk_buff_fclones, skb1); 700 701 skb->fclone = SKB_FCLONE_ORIG; 702 refcount_set(&fclones->fclone_ref, 1); 703 } 704 705 return skb; 706 707 nodata: 708 kmem_cache_free(cache, skb); 709 return NULL; 710 } 711 EXPORT_SYMBOL(__alloc_skb); 712 713 /** 714 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 715 * @dev: network device to receive on 716 * @len: length to allocate 717 * @gfp_mask: get_free_pages mask, passed to alloc_skb 718 * 719 * Allocate a new &sk_buff and assign it a usage count of one. The 720 * buffer has NET_SKB_PAD headroom built in. Users should allocate 721 * the headroom they think they need without accounting for the 722 * built in space. The built in space is used for optimisations. 723 * 724 * %NULL is returned if there is no free memory. 725 */ 726 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 727 gfp_t gfp_mask) 728 { 729 struct page_frag_cache *nc; 730 struct sk_buff *skb; 731 bool pfmemalloc; 732 void *data; 733 734 len += NET_SKB_PAD; 735 736 /* If requested length is either too small or too big, 737 * we use kmalloc() for skb->head allocation. 738 */ 739 if (len <= SKB_WITH_OVERHEAD(1024) || 740 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 741 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 742 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 743 if (!skb) 744 goto skb_fail; 745 goto skb_success; 746 } 747 748 len = SKB_HEAD_ALIGN(len); 749 750 if (sk_memalloc_socks()) 751 gfp_mask |= __GFP_MEMALLOC; 752 753 if (in_hardirq() || irqs_disabled()) { 754 nc = this_cpu_ptr(&netdev_alloc_cache); 755 data = page_frag_alloc(nc, len, gfp_mask); 756 pfmemalloc = nc->pfmemalloc; 757 } else { 758 local_bh_disable(); 759 local_lock_nested_bh(&napi_alloc_cache.bh_lock); 760 761 nc = this_cpu_ptr(&napi_alloc_cache.page); 762 data = page_frag_alloc(nc, len, gfp_mask); 763 pfmemalloc = nc->pfmemalloc; 764 765 local_unlock_nested_bh(&napi_alloc_cache.bh_lock); 766 local_bh_enable(); 767 } 768 769 if (unlikely(!data)) 770 return NULL; 771 772 skb = __build_skb(data, len); 773 if (unlikely(!skb)) { 774 skb_free_frag(data); 775 return NULL; 776 } 777 778 if (pfmemalloc) 779 skb->pfmemalloc = 1; 780 skb->head_frag = 1; 781 782 skb_success: 783 skb_reserve(skb, NET_SKB_PAD); 784 skb->dev = dev; 785 786 skb_fail: 787 return skb; 788 } 789 EXPORT_SYMBOL(__netdev_alloc_skb); 790 791 /** 792 * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 793 * @napi: napi instance this buffer was allocated for 794 * @len: length to allocate 795 * 796 * Allocate a new sk_buff for use in NAPI receive. This buffer will 797 * attempt to allocate the head from a special reserved region used 798 * only for NAPI Rx allocation. 
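 *
 * Illustrative use from a driver's NAPI poll callback (sketch; the
 * descriptor handling and names are made up):
 *
 *	skb = napi_alloc_skb(napi, pkt_len);
 *	if (unlikely(!skb))
 *		break;
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 *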
By doing this we can save several 799 * CPU cycles by avoiding having to disable and re-enable IRQs. 800 * 801 * %NULL is returned if there is no free memory. 802 */ 803 struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) 804 { 805 gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN; 806 struct napi_alloc_cache *nc; 807 struct sk_buff *skb; 808 bool pfmemalloc; 809 void *data; 810 811 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 812 len += NET_SKB_PAD + NET_IP_ALIGN; 813 814 /* If requested length is either too small or too big, 815 * we use kmalloc() for skb->head allocation. 816 * When the small frag allocator is available, prefer it over kmalloc 817 * for small fragments 818 */ 819 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || 820 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 821 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 822 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, 823 NUMA_NO_NODE); 824 if (!skb) 825 goto skb_fail; 826 goto skb_success; 827 } 828 829 if (sk_memalloc_socks()) 830 gfp_mask |= __GFP_MEMALLOC; 831 832 local_lock_nested_bh(&napi_alloc_cache.bh_lock); 833 nc = this_cpu_ptr(&napi_alloc_cache); 834 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) { 835 /* we are artificially inflating the allocation size, but 836 * that is not as bad as it may look like, as: 837 * - 'len' less than GRO_MAX_HEAD makes little sense 838 * - On most systems, larger 'len' values lead to fragment 839 * size above 512 bytes 840 * - kmalloc would use the kmalloc-1k slab for such values 841 * - Builds with smaller GRO_MAX_HEAD will very likely do 842 * little networking, as that implies no WiFi and no 843 * tunnels support, and 32 bits arches. 844 */ 845 len = SZ_1K; 846 847 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); 848 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); 849 } else { 850 len = SKB_HEAD_ALIGN(len); 851 852 data = page_frag_alloc(&nc->page, len, gfp_mask); 853 pfmemalloc = nc->page.pfmemalloc; 854 } 855 local_unlock_nested_bh(&napi_alloc_cache.bh_lock); 856 857 if (unlikely(!data)) 858 return NULL; 859 860 skb = __napi_build_skb(data, len); 861 if (unlikely(!skb)) { 862 skb_free_frag(data); 863 return NULL; 864 } 865 866 if (pfmemalloc) 867 skb->pfmemalloc = 1; 868 skb->head_frag = 1; 869 870 skb_success: 871 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 872 skb->dev = napi->dev; 873 874 skb_fail: 875 return skb; 876 } 877 EXPORT_SYMBOL(napi_alloc_skb); 878 879 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, 880 int off, int size, unsigned int truesize) 881 { 882 DEBUG_NET_WARN_ON_ONCE(size > truesize); 883 884 skb_fill_netmem_desc(skb, i, netmem, off, size); 885 skb->len += size; 886 skb->data_len += size; 887 skb->truesize += truesize; 888 } 889 EXPORT_SYMBOL(skb_add_rx_frag_netmem); 890 891 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 892 unsigned int truesize) 893 { 894 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 895 896 DEBUG_NET_WARN_ON_ONCE(size > truesize); 897 898 skb_frag_size_add(frag, size); 899 skb->len += size; 900 skb->data_len += size; 901 skb->truesize += truesize; 902 } 903 EXPORT_SYMBOL(skb_coalesce_rx_frag); 904 905 static void skb_drop_list(struct sk_buff **listp) 906 { 907 kfree_skb_list(*listp); 908 *listp = NULL; 909 } 910 911 static inline void skb_drop_fraglist(struct sk_buff *skb) 912 { 913 skb_drop_list(&skb_shinfo(skb)->frag_list); 914 } 915 916 static void skb_clone_fraglist(struct sk_buff *skb) 917 { 918 struct sk_buff *list; 
919 920 skb_walk_frags(skb, list) 921 skb_get(list); 922 } 923 924 static bool is_pp_netmem(netmem_ref netmem) 925 { 926 return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE; 927 } 928 929 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, 930 unsigned int headroom) 931 { 932 #if IS_ENABLED(CONFIG_PAGE_POOL) 933 u32 size, truesize, len, max_head_size, off; 934 struct sk_buff *skb = *pskb, *nskb; 935 int err, i, head_off; 936 void *data; 937 938 /* XDP does not support fraglist so we need to linearize 939 * the skb. 940 */ 941 if (skb_has_frag_list(skb)) 942 return -EOPNOTSUPP; 943 944 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); 945 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) 946 return -ENOMEM; 947 948 size = min_t(u32, skb->len, max_head_size); 949 truesize = SKB_HEAD_ALIGN(size) + headroom; 950 data = page_pool_dev_alloc_va(pool, &truesize); 951 if (!data) 952 return -ENOMEM; 953 954 nskb = napi_build_skb(data, truesize); 955 if (!nskb) { 956 page_pool_free_va(pool, data, true); 957 return -ENOMEM; 958 } 959 960 skb_reserve(nskb, headroom); 961 skb_copy_header(nskb, skb); 962 skb_mark_for_recycle(nskb); 963 964 err = skb_copy_bits(skb, 0, nskb->data, size); 965 if (err) { 966 consume_skb(nskb); 967 return err; 968 } 969 skb_put(nskb, size); 970 971 head_off = skb_headroom(nskb) - skb_headroom(skb); 972 skb_headers_offset_update(nskb, head_off); 973 974 off = size; 975 len = skb->len - off; 976 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { 977 struct page *page; 978 u32 page_off; 979 980 size = min_t(u32, len, PAGE_SIZE); 981 truesize = size; 982 983 page = page_pool_dev_alloc(pool, &page_off, &truesize); 984 if (!page) { 985 consume_skb(nskb); 986 return -ENOMEM; 987 } 988 989 skb_add_rx_frag(nskb, i, page, page_off, size, truesize); 990 err = skb_copy_bits(skb, off, page_address(page) + page_off, 991 size); 992 if (err) { 993 consume_skb(nskb); 994 return err; 995 } 996 997 len -= size; 998 off += size; 999 } 1000 1001 consume_skb(skb); 1002 *pskb = nskb; 1003 1004 return 0; 1005 #else 1006 return -EOPNOTSUPP; 1007 #endif 1008 } 1009 EXPORT_SYMBOL(skb_pp_cow_data); 1010 1011 int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, 1012 struct bpf_prog *prog) 1013 { 1014 if (!prog->aux->xdp_has_frags) 1015 return -EINVAL; 1016 1017 return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM); 1018 } 1019 EXPORT_SYMBOL(skb_cow_data_for_xdp); 1020 1021 #if IS_ENABLED(CONFIG_PAGE_POOL) 1022 bool napi_pp_put_page(netmem_ref netmem) 1023 { 1024 netmem = netmem_compound_head(netmem); 1025 1026 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 1027 * in order to preserve any existing bits, such as bit 0 for the 1028 * head page of compound page and bit 1 for pfmemalloc page, so 1029 * mask those bits for freeing side when doing below checking, 1030 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 1031 * to avoid recycling the pfmemalloc page. 
	 */
	if (unlikely(!is_pp_netmem(netmem)))
		return false;

	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}

/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb: page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of a skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	netmem_ref head_netmem;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
		if (likely(is_pp_netmem(head_netmem)))
			page_pool_ref_netmem(head_netmem);
		else
			page_ref_inc(netmem_to_page(head_netmem));
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 * Free the memory of an skbuff without cleaning its state.
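 *
 * Layout handled below (sketch): an SKB_ALLOC_FCLONE allocation is really a
 * struct sk_buff_fclones { skb1 (original), skb2 (clone), fclone_ref }, and
 * fclone_ref tracks how many of the two buffers are still in use.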
1141 */ 1142 static void kfree_skbmem(struct sk_buff *skb) 1143 { 1144 struct sk_buff_fclones *fclones; 1145 1146 switch (skb->fclone) { 1147 case SKB_FCLONE_UNAVAILABLE: 1148 kmem_cache_free(net_hotdata.skbuff_cache, skb); 1149 return; 1150 1151 case SKB_FCLONE_ORIG: 1152 fclones = container_of(skb, struct sk_buff_fclones, skb1); 1153 1154 /* We usually free the clone (TX completion) before original skb 1155 * This test would have no chance to be true for the clone, 1156 * while here, branch prediction will be good. 1157 */ 1158 if (refcount_read(&fclones->fclone_ref) == 1) 1159 goto fastpath; 1160 break; 1161 1162 default: /* SKB_FCLONE_CLONE */ 1163 fclones = container_of(skb, struct sk_buff_fclones, skb2); 1164 break; 1165 } 1166 if (!refcount_dec_and_test(&fclones->fclone_ref)) 1167 return; 1168 fastpath: 1169 kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones); 1170 } 1171 1172 void skb_release_head_state(struct sk_buff *skb) 1173 { 1174 skb_dst_drop(skb); 1175 if (skb->destructor) { 1176 DEBUG_NET_WARN_ON_ONCE(in_hardirq()); 1177 skb->destructor(skb); 1178 } 1179 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 1180 nf_conntrack_put(skb_nfct(skb)); 1181 #endif 1182 skb_ext_put(skb); 1183 } 1184 1185 /* Free everything but the sk_buff shell. */ 1186 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) 1187 { 1188 skb_release_head_state(skb); 1189 if (likely(skb->head)) 1190 skb_release_data(skb, reason); 1191 } 1192 1193 /** 1194 * __kfree_skb - private function 1195 * @skb: buffer 1196 * 1197 * Free an sk_buff. Release anything attached to the buffer. 1198 * Clean the state. This is an internal helper function. Users should 1199 * always call kfree_skb 1200 */ 1201 1202 void __kfree_skb(struct sk_buff *skb) 1203 { 1204 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); 1205 kfree_skbmem(skb); 1206 } 1207 EXPORT_SYMBOL(__kfree_skb); 1208 1209 static __always_inline 1210 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, 1211 enum skb_drop_reason reason) 1212 { 1213 if (unlikely(!skb_unref(skb))) 1214 return false; 1215 1216 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET || 1217 u32_get_bits(reason, 1218 SKB_DROP_REASON_SUBSYS_MASK) >= 1219 SKB_DROP_REASON_SUBSYS_NUM); 1220 1221 if (reason == SKB_CONSUMED) 1222 trace_consume_skb(skb, __builtin_return_address(0)); 1223 else 1224 trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); 1225 return true; 1226 } 1227 1228 /** 1229 * sk_skb_reason_drop - free an sk_buff with special reason 1230 * @sk: the socket to receive @skb, or NULL if not applicable 1231 * @skb: buffer to free 1232 * @reason: reason why this skb is dropped 1233 * 1234 * Drop a reference to the buffer and free it if the usage count has hit 1235 * zero. Meanwhile, pass the receiving socket and drop reason to 1236 * 'kfree_skb' tracepoint. 
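 *
 * Callers typically reach this through the kfree_skb_reason() / kfree_skb()
 * wrappers; a typical (made up) call site looks like:
 *
 *	if (unlikely(bad_csum))
 *		kfree_skb_reason(skb, SKB_DROP_REASON_TCP_CSUM);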
1237 */ 1238 void __fix_address 1239 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) 1240 { 1241 if (__sk_skb_reason_drop(sk, skb, reason)) 1242 __kfree_skb(skb); 1243 } 1244 EXPORT_SYMBOL(sk_skb_reason_drop); 1245 1246 #define KFREE_SKB_BULK_SIZE 16 1247 1248 struct skb_free_array { 1249 unsigned int skb_count; 1250 void *skb_array[KFREE_SKB_BULK_SIZE]; 1251 }; 1252 1253 static void kfree_skb_add_bulk(struct sk_buff *skb, 1254 struct skb_free_array *sa, 1255 enum skb_drop_reason reason) 1256 { 1257 /* if SKB is a clone, don't handle this case */ 1258 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { 1259 __kfree_skb(skb); 1260 return; 1261 } 1262 1263 skb_release_all(skb, reason); 1264 sa->skb_array[sa->skb_count++] = skb; 1265 1266 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { 1267 kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE, 1268 sa->skb_array); 1269 sa->skb_count = 0; 1270 } 1271 } 1272 1273 void __fix_address 1274 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) 1275 { 1276 struct skb_free_array sa; 1277 1278 sa.skb_count = 0; 1279 1280 while (segs) { 1281 struct sk_buff *next = segs->next; 1282 1283 if (__sk_skb_reason_drop(NULL, segs, reason)) { 1284 skb_poison_list(segs); 1285 kfree_skb_add_bulk(segs, &sa, reason); 1286 } 1287 1288 segs = next; 1289 } 1290 1291 if (sa.skb_count) 1292 kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array); 1293 } 1294 EXPORT_SYMBOL(kfree_skb_list_reason); 1295 1296 /* Dump skb information and contents. 1297 * 1298 * Must only be called from net_ratelimit()-ed paths. 1299 * 1300 * Dumps whole packets if full_pkt, only headers otherwise. 1301 */ 1302 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 1303 { 1304 struct skb_shared_info *sh = skb_shinfo(skb); 1305 struct net_device *dev = skb->dev; 1306 struct sock *sk = skb->sk; 1307 struct sk_buff *list_skb; 1308 bool has_mac, has_trans; 1309 int headroom, tailroom; 1310 int i, len, seg_len; 1311 1312 if (full_pkt) 1313 len = skb->len; 1314 else 1315 len = min_t(int, skb->len, MAX_HEADER + 128); 1316 1317 headroom = skb_headroom(skb); 1318 tailroom = skb_tailroom(skb); 1319 1320 has_mac = skb_mac_header_was_set(skb); 1321 has_trans = skb_transport_header_was_set(skb); 1322 1323 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 1324 "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n" 1325 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 1326 "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 1327 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n" 1328 "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n" 1329 "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n", 1330 level, skb->len, headroom, skb_headlen(skb), tailroom, 1331 has_mac ? skb->mac_header : -1, 1332 has_mac ? skb_mac_header_len(skb) : -1, 1333 skb->mac_len, 1334 skb->network_header, 1335 has_trans ? skb_network_header_len(skb) : -1, 1336 has_trans ? 
skb->transport_header : -1, 1337 sh->tx_flags, sh->nr_frags, 1338 sh->gso_size, sh->gso_type, sh->gso_segs, 1339 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, 1340 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, 1341 skb->hash, skb->sw_hash, skb->l4_hash, 1342 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, 1343 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, 1344 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, 1345 skb->inner_network_header, skb->inner_transport_header); 1346 1347 if (dev) 1348 printk("%sdev name=%s feat=%pNF\n", 1349 level, dev->name, &dev->features); 1350 if (sk) 1351 printk("%ssk family=%hu type=%u proto=%u\n", 1352 level, sk->sk_family, sk->sk_type, sk->sk_protocol); 1353 1354 if (full_pkt && headroom) 1355 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, 1356 16, 1, skb->head, headroom, false); 1357 1358 seg_len = min_t(int, skb_headlen(skb), len); 1359 if (seg_len) 1360 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 1361 16, 1, skb->data, seg_len, false); 1362 len -= seg_len; 1363 1364 if (full_pkt && tailroom) 1365 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, 1366 16, 1, skb_tail_pointer(skb), tailroom, false); 1367 1368 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { 1369 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1370 u32 p_off, p_len, copied; 1371 struct page *p; 1372 u8 *vaddr; 1373 1374 skb_frag_foreach_page(frag, skb_frag_off(frag), 1375 skb_frag_size(frag), p, p_off, p_len, 1376 copied) { 1377 seg_len = min_t(int, p_len, len); 1378 vaddr = kmap_atomic(p); 1379 print_hex_dump(level, "skb frag: ", 1380 DUMP_PREFIX_OFFSET, 1381 16, 1, vaddr + p_off, seg_len, false); 1382 kunmap_atomic(vaddr); 1383 len -= seg_len; 1384 if (!len) 1385 break; 1386 } 1387 } 1388 1389 if (full_pkt && skb_has_frag_list(skb)) { 1390 printk("skb fraglist:\n"); 1391 skb_walk_frags(skb, list_skb) 1392 skb_dump(level, list_skb, true); 1393 } 1394 } 1395 EXPORT_SYMBOL(skb_dump); 1396 1397 /** 1398 * skb_tx_error - report an sk_buff xmit error 1399 * @skb: buffer that triggered an error 1400 * 1401 * Report xmit error if a device callback is tracking this skb. 1402 * skb must be freed afterwards. 
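 *
 * Illustrative use on a failed transmit path (sketch; the DMA handling and
 * names are made up):
 *
 *	if (dma_mapping_error(dev, dma_addr)) {
 *		skb_tx_error(skb);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}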
1403 */ 1404 void skb_tx_error(struct sk_buff *skb) 1405 { 1406 if (skb) { 1407 skb_zcopy_downgrade_managed(skb); 1408 skb_zcopy_clear(skb, true); 1409 } 1410 } 1411 EXPORT_SYMBOL(skb_tx_error); 1412 1413 #ifdef CONFIG_TRACEPOINTS 1414 /** 1415 * consume_skb - free an skbuff 1416 * @skb: buffer to free 1417 * 1418 * Drop a ref to the buffer and free it if the usage count has hit zero 1419 * Functions identically to kfree_skb, but kfree_skb assumes that the frame 1420 * is being dropped after a failure and notes that 1421 */ 1422 void consume_skb(struct sk_buff *skb) 1423 { 1424 if (!skb_unref(skb)) 1425 return; 1426 1427 trace_consume_skb(skb, __builtin_return_address(0)); 1428 __kfree_skb(skb); 1429 } 1430 EXPORT_SYMBOL(consume_skb); 1431 #endif 1432 1433 /** 1434 * __consume_stateless_skb - free an skbuff, assuming it is stateless 1435 * @skb: buffer to free 1436 * 1437 * Alike consume_skb(), but this variant assumes that this is the last 1438 * skb reference and all the head states have been already dropped 1439 */ 1440 void __consume_stateless_skb(struct sk_buff *skb) 1441 { 1442 trace_consume_skb(skb, __builtin_return_address(0)); 1443 skb_release_data(skb, SKB_CONSUMED); 1444 kfree_skbmem(skb); 1445 } 1446 1447 static void napi_skb_cache_put(struct sk_buff *skb) 1448 { 1449 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 1450 u32 i; 1451 1452 if (!kasan_mempool_poison_object(skb)) 1453 return; 1454 1455 local_lock_nested_bh(&napi_alloc_cache.bh_lock); 1456 nc->skb_cache[nc->skb_count++] = skb; 1457 1458 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { 1459 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) 1460 kasan_mempool_unpoison_object(nc->skb_cache[i], 1461 kmem_cache_size(net_hotdata.skbuff_cache)); 1462 1463 kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF, 1464 nc->skb_cache + NAPI_SKB_CACHE_HALF); 1465 nc->skb_count = NAPI_SKB_CACHE_HALF; 1466 } 1467 local_unlock_nested_bh(&napi_alloc_cache.bh_lock); 1468 } 1469 1470 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) 1471 { 1472 skb_release_all(skb, reason); 1473 napi_skb_cache_put(skb); 1474 } 1475 1476 void napi_skb_free_stolen_head(struct sk_buff *skb) 1477 { 1478 if (unlikely(skb->slow_gro)) { 1479 nf_reset_ct(skb); 1480 skb_dst_drop(skb); 1481 skb_ext_put(skb); 1482 skb_orphan(skb); 1483 skb->slow_gro = 0; 1484 } 1485 napi_skb_cache_put(skb); 1486 } 1487 1488 void napi_consume_skb(struct sk_buff *skb, int budget) 1489 { 1490 /* Zero budget indicate non-NAPI context called us, like netpoll */ 1491 if (unlikely(!budget)) { 1492 dev_consume_skb_any(skb); 1493 return; 1494 } 1495 1496 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 1497 1498 if (!skb_unref(skb)) 1499 return; 1500 1501 /* if reaching here SKB is ready to free */ 1502 trace_consume_skb(skb, __builtin_return_address(0)); 1503 1504 /* if SKB is a clone, don't handle this case */ 1505 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 1506 __kfree_skb(skb); 1507 return; 1508 } 1509 1510 skb_release_all(skb, SKB_CONSUMED); 1511 napi_skb_cache_put(skb); 1512 } 1513 EXPORT_SYMBOL(napi_consume_skb); 1514 1515 /* Make sure a field is contained by headers group */ 1516 #define CHECK_SKB_FIELD(field) \ 1517 BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ 1518 offsetof(struct sk_buff, headers.field)); \ 1519 1520 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 1521 { 1522 new->tstamp = old->tstamp; 1523 /* We do not copy old->sk */ 1524 new->dev = old->dev; 1525 memcpy(new->cb, 
old->cb, sizeof(old->cb)); 1526 skb_dst_copy(new, old); 1527 __skb_ext_copy(new, old); 1528 __nf_copy(new, old, false); 1529 1530 /* Note : this field could be in the headers group. 1531 * It is not yet because we do not want to have a 16 bit hole 1532 */ 1533 new->queue_mapping = old->queue_mapping; 1534 1535 memcpy(&new->headers, &old->headers, sizeof(new->headers)); 1536 CHECK_SKB_FIELD(protocol); 1537 CHECK_SKB_FIELD(csum); 1538 CHECK_SKB_FIELD(hash); 1539 CHECK_SKB_FIELD(priority); 1540 CHECK_SKB_FIELD(skb_iif); 1541 CHECK_SKB_FIELD(vlan_proto); 1542 CHECK_SKB_FIELD(vlan_tci); 1543 CHECK_SKB_FIELD(transport_header); 1544 CHECK_SKB_FIELD(network_header); 1545 CHECK_SKB_FIELD(mac_header); 1546 CHECK_SKB_FIELD(inner_protocol); 1547 CHECK_SKB_FIELD(inner_transport_header); 1548 CHECK_SKB_FIELD(inner_network_header); 1549 CHECK_SKB_FIELD(inner_mac_header); 1550 CHECK_SKB_FIELD(mark); 1551 #ifdef CONFIG_NETWORK_SECMARK 1552 CHECK_SKB_FIELD(secmark); 1553 #endif 1554 #ifdef CONFIG_NET_RX_BUSY_POLL 1555 CHECK_SKB_FIELD(napi_id); 1556 #endif 1557 CHECK_SKB_FIELD(alloc_cpu); 1558 #ifdef CONFIG_XPS 1559 CHECK_SKB_FIELD(sender_cpu); 1560 #endif 1561 #ifdef CONFIG_NET_SCHED 1562 CHECK_SKB_FIELD(tc_index); 1563 #endif 1564 1565 } 1566 1567 /* 1568 * You should not add any new code to this function. Add it to 1569 * __copy_skb_header above instead. 1570 */ 1571 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 1572 { 1573 #define C(x) n->x = skb->x 1574 1575 n->next = n->prev = NULL; 1576 n->sk = NULL; 1577 __copy_skb_header(n, skb); 1578 1579 C(len); 1580 C(data_len); 1581 C(mac_len); 1582 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 1583 n->cloned = 1; 1584 n->nohdr = 0; 1585 n->peeked = 0; 1586 C(pfmemalloc); 1587 C(pp_recycle); 1588 n->destructor = NULL; 1589 C(tail); 1590 C(end); 1591 C(head); 1592 C(head_frag); 1593 C(data); 1594 C(truesize); 1595 refcount_set(&n->users, 1); 1596 1597 atomic_inc(&(skb_shinfo(skb)->dataref)); 1598 skb->cloned = 1; 1599 1600 return n; 1601 #undef C 1602 } 1603 1604 /** 1605 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg 1606 * @first: first sk_buff of the msg 1607 */ 1608 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) 1609 { 1610 struct sk_buff *n; 1611 1612 n = alloc_skb(0, GFP_ATOMIC); 1613 if (!n) 1614 return NULL; 1615 1616 n->len = first->len; 1617 n->data_len = first->len; 1618 n->truesize = first->truesize; 1619 1620 skb_shinfo(n)->frag_list = first; 1621 1622 __copy_skb_header(n, first); 1623 n->destructor = NULL; 1624 1625 return n; 1626 } 1627 EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1628 1629 /** 1630 * skb_morph - morph one skb into another 1631 * @dst: the skb to receive the contents 1632 * @src: the skb to supply the contents 1633 * 1634 * This is identical to skb_clone except that the target skb is 1635 * supplied by the user. 1636 * 1637 * The target skb is returned upon exit. 
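 *
 * Note that any data and state previously attached to @dst is released
 * first (as if @dst had been consumed) before it is rebuilt as a clone
 * of @src.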
1638 */ 1639 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1640 { 1641 skb_release_all(dst, SKB_CONSUMED); 1642 return __skb_clone(dst, src); 1643 } 1644 EXPORT_SYMBOL_GPL(skb_morph); 1645 1646 int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1647 { 1648 unsigned long max_pg, num_pg, new_pg, old_pg, rlim; 1649 struct user_struct *user; 1650 1651 if (capable(CAP_IPC_LOCK) || !size) 1652 return 0; 1653 1654 rlim = rlimit(RLIMIT_MEMLOCK); 1655 if (rlim == RLIM_INFINITY) 1656 return 0; 1657 1658 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1659 max_pg = rlim >> PAGE_SHIFT; 1660 user = mmp->user ? : current_user(); 1661 1662 old_pg = atomic_long_read(&user->locked_vm); 1663 do { 1664 new_pg = old_pg + num_pg; 1665 if (new_pg > max_pg) 1666 return -ENOBUFS; 1667 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); 1668 1669 if (!mmp->user) { 1670 mmp->user = get_uid(user); 1671 mmp->num_pg = num_pg; 1672 } else { 1673 mmp->num_pg += num_pg; 1674 } 1675 1676 return 0; 1677 } 1678 EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1679 1680 void mm_unaccount_pinned_pages(struct mmpin *mmp) 1681 { 1682 if (mmp->user) { 1683 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1684 free_uid(mmp->user); 1685 } 1686 } 1687 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1688 1689 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 1690 { 1691 struct ubuf_info_msgzc *uarg; 1692 struct sk_buff *skb; 1693 1694 WARN_ON_ONCE(!in_task()); 1695 1696 skb = sock_omalloc(sk, 0, GFP_KERNEL); 1697 if (!skb) 1698 return NULL; 1699 1700 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 1701 uarg = (void *)skb->cb; 1702 uarg->mmp.user = NULL; 1703 1704 if (mm_account_pinned_pages(&uarg->mmp, size)) { 1705 kfree_skb(skb); 1706 return NULL; 1707 } 1708 1709 uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; 1710 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 1711 uarg->len = 1; 1712 uarg->bytelen = size; 1713 uarg->zerocopy = 1; 1714 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; 1715 refcount_set(&uarg->ubuf.refcnt, 1); 1716 sock_hold(sk); 1717 1718 return &uarg->ubuf; 1719 } 1720 1721 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) 1722 { 1723 return container_of((void *)uarg, struct sk_buff, cb); 1724 } 1725 1726 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1727 struct ubuf_info *uarg) 1728 { 1729 if (uarg) { 1730 struct ubuf_info_msgzc *uarg_zc; 1731 const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 1732 u32 bytelen, next; 1733 1734 /* there might be non MSG_ZEROCOPY users */ 1735 if (uarg->ops != &msg_zerocopy_ubuf_ops) 1736 return NULL; 1737 1738 /* realloc only when socket is locked (TCP, UDP cork), 1739 * so uarg->len and sk_zckey access is serialized 1740 */ 1741 if (!sock_owned_by_user(sk)) { 1742 WARN_ON_ONCE(1); 1743 return NULL; 1744 } 1745 1746 uarg_zc = uarg_to_msgzc(uarg); 1747 bytelen = uarg_zc->bytelen + size; 1748 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { 1749 /* TCP can create new skb to attach new uarg */ 1750 if (sk->sk_type == SOCK_STREAM) 1751 goto new_alloc; 1752 return NULL; 1753 } 1754 1755 next = (u32)atomic_read(&sk->sk_zckey); 1756 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { 1757 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) 1758 return NULL; 1759 uarg_zc->len++; 1760 uarg_zc->bytelen = bytelen; 1761 atomic_set(&sk->sk_zckey, ++next); 1762 1763 /* no extra ref when appending to datagram (MSG_MORE) */ 1764 if (sk->sk_type == 
SOCK_STREAM) 1765 net_zcopy_get(uarg); 1766 1767 return uarg; 1768 } 1769 } 1770 1771 new_alloc: 1772 return msg_zerocopy_alloc(sk, size); 1773 } 1774 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 1775 1776 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 1777 { 1778 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 1779 u32 old_lo, old_hi; 1780 u64 sum_len; 1781 1782 old_lo = serr->ee.ee_info; 1783 old_hi = serr->ee.ee_data; 1784 sum_len = old_hi - old_lo + 1ULL + len; 1785 1786 if (sum_len >= (1ULL << 32)) 1787 return false; 1788 1789 if (lo != old_hi + 1) 1790 return false; 1791 1792 serr->ee.ee_data += len; 1793 return true; 1794 } 1795 1796 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) 1797 { 1798 struct sk_buff *tail, *skb = skb_from_uarg(uarg); 1799 struct sock_exterr_skb *serr; 1800 struct sock *sk = skb->sk; 1801 struct sk_buff_head *q; 1802 unsigned long flags; 1803 bool is_zerocopy; 1804 u32 lo, hi; 1805 u16 len; 1806 1807 mm_unaccount_pinned_pages(&uarg->mmp); 1808 1809 /* if !len, there was only 1 call, and it was aborted 1810 * so do not queue a completion notification 1811 */ 1812 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 1813 goto release; 1814 1815 len = uarg->len; 1816 lo = uarg->id; 1817 hi = uarg->id + len - 1; 1818 is_zerocopy = uarg->zerocopy; 1819 1820 serr = SKB_EXT_ERR(skb); 1821 memset(serr, 0, sizeof(*serr)); 1822 serr->ee.ee_errno = 0; 1823 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 1824 serr->ee.ee_data = hi; 1825 serr->ee.ee_info = lo; 1826 if (!is_zerocopy) 1827 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 1828 1829 q = &sk->sk_error_queue; 1830 spin_lock_irqsave(&q->lock, flags); 1831 tail = skb_peek_tail(q); 1832 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 1833 !skb_zerocopy_notify_extend(tail, lo, len)) { 1834 __skb_queue_tail(q, skb); 1835 skb = NULL; 1836 } 1837 spin_unlock_irqrestore(&q->lock, flags); 1838 1839 sk_error_report(sk); 1840 1841 release: 1842 consume_skb(skb); 1843 sock_put(sk); 1844 } 1845 1846 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, 1847 bool success) 1848 { 1849 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); 1850 1851 uarg_zc->zerocopy = uarg_zc->zerocopy & success; 1852 1853 if (refcount_dec_and_test(&uarg->refcnt)) 1854 __msg_zerocopy_callback(uarg_zc); 1855 } 1856 1857 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1858 { 1859 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; 1860 1861 atomic_dec(&sk->sk_zckey); 1862 uarg_to_msgzc(uarg)->len--; 1863 1864 if (have_uref) 1865 msg_zerocopy_complete(NULL, uarg, true); 1866 } 1867 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 1868 1869 const struct ubuf_info_ops msg_zerocopy_ubuf_ops = { 1870 .complete = msg_zerocopy_complete, 1871 }; 1872 EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops); 1873 1874 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1875 struct msghdr *msg, int len, 1876 struct ubuf_info *uarg) 1877 { 1878 int err, orig_len = skb->len; 1879 1880 if (uarg->ops->link_skb) { 1881 err = uarg->ops->link_skb(skb, uarg); 1882 if (err) 1883 return err; 1884 } else { 1885 struct ubuf_info *orig_uarg = skb_zcopy(skb); 1886 1887 /* An skb can only point to one uarg. This edge case happens 1888 * when TCP appends to an skb, but zerocopy_realloc triggered 1889 * a new alloc. 
1890 */ 1891 if (orig_uarg && uarg != orig_uarg) 1892 return -EEXIST; 1893 } 1894 1895 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); 1896 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1897 struct sock *save_sk = skb->sk; 1898 1899 /* Streams do not free skb on error. Reset to prev state. */ 1900 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); 1901 skb->sk = sk; 1902 ___pskb_trim(skb, orig_len); 1903 skb->sk = save_sk; 1904 return err; 1905 } 1906 1907 skb_zcopy_set(skb, uarg, NULL); 1908 return skb->len - orig_len; 1909 } 1910 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 1911 1912 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) 1913 { 1914 int i; 1915 1916 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; 1917 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1918 skb_frag_ref(skb, i); 1919 } 1920 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); 1921 1922 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 1923 gfp_t gfp_mask) 1924 { 1925 if (skb_zcopy(orig)) { 1926 if (skb_zcopy(nskb)) { 1927 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 1928 if (!gfp_mask) { 1929 WARN_ON_ONCE(1); 1930 return -ENOMEM; 1931 } 1932 if (skb_uarg(nskb) == skb_uarg(orig)) 1933 return 0; 1934 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 1935 return -EIO; 1936 } 1937 skb_zcopy_set(nskb, skb_uarg(orig), NULL); 1938 } 1939 return 0; 1940 } 1941 1942 /** 1943 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 1944 * @skb: the skb to modify 1945 * @gfp_mask: allocation priority 1946 * 1947 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 1948 * It will copy all frags into kernel and drop the reference 1949 * to userspace pages. 1950 * 1951 * If this function is called from an interrupt gfp_mask() must be 1952 * %GFP_ATOMIC. 1953 * 1954 * Returns 0 on success or a negative error code on failure 1955 * to allocate kernel memory to copy to. 1956 */ 1957 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1958 { 1959 int num_frags = skb_shinfo(skb)->nr_frags; 1960 struct page *page, *head = NULL; 1961 int i, order, psize, new_frags; 1962 u32 d_off; 1963 1964 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1965 return -EINVAL; 1966 1967 if (!num_frags) 1968 goto release; 1969 1970 /* We might have to allocate high order pages, so compute what minimum 1971 * page order is needed. 
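	 *
	 * For example (assuming 4K pages and MAX_SKB_FRAGS == 17): 80K of
	 * frag data does not fit in 17 order-0 pages (17 * 4K = 68K), so
	 * order 1 is used instead (17 * 8K = 136K).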
1972 */ 1973 order = 0; 1974 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1975 order++; 1976 psize = (PAGE_SIZE << order); 1977 1978 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1979 for (i = 0; i < new_frags; i++) { 1980 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1981 if (!page) { 1982 while (head) { 1983 struct page *next = (struct page *)page_private(head); 1984 put_page(head); 1985 head = next; 1986 } 1987 return -ENOMEM; 1988 } 1989 set_page_private(page, (unsigned long)head); 1990 head = page; 1991 } 1992 1993 page = head; 1994 d_off = 0; 1995 for (i = 0; i < num_frags; i++) { 1996 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1997 u32 p_off, p_len, copied; 1998 struct page *p; 1999 u8 *vaddr; 2000 2001 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 2002 p, p_off, p_len, copied) { 2003 u32 copy, done = 0; 2004 vaddr = kmap_atomic(p); 2005 2006 while (done < p_len) { 2007 if (d_off == psize) { 2008 d_off = 0; 2009 page = (struct page *)page_private(page); 2010 } 2011 copy = min_t(u32, psize - d_off, p_len - done); 2012 memcpy(page_address(page) + d_off, 2013 vaddr + p_off + done, copy); 2014 done += copy; 2015 d_off += copy; 2016 } 2017 kunmap_atomic(vaddr); 2018 } 2019 } 2020 2021 /* skb frags release userspace buffers */ 2022 for (i = 0; i < num_frags; i++) 2023 skb_frag_unref(skb, i); 2024 2025 /* skb frags point to kernel buffers */ 2026 for (i = 0; i < new_frags - 1; i++) { 2027 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 2028 head = (struct page *)page_private(head); 2029 } 2030 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 2031 d_off); 2032 skb_shinfo(skb)->nr_frags = new_frags; 2033 2034 release: 2035 skb_zcopy_clear(skb, false); 2036 return 0; 2037 } 2038 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 2039 2040 /** 2041 * skb_clone - duplicate an sk_buff 2042 * @skb: buffer to clone 2043 * @gfp_mask: allocation priority 2044 * 2045 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 2046 * copies share the same packet data but not structure. The new 2047 * buffer has a reference count of 1. If the allocation fails the 2048 * function returns %NULL otherwise the new buffer is returned. 2049 * 2050 * If this function is called from an interrupt gfp_mask() must be 2051 * %GFP_ATOMIC. 
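 *	A minimal usage sketch (illustrative only; the consumer named
 *	below is hypothetical):
 *
 *		struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		deliver_to_tap(copy);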
2052 */ 2053 2054 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2055 { 2056 struct sk_buff_fclones *fclones = container_of(skb, 2057 struct sk_buff_fclones, 2058 skb1); 2059 struct sk_buff *n; 2060 2061 if (skb_orphan_frags(skb, gfp_mask)) 2062 return NULL; 2063 2064 if (skb->fclone == SKB_FCLONE_ORIG && 2065 refcount_read(&fclones->fclone_ref) == 1) { 2066 n = &fclones->skb2; 2067 refcount_set(&fclones->fclone_ref, 2); 2068 n->fclone = SKB_FCLONE_CLONE; 2069 } else { 2070 if (skb_pfmemalloc(skb)) 2071 gfp_mask |= __GFP_MEMALLOC; 2072 2073 n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); 2074 if (!n) 2075 return NULL; 2076 2077 n->fclone = SKB_FCLONE_UNAVAILABLE; 2078 } 2079 2080 return __skb_clone(n, skb); 2081 } 2082 EXPORT_SYMBOL(skb_clone); 2083 2084 void skb_headers_offset_update(struct sk_buff *skb, int off) 2085 { 2086 /* Only adjust this if it actually is csum_start rather than csum */ 2087 if (skb->ip_summed == CHECKSUM_PARTIAL) 2088 skb->csum_start += off; 2089 /* {transport,network,mac}_header and tail are relative to skb->head */ 2090 skb->transport_header += off; 2091 skb->network_header += off; 2092 if (skb_mac_header_was_set(skb)) 2093 skb->mac_header += off; 2094 skb->inner_transport_header += off; 2095 skb->inner_network_header += off; 2096 skb->inner_mac_header += off; 2097 } 2098 EXPORT_SYMBOL(skb_headers_offset_update); 2099 2100 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2101 { 2102 __copy_skb_header(new, old); 2103 2104 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2105 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2106 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2107 } 2108 EXPORT_SYMBOL(skb_copy_header); 2109 2110 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2111 { 2112 if (skb_pfmemalloc(skb)) 2113 return SKB_ALLOC_RX; 2114 return 0; 2115 } 2116 2117 /** 2118 * skb_copy - create private copy of an sk_buff 2119 * @skb: buffer to copy 2120 * @gfp_mask: allocation priority 2121 * 2122 * Make a copy of both an &sk_buff and its data. This is used when the 2123 * caller wishes to modify the data and needs a private copy of the 2124 * data to alter. Returns %NULL on failure or the pointer to the buffer 2125 * on success. The returned buffer has a reference count of 1. 2126 * 2127 * As by-product this function converts non-linear &sk_buff to linear 2128 * one, so that &sk_buff becomes completely private and caller is allowed 2129 * to modify all the data of returned buffer. This means that this 2130 * function is not recommended for use in circumstances when only 2131 * header is going to be modified. Use pskb_copy() instead. 2132 */ 2133 2134 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2135 { 2136 struct sk_buff *n; 2137 unsigned int size; 2138 int headerlen; 2139 2140 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2141 return NULL; 2142 2143 headerlen = skb_headroom(skb); 2144 size = skb_end_offset(skb) + skb->data_len; 2145 n = __alloc_skb(size, gfp_mask, 2146 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2147 if (!n) 2148 return NULL; 2149 2150 /* Set the data pointer */ 2151 skb_reserve(n, headerlen); 2152 /* Set the tail pointer and length */ 2153 skb_put(n, skb->len); 2154 2155 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2156 2157 skb_copy_header(n, skb); 2158 return n; 2159 } 2160 EXPORT_SYMBOL(skb_copy); 2161 2162 /** 2163 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2164 * @skb: buffer to copy 2165 * @headroom: headroom of new skb 2166 * @gfp_mask: allocation priority 2167 * @fclone: if true allocate the copy of the skb from the fclone 2168 * cache instead of the head cache; it is recommended to set this 2169 * to true for the cases where the copy will likely be cloned 2170 * 2171 * Make a copy of both an &sk_buff and part of its data, located 2172 * in header. Fragmented data remain shared. This is used when 2173 * the caller wishes to modify only header of &sk_buff and needs 2174 * private copy of the header to alter. Returns %NULL on failure 2175 * or the pointer to the buffer on success. 2176 * The returned buffer has a reference count of 1. 2177 */ 2178 2179 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2180 gfp_t gfp_mask, bool fclone) 2181 { 2182 unsigned int size = skb_headlen(skb) + headroom; 2183 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2184 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2185 2186 if (!n) 2187 goto out; 2188 2189 /* Set the data pointer */ 2190 skb_reserve(n, headroom); 2191 /* Set the tail pointer and length */ 2192 skb_put(n, skb_headlen(skb)); 2193 /* Copy the bytes */ 2194 skb_copy_from_linear_data(skb, n->data, n->len); 2195 2196 n->truesize += skb->data_len; 2197 n->data_len = skb->data_len; 2198 n->len = skb->len; 2199 2200 if (skb_shinfo(skb)->nr_frags) { 2201 int i; 2202 2203 if (skb_orphan_frags(skb, gfp_mask) || 2204 skb_zerocopy_clone(n, skb, gfp_mask)) { 2205 kfree_skb(n); 2206 n = NULL; 2207 goto out; 2208 } 2209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2210 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2211 skb_frag_ref(skb, i); 2212 } 2213 skb_shinfo(n)->nr_frags = i; 2214 } 2215 2216 if (skb_has_frag_list(skb)) { 2217 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2218 skb_clone_fraglist(n); 2219 } 2220 2221 skb_copy_header(n, skb); 2222 out: 2223 return n; 2224 } 2225 EXPORT_SYMBOL(__pskb_copy_fclone); 2226 2227 /** 2228 * pskb_expand_head - reallocate header of &sk_buff 2229 * @skb: buffer to reallocate 2230 * @nhead: room to add at head 2231 * @ntail: room to add at tail 2232 * @gfp_mask: allocation priority 2233 * 2234 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2235 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2236 * reference count of 1. Returns zero in the case of success or error, 2237 * if expansion failed. In the last case, &sk_buff is not changed. 2238 * 2239 * All the pointers pointing into skb header may change and must be 2240 * reloaded after call to this function. 2241 */ 2242 2243 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2244 gfp_t gfp_mask) 2245 { 2246 unsigned int osize = skb_end_offset(skb); 2247 unsigned int size = osize + nhead + ntail; 2248 long off; 2249 u8 *data; 2250 int i; 2251 2252 BUG_ON(nhead < 0); 2253 2254 BUG_ON(skb_shared(skb)); 2255 2256 skb_zcopy_downgrade_managed(skb); 2257 2258 if (skb_pfmemalloc(skb)) 2259 gfp_mask |= __GFP_MEMALLOC; 2260 2261 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2262 if (!data) 2263 goto nodata; 2264 size = SKB_WITH_OVERHEAD(size); 2265 2266 /* Copy only real data... and, alas, header. This should be 2267 * optimized for the cases when header is void. 
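 * (Concretely: everything from skb->head up to the tail pointer, i.e.
 * the old headroom plus the linear data, is copied to offset nhead in
 * the new buffer, and the shared info with its in-use frags follows.)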
2268 */ 2269 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2270 2271 memcpy((struct skb_shared_info *)(data + size), 2272 skb_shinfo(skb), 2273 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2274 2275 /* 2276 * if shinfo is shared we must drop the old head gracefully, but if it 2277 * is not we can just drop the old head and let the existing refcount 2278 * be since all we did is relocate the values 2279 */ 2280 if (skb_cloned(skb)) { 2281 if (skb_orphan_frags(skb, gfp_mask)) 2282 goto nofrags; 2283 if (skb_zcopy(skb)) 2284 refcount_inc(&skb_uarg(skb)->refcnt); 2285 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2286 skb_frag_ref(skb, i); 2287 2288 if (skb_has_frag_list(skb)) 2289 skb_clone_fraglist(skb); 2290 2291 skb_release_data(skb, SKB_CONSUMED); 2292 } else { 2293 skb_free_head(skb); 2294 } 2295 off = (data + nhead) - skb->head; 2296 2297 skb->head = data; 2298 skb->head_frag = 0; 2299 skb->data += off; 2300 2301 skb_set_end_offset(skb, size); 2302 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2303 off = nhead; 2304 #endif 2305 skb->tail += off; 2306 skb_headers_offset_update(skb, nhead); 2307 skb->cloned = 0; 2308 skb->hdr_len = 0; 2309 skb->nohdr = 0; 2310 atomic_set(&skb_shinfo(skb)->dataref, 1); 2311 2312 skb_metadata_clear(skb); 2313 2314 /* It is not generally safe to change skb->truesize. 2315 * For the moment, we really care of rx path, or 2316 * when skb is orphaned (not attached to a socket). 2317 */ 2318 if (!skb->sk || skb->destructor == sock_edemux) 2319 skb->truesize += size - osize; 2320 2321 return 0; 2322 2323 nofrags: 2324 skb_kfree_head(data, size); 2325 nodata: 2326 return -ENOMEM; 2327 } 2328 EXPORT_SYMBOL(pskb_expand_head); 2329 2330 /* Make private copy of skb with writable head and some headroom */ 2331 2332 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2333 { 2334 struct sk_buff *skb2; 2335 int delta = headroom - skb_headroom(skb); 2336 2337 if (delta <= 0) 2338 skb2 = pskb_copy(skb, GFP_ATOMIC); 2339 else { 2340 skb2 = skb_clone(skb, GFP_ATOMIC); 2341 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2342 GFP_ATOMIC)) { 2343 kfree_skb(skb2); 2344 skb2 = NULL; 2345 } 2346 } 2347 return skb2; 2348 } 2349 EXPORT_SYMBOL(skb_realloc_headroom); 2350 2351 /* Note: We plan to rework this in linux-6.4 */ 2352 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2353 { 2354 unsigned int saved_end_offset, saved_truesize; 2355 struct skb_shared_info *shinfo; 2356 int res; 2357 2358 saved_end_offset = skb_end_offset(skb); 2359 saved_truesize = skb->truesize; 2360 2361 res = pskb_expand_head(skb, 0, 0, pri); 2362 if (res) 2363 return res; 2364 2365 skb->truesize = saved_truesize; 2366 2367 if (likely(skb_end_offset(skb) == saved_end_offset)) 2368 return 0; 2369 2370 /* We can not change skb->end if the original or new value 2371 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2372 */ 2373 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2374 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2375 /* We think this path should not be taken. 2376 * Add a temporary trace to warn us just in case. 2377 */ 2378 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2379 saved_end_offset, skb_end_offset(skb)); 2380 WARN_ON_ONCE(1); 2381 return 0; 2382 } 2383 2384 shinfo = skb_shinfo(skb); 2385 2386 /* We are about to change back skb->end, 2387 * we need to move skb_shinfo() to its new location. 
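 * Only the live part of the shared info is moved: the fixed fields plus
 * the nr_frags fragment descriptors that are actually in use.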
2388 */ 2389 memmove(skb->head + saved_end_offset, 2390 shinfo, 2391 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2392 2393 skb_set_end_offset(skb, saved_end_offset); 2394 2395 return 0; 2396 } 2397 2398 /** 2399 * skb_expand_head - reallocate header of &sk_buff 2400 * @skb: buffer to reallocate 2401 * @headroom: needed headroom 2402 * 2403 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2404 * if possible; copies skb->sk to new skb as needed 2405 * and frees original skb in case of failures. 2406 * 2407 * It expect increased headroom and generates warning otherwise. 2408 */ 2409 2410 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2411 { 2412 int delta = headroom - skb_headroom(skb); 2413 int osize = skb_end_offset(skb); 2414 struct sock *sk = skb->sk; 2415 2416 if (WARN_ONCE(delta <= 0, 2417 "%s is expecting an increase in the headroom", __func__)) 2418 return skb; 2419 2420 delta = SKB_DATA_ALIGN(delta); 2421 /* pskb_expand_head() might crash, if skb is shared. */ 2422 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2423 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2424 2425 if (unlikely(!nskb)) 2426 goto fail; 2427 2428 if (sk) 2429 skb_set_owner_w(nskb, sk); 2430 consume_skb(skb); 2431 skb = nskb; 2432 } 2433 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2434 goto fail; 2435 2436 if (sk && is_skb_wmem(skb)) { 2437 delta = skb_end_offset(skb) - osize; 2438 refcount_add(delta, &sk->sk_wmem_alloc); 2439 skb->truesize += delta; 2440 } 2441 return skb; 2442 2443 fail: 2444 kfree_skb(skb); 2445 return NULL; 2446 } 2447 EXPORT_SYMBOL(skb_expand_head); 2448 2449 /** 2450 * skb_copy_expand - copy and expand sk_buff 2451 * @skb: buffer to copy 2452 * @newheadroom: new free bytes at head 2453 * @newtailroom: new free bytes at tail 2454 * @gfp_mask: allocation priority 2455 * 2456 * Make a copy of both an &sk_buff and its data and while doing so 2457 * allocate additional space. 2458 * 2459 * This is used when the caller wishes to modify the data and needs a 2460 * private copy of the data to alter as well as more space for new fields. 2461 * Returns %NULL on failure or the pointer to the buffer 2462 * on success. The returned buffer has a reference count of 1. 2463 * 2464 * You must pass %GFP_ATOMIC as the allocation priority if this function 2465 * is called from an interrupt. 2466 */ 2467 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2468 int newheadroom, int newtailroom, 2469 gfp_t gfp_mask) 2470 { 2471 /* 2472 * Allocate the copy buffer 2473 */ 2474 int head_copy_len, head_copy_off; 2475 struct sk_buff *n; 2476 int oldheadroom; 2477 2478 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2479 return NULL; 2480 2481 oldheadroom = skb_headroom(skb); 2482 n = __alloc_skb(newheadroom + skb->len + newtailroom, 2483 gfp_mask, skb_alloc_rx_flag(skb), 2484 NUMA_NO_NODE); 2485 if (!n) 2486 return NULL; 2487 2488 skb_reserve(n, newheadroom); 2489 2490 /* Set the tail pointer and length */ 2491 skb_put(n, skb->len); 2492 2493 head_copy_len = oldheadroom; 2494 head_copy_off = 0; 2495 if (newheadroom <= head_copy_len) 2496 head_copy_len = newheadroom; 2497 else 2498 head_copy_off = newheadroom - head_copy_len; 2499 2500 /* Copy the linear header and data. 
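 * The copy starts head_copy_len bytes before skb->data, so the retained
 * slice of the old headroom always ends just below the new data start.
 * For example, with an old headroom of 16 bytes and @newheadroom of 64,
 * head_copy_len stays 16 and head_copy_off becomes 48; with old 32 and
 * new 16, only the last 16 headroom bytes are copied at offset 0.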
*/ 2501 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2502 skb->len + head_copy_len)); 2503 2504 skb_copy_header(n, skb); 2505 2506 skb_headers_offset_update(n, newheadroom - oldheadroom); 2507 2508 return n; 2509 } 2510 EXPORT_SYMBOL(skb_copy_expand); 2511 2512 /** 2513 * __skb_pad - zero pad the tail of an skb 2514 * @skb: buffer to pad 2515 * @pad: space to pad 2516 * @free_on_error: free buffer on error 2517 * 2518 * Ensure that a buffer is followed by a padding area that is zero 2519 * filled. Used by network drivers which may DMA or transfer data 2520 * beyond the buffer end onto the wire. 2521 * 2522 * May return error in out of memory cases. The skb is freed on error 2523 * if @free_on_error is true. 2524 */ 2525 2526 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2527 { 2528 int err; 2529 int ntail; 2530 2531 /* If the skbuff is non linear tailroom is always zero.. */ 2532 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2533 memset(skb->data+skb->len, 0, pad); 2534 return 0; 2535 } 2536 2537 ntail = skb->data_len + pad - (skb->end - skb->tail); 2538 if (likely(skb_cloned(skb) || ntail > 0)) { 2539 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2540 if (unlikely(err)) 2541 goto free_skb; 2542 } 2543 2544 /* FIXME: The use of this function with non-linear skb's really needs 2545 * to be audited. 2546 */ 2547 err = skb_linearize(skb); 2548 if (unlikely(err)) 2549 goto free_skb; 2550 2551 memset(skb->data + skb->len, 0, pad); 2552 return 0; 2553 2554 free_skb: 2555 if (free_on_error) 2556 kfree_skb(skb); 2557 return err; 2558 } 2559 EXPORT_SYMBOL(__skb_pad); 2560 2561 /** 2562 * pskb_put - add data to the tail of a potentially fragmented buffer 2563 * @skb: start of the buffer to use 2564 * @tail: tail fragment of the buffer to use 2565 * @len: amount of data to add 2566 * 2567 * This function extends the used data area of the potentially 2568 * fragmented buffer. @tail must be the last fragment of @skb -- or 2569 * @skb itself. If this would exceed the total buffer size the kernel 2570 * will panic. A pointer to the first byte of the extra data is 2571 * returned. 2572 */ 2573 2574 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2575 { 2576 if (tail != skb) { 2577 skb->data_len += len; 2578 skb->len += len; 2579 } 2580 return skb_put(tail, len); 2581 } 2582 EXPORT_SYMBOL_GPL(pskb_put); 2583 2584 /** 2585 * skb_put - add data to a buffer 2586 * @skb: buffer to use 2587 * @len: amount of data to add 2588 * 2589 * This function extends the used data area of the buffer. If this would 2590 * exceed the total buffer size the kernel will panic. A pointer to the 2591 * first byte of the extra data is returned. 2592 */ 2593 void *skb_put(struct sk_buff *skb, unsigned int len) 2594 { 2595 void *tmp = skb_tail_pointer(skb); 2596 SKB_LINEAR_ASSERT(skb); 2597 skb->tail += len; 2598 skb->len += len; 2599 if (unlikely(skb->tail > skb->end)) 2600 skb_over_panic(skb, len, __builtin_return_address(0)); 2601 return tmp; 2602 } 2603 EXPORT_SYMBOL(skb_put); 2604 2605 /** 2606 * skb_push - add data to the start of a buffer 2607 * @skb: buffer to use 2608 * @len: amount of data to add 2609 * 2610 * This function extends the used data area of the buffer at the buffer 2611 * start. If this would exceed the total buffer headroom the kernel will 2612 * panic. A pointer to the first byte of the extra data is returned. 
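 *	A minimal sketch (illustrative only), prepending an Ethernet
 *	header once the payload has been written with skb_put():
 *
 *		struct ethhdr *eth = skb_push(skb, ETH_HLEN);
 *
 *		eth->h_proto = htons(ETH_P_IP);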
2613 */ 2614 void *skb_push(struct sk_buff *skb, unsigned int len) 2615 { 2616 skb->data -= len; 2617 skb->len += len; 2618 if (unlikely(skb->data < skb->head)) 2619 skb_under_panic(skb, len, __builtin_return_address(0)); 2620 return skb->data; 2621 } 2622 EXPORT_SYMBOL(skb_push); 2623 2624 /** 2625 * skb_pull - remove data from the start of a buffer 2626 * @skb: buffer to use 2627 * @len: amount of data to remove 2628 * 2629 * This function removes data from the start of a buffer, returning 2630 * the memory to the headroom. A pointer to the next data in the buffer 2631 * is returned. Once the data has been pulled future pushes will overwrite 2632 * the old data. 2633 */ 2634 void *skb_pull(struct sk_buff *skb, unsigned int len) 2635 { 2636 return skb_pull_inline(skb, len); 2637 } 2638 EXPORT_SYMBOL(skb_pull); 2639 2640 /** 2641 * skb_pull_data - remove data from the start of a buffer returning its 2642 * original position. 2643 * @skb: buffer to use 2644 * @len: amount of data to remove 2645 * 2646 * This function removes data from the start of a buffer, returning 2647 * the memory to the headroom. A pointer to the original data in the buffer 2648 * is returned after checking if there is enough data to pull. Once the 2649 * data has been pulled future pushes will overwrite the old data. 2650 */ 2651 void *skb_pull_data(struct sk_buff *skb, size_t len) 2652 { 2653 void *data = skb->data; 2654 2655 if (skb->len < len) 2656 return NULL; 2657 2658 skb_pull(skb, len); 2659 2660 return data; 2661 } 2662 EXPORT_SYMBOL(skb_pull_data); 2663 2664 /** 2665 * skb_trim - remove end from a buffer 2666 * @skb: buffer to alter 2667 * @len: new length 2668 * 2669 * Cut the length of a buffer down by removing data from the tail. If 2670 * the buffer is already under the length specified it is not modified. 2671 * The skb must be linear. 2672 */ 2673 void skb_trim(struct sk_buff *skb, unsigned int len) 2674 { 2675 if (skb->len > len) 2676 __skb_trim(skb, len); 2677 } 2678 EXPORT_SYMBOL(skb_trim); 2679 2680 /* Trims skb to length len. It can change skb pointers. 
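 * Callers normally use pskb_trim(); this out-of-line path handles
 * buffers with paged data and unclones them first when necessary.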
2681 */ 2682 2683 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2684 { 2685 struct sk_buff **fragp; 2686 struct sk_buff *frag; 2687 int offset = skb_headlen(skb); 2688 int nfrags = skb_shinfo(skb)->nr_frags; 2689 int i; 2690 int err; 2691 2692 if (skb_cloned(skb) && 2693 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2694 return err; 2695 2696 i = 0; 2697 if (offset >= len) 2698 goto drop_pages; 2699 2700 for (; i < nfrags; i++) { 2701 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2702 2703 if (end < len) { 2704 offset = end; 2705 continue; 2706 } 2707 2708 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2709 2710 drop_pages: 2711 skb_shinfo(skb)->nr_frags = i; 2712 2713 for (; i < nfrags; i++) 2714 skb_frag_unref(skb, i); 2715 2716 if (skb_has_frag_list(skb)) 2717 skb_drop_fraglist(skb); 2718 goto done; 2719 } 2720 2721 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2722 fragp = &frag->next) { 2723 int end = offset + frag->len; 2724 2725 if (skb_shared(frag)) { 2726 struct sk_buff *nfrag; 2727 2728 nfrag = skb_clone(frag, GFP_ATOMIC); 2729 if (unlikely(!nfrag)) 2730 return -ENOMEM; 2731 2732 nfrag->next = frag->next; 2733 consume_skb(frag); 2734 frag = nfrag; 2735 *fragp = frag; 2736 } 2737 2738 if (end < len) { 2739 offset = end; 2740 continue; 2741 } 2742 2743 if (end > len && 2744 unlikely((err = pskb_trim(frag, len - offset)))) 2745 return err; 2746 2747 if (frag->next) 2748 skb_drop_list(&frag->next); 2749 break; 2750 } 2751 2752 done: 2753 if (len > skb_headlen(skb)) { 2754 skb->data_len -= skb->len - len; 2755 skb->len = len; 2756 } else { 2757 skb->len = len; 2758 skb->data_len = 0; 2759 skb_set_tail_pointer(skb, len); 2760 } 2761 2762 if (!skb->sk || skb->destructor == sock_edemux) 2763 skb_condense(skb); 2764 return 0; 2765 } 2766 EXPORT_SYMBOL(___pskb_trim); 2767 2768 /* Note : use pskb_trim_rcsum() instead of calling this directly 2769 */ 2770 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2771 { 2772 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2773 int delta = skb->len - len; 2774 2775 skb->csum = csum_block_sub(skb->csum, 2776 skb_checksum(skb, len, delta, 0), 2777 len); 2778 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2779 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2780 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2781 2782 if (offset + sizeof(__sum16) > hdlen) 2783 return -EINVAL; 2784 } 2785 return __pskb_trim(skb, len); 2786 } 2787 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2788 2789 /** 2790 * __pskb_pull_tail - advance tail of skb header 2791 * @skb: buffer to reallocate 2792 * @delta: number of bytes to advance tail 2793 * 2794 * The function makes a sense only on a fragmented &sk_buff, 2795 * it expands header moving its tail forward and copying necessary 2796 * data from fragmented part. 2797 * 2798 * &sk_buff MUST have reference count of 1. 2799 * 2800 * Returns %NULL (and &sk_buff does not change) if pull failed 2801 * or value of new tail of skb in the case of success. 2802 * 2803 * All the pointers pointing into skb header may change and must be 2804 * reloaded after call to this function. 2805 */ 2806 2807 /* Moves tail of skb head forward, copying data from fragmented part, 2808 * when it is necessary. 2809 * 1. It may fail due to malloc failure. 2810 * 2. It may change skb pointers. 2811 * 2812 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
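 * Most code reaches it indirectly through pskb_may_pull(), e.g.:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 *		goto drop;
 *
 * which only falls back to this function when the requested bytes are
 * not already in the linear area.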
2813 */ 2814 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2815 { 2816 /* If skb has not enough free space at tail, get new one 2817 * plus 128 bytes for future expansions. If we have enough 2818 * room at tail, reallocate without expansion only if skb is cloned. 2819 */ 2820 int i, k, eat = (skb->tail + delta) - skb->end; 2821 2822 if (eat > 0 || skb_cloned(skb)) { 2823 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2824 GFP_ATOMIC)) 2825 return NULL; 2826 } 2827 2828 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2829 skb_tail_pointer(skb), delta)); 2830 2831 /* Optimization: no fragments, no reasons to preestimate 2832 * size of pulled pages. Superb. 2833 */ 2834 if (!skb_has_frag_list(skb)) 2835 goto pull_pages; 2836 2837 /* Estimate size of pulled pages. */ 2838 eat = delta; 2839 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2840 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2841 2842 if (size >= eat) 2843 goto pull_pages; 2844 eat -= size; 2845 } 2846 2847 /* If we need update frag list, we are in troubles. 2848 * Certainly, it is possible to add an offset to skb data, 2849 * but taking into account that pulling is expected to 2850 * be very rare operation, it is worth to fight against 2851 * further bloating skb head and crucify ourselves here instead. 2852 * Pure masohism, indeed. 8)8) 2853 */ 2854 if (eat) { 2855 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2856 struct sk_buff *clone = NULL; 2857 struct sk_buff *insp = NULL; 2858 2859 do { 2860 if (list->len <= eat) { 2861 /* Eaten as whole. */ 2862 eat -= list->len; 2863 list = list->next; 2864 insp = list; 2865 } else { 2866 /* Eaten partially. */ 2867 if (skb_is_gso(skb) && !list->head_frag && 2868 skb_headlen(list)) 2869 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2870 2871 if (skb_shared(list)) { 2872 /* Sucks! We need to fork list. :-( */ 2873 clone = skb_clone(list, GFP_ATOMIC); 2874 if (!clone) 2875 return NULL; 2876 insp = list->next; 2877 list = clone; 2878 } else { 2879 /* This may be pulled without 2880 * problems. */ 2881 insp = list; 2882 } 2883 if (!pskb_pull(list, eat)) { 2884 kfree_skb(clone); 2885 return NULL; 2886 } 2887 break; 2888 } 2889 } while (eat); 2890 2891 /* Free pulled out fragments. */ 2892 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2893 skb_shinfo(skb)->frag_list = list->next; 2894 consume_skb(list); 2895 } 2896 /* And insert new clone at head. */ 2897 if (clone) { 2898 clone->next = list; 2899 skb_shinfo(skb)->frag_list = clone; 2900 } 2901 } 2902 /* Success! Now we may commit changes to skb data. 
*/ 2903 2904 pull_pages: 2905 eat = delta; 2906 k = 0; 2907 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2908 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2909 2910 if (size <= eat) { 2911 skb_frag_unref(skb, i); 2912 eat -= size; 2913 } else { 2914 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2915 2916 *frag = skb_shinfo(skb)->frags[i]; 2917 if (eat) { 2918 skb_frag_off_add(frag, eat); 2919 skb_frag_size_sub(frag, eat); 2920 if (!i) 2921 goto end; 2922 eat = 0; 2923 } 2924 k++; 2925 } 2926 } 2927 skb_shinfo(skb)->nr_frags = k; 2928 2929 end: 2930 skb->tail += delta; 2931 skb->data_len -= delta; 2932 2933 if (!skb->data_len) 2934 skb_zcopy_clear(skb, false); 2935 2936 return skb_tail_pointer(skb); 2937 } 2938 EXPORT_SYMBOL(__pskb_pull_tail); 2939 2940 /** 2941 * skb_copy_bits - copy bits from skb to kernel buffer 2942 * @skb: source skb 2943 * @offset: offset in source 2944 * @to: destination buffer 2945 * @len: number of bytes to copy 2946 * 2947 * Copy the specified number of bytes from the source skb to the 2948 * destination buffer. 2949 * 2950 * CAUTION ! : 2951 * If its prototype is ever changed, 2952 * check arch/{*}/net/{*}.S files, 2953 * since it is called from BPF assembly code. 2954 */ 2955 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2956 { 2957 int start = skb_headlen(skb); 2958 struct sk_buff *frag_iter; 2959 int i, copy; 2960 2961 if (offset > (int)skb->len - len) 2962 goto fault; 2963 2964 /* Copy header. */ 2965 if ((copy = start - offset) > 0) { 2966 if (copy > len) 2967 copy = len; 2968 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2969 if ((len -= copy) == 0) 2970 return 0; 2971 offset += copy; 2972 to += copy; 2973 } 2974 2975 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2976 int end; 2977 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2978 2979 WARN_ON(start > offset + len); 2980 2981 end = start + skb_frag_size(f); 2982 if ((copy = end - offset) > 0) { 2983 u32 p_off, p_len, copied; 2984 struct page *p; 2985 u8 *vaddr; 2986 2987 if (copy > len) 2988 copy = len; 2989 2990 skb_frag_foreach_page(f, 2991 skb_frag_off(f) + offset - start, 2992 copy, p, p_off, p_len, copied) { 2993 vaddr = kmap_atomic(p); 2994 memcpy(to + copied, vaddr + p_off, p_len); 2995 kunmap_atomic(vaddr); 2996 } 2997 2998 if ((len -= copy) == 0) 2999 return 0; 3000 offset += copy; 3001 to += copy; 3002 } 3003 start = end; 3004 } 3005 3006 skb_walk_frags(skb, frag_iter) { 3007 int end; 3008 3009 WARN_ON(start > offset + len); 3010 3011 end = start + frag_iter->len; 3012 if ((copy = end - offset) > 0) { 3013 if (copy > len) 3014 copy = len; 3015 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 3016 goto fault; 3017 if ((len -= copy) == 0) 3018 return 0; 3019 offset += copy; 3020 to += copy; 3021 } 3022 start = end; 3023 } 3024 3025 if (!len) 3026 return 0; 3027 3028 fault: 3029 return -EFAULT; 3030 } 3031 EXPORT_SYMBOL(skb_copy_bits); 3032 3033 /* 3034 * Callback from splice_to_pipe(), if we need to release some pages 3035 * at the end of the spd in case we error'ed out in filling the pipe. 
3036 */ 3037 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3038 { 3039 put_page(spd->pages[i]); 3040 } 3041 3042 static struct page *linear_to_page(struct page *page, unsigned int *len, 3043 unsigned int *offset, 3044 struct sock *sk) 3045 { 3046 struct page_frag *pfrag = sk_page_frag(sk); 3047 3048 if (!sk_page_frag_refill(sk, pfrag)) 3049 return NULL; 3050 3051 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 3052 3053 memcpy(page_address(pfrag->page) + pfrag->offset, 3054 page_address(page) + *offset, *len); 3055 *offset = pfrag->offset; 3056 pfrag->offset += *len; 3057 3058 return pfrag->page; 3059 } 3060 3061 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 3062 struct page *page, 3063 unsigned int offset) 3064 { 3065 return spd->nr_pages && 3066 spd->pages[spd->nr_pages - 1] == page && 3067 (spd->partial[spd->nr_pages - 1].offset + 3068 spd->partial[spd->nr_pages - 1].len == offset); 3069 } 3070 3071 /* 3072 * Fill page/offset/length into spd, if it can hold more pages. 3073 */ 3074 static bool spd_fill_page(struct splice_pipe_desc *spd, 3075 struct pipe_inode_info *pipe, struct page *page, 3076 unsigned int *len, unsigned int offset, 3077 bool linear, 3078 struct sock *sk) 3079 { 3080 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3081 return true; 3082 3083 if (linear) { 3084 page = linear_to_page(page, len, &offset, sk); 3085 if (!page) 3086 return true; 3087 } 3088 if (spd_can_coalesce(spd, page, offset)) { 3089 spd->partial[spd->nr_pages - 1].len += *len; 3090 return false; 3091 } 3092 get_page(page); 3093 spd->pages[spd->nr_pages] = page; 3094 spd->partial[spd->nr_pages].len = *len; 3095 spd->partial[spd->nr_pages].offset = offset; 3096 spd->nr_pages++; 3097 3098 return false; 3099 } 3100 3101 static bool __splice_segment(struct page *page, unsigned int poff, 3102 unsigned int plen, unsigned int *off, 3103 unsigned int *len, 3104 struct splice_pipe_desc *spd, bool linear, 3105 struct sock *sk, 3106 struct pipe_inode_info *pipe) 3107 { 3108 if (!*len) 3109 return true; 3110 3111 /* skip this segment if already processed */ 3112 if (*off >= plen) { 3113 *off -= plen; 3114 return false; 3115 } 3116 3117 /* ignore any bits we already processed */ 3118 poff += *off; 3119 plen -= *off; 3120 *off = 0; 3121 3122 do { 3123 unsigned int flen = min(*len, plen); 3124 3125 if (spd_fill_page(spd, pipe, page, &flen, poff, 3126 linear, sk)) 3127 return true; 3128 poff += flen; 3129 plen -= flen; 3130 *len -= flen; 3131 } while (*len && plen); 3132 3133 return false; 3134 } 3135 3136 /* 3137 * Map linear and fragment data from the skb to spd. It reports true if the 3138 * pipe is full or if we already spliced the requested length. 3139 */ 3140 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3141 unsigned int *offset, unsigned int *len, 3142 struct splice_pipe_desc *spd, struct sock *sk) 3143 { 3144 int seg; 3145 struct sk_buff *iter; 3146 3147 /* map the linear part : 3148 * If skb->head_frag is set, this 'linear' part is backed by a 3149 * fragment, and if the head is not shared with any clones then 3150 * we can avoid a copy since we own the head portion of this page. 
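 * skb_head_is_locked() makes that call: when it returns true, the
 * head bytes are first copied into the socket's page frag via
 * linear_to_page() instead of being referenced in place.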
3151 */ 3152 if (__splice_segment(virt_to_page(skb->data), 3153 (unsigned long) skb->data & (PAGE_SIZE - 1), 3154 skb_headlen(skb), 3155 offset, len, spd, 3156 skb_head_is_locked(skb), 3157 sk, pipe)) 3158 return true; 3159 3160 /* 3161 * then map the fragments 3162 */ 3163 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3164 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3165 3166 if (__splice_segment(skb_frag_page(f), 3167 skb_frag_off(f), skb_frag_size(f), 3168 offset, len, spd, false, sk, pipe)) 3169 return true; 3170 } 3171 3172 skb_walk_frags(skb, iter) { 3173 if (*offset >= iter->len) { 3174 *offset -= iter->len; 3175 continue; 3176 } 3177 /* __skb_splice_bits() only fails if the output has no room 3178 * left, so no point in going over the frag_list for the error 3179 * case. 3180 */ 3181 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3182 return true; 3183 } 3184 3185 return false; 3186 } 3187 3188 /* 3189 * Map data from the skb to a pipe. Should handle both the linear part, 3190 * the fragments, and the frag list. 3191 */ 3192 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3193 struct pipe_inode_info *pipe, unsigned int tlen, 3194 unsigned int flags) 3195 { 3196 struct partial_page partial[MAX_SKB_FRAGS]; 3197 struct page *pages[MAX_SKB_FRAGS]; 3198 struct splice_pipe_desc spd = { 3199 .pages = pages, 3200 .partial = partial, 3201 .nr_pages_max = MAX_SKB_FRAGS, 3202 .ops = &nosteal_pipe_buf_ops, 3203 .spd_release = sock_spd_release, 3204 }; 3205 int ret = 0; 3206 3207 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3208 3209 if (spd.nr_pages) 3210 ret = splice_to_pipe(pipe, &spd); 3211 3212 return ret; 3213 } 3214 EXPORT_SYMBOL_GPL(skb_splice_bits); 3215 3216 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3217 { 3218 struct socket *sock = sk->sk_socket; 3219 size_t size = msg_data_left(msg); 3220 3221 if (!sock) 3222 return -EINVAL; 3223 3224 if (!sock->ops->sendmsg_locked) 3225 return sock_no_sendmsg_locked(sk, msg, size); 3226 3227 return sock->ops->sendmsg_locked(sk, msg, size); 3228 } 3229 3230 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3231 { 3232 struct socket *sock = sk->sk_socket; 3233 3234 if (!sock) 3235 return -EINVAL; 3236 return sock_sendmsg(sock, msg); 3237 } 3238 3239 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3240 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3241 int len, sendmsg_func sendmsg) 3242 { 3243 unsigned int orig_len = len; 3244 struct sk_buff *head = skb; 3245 unsigned short fragidx; 3246 int slen, ret; 3247 3248 do_frag_list: 3249 3250 /* Deal with head data */ 3251 while (offset < skb_headlen(skb) && len) { 3252 struct kvec kv; 3253 struct msghdr msg; 3254 3255 slen = min_t(int, len, skb_headlen(skb) - offset); 3256 kv.iov_base = skb->data + offset; 3257 kv.iov_len = slen; 3258 memset(&msg, 0, sizeof(msg)); 3259 msg.msg_flags = MSG_DONTWAIT; 3260 3261 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3262 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3263 sendmsg_unlocked, sk, &msg); 3264 if (ret <= 0) 3265 goto error; 3266 3267 offset += ret; 3268 len -= ret; 3269 } 3270 3271 /* All the data was skb head? 
*/ 3272 if (!len) 3273 goto out; 3274 3275 /* Make offset relative to start of frags */ 3276 offset -= skb_headlen(skb); 3277 3278 /* Find where we are in frag list */ 3279 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3280 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3281 3282 if (offset < skb_frag_size(frag)) 3283 break; 3284 3285 offset -= skb_frag_size(frag); 3286 } 3287 3288 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3289 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3290 3291 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3292 3293 while (slen) { 3294 struct bio_vec bvec; 3295 struct msghdr msg = { 3296 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3297 }; 3298 3299 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3300 skb_frag_off(frag) + offset); 3301 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3302 slen); 3303 3304 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3305 sendmsg_unlocked, sk, &msg); 3306 if (ret <= 0) 3307 goto error; 3308 3309 len -= ret; 3310 offset += ret; 3311 slen -= ret; 3312 } 3313 3314 offset = 0; 3315 } 3316 3317 if (len) { 3318 /* Process any frag lists */ 3319 3320 if (skb == head) { 3321 if (skb_has_frag_list(skb)) { 3322 skb = skb_shinfo(skb)->frag_list; 3323 goto do_frag_list; 3324 } 3325 } else if (skb->next) { 3326 skb = skb->next; 3327 goto do_frag_list; 3328 } 3329 } 3330 3331 out: 3332 return orig_len - len; 3333 3334 error: 3335 return orig_len == len ? ret : orig_len - len; 3336 } 3337 3338 /* Send skb data on a socket. Socket must be locked. */ 3339 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3340 int len) 3341 { 3342 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3343 } 3344 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3345 3346 /* Send skb data on a socket. Socket must be unlocked. */ 3347 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3348 { 3349 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3350 } 3351 3352 /** 3353 * skb_store_bits - store bits from kernel buffer to skb 3354 * @skb: destination buffer 3355 * @offset: offset in destination 3356 * @from: source buffer 3357 * @len: number of bytes to copy 3358 * 3359 * Copy the specified number of bytes from the source buffer to the 3360 * destination skb. This function handles all the messy bits of 3361 * traversing fragment lists and such. 
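 *	A minimal sketch (illustrative only), patching four bytes at a
 *	given offset of a possibly non-linear buffer:
 *
 *		__be32 val = htonl(0x01020304);
 *
 *		if (skb_store_bits(skb, offset, &val, sizeof(val)))
 *			return -EFAULT;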
3362 */ 3363 3364 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3365 { 3366 int start = skb_headlen(skb); 3367 struct sk_buff *frag_iter; 3368 int i, copy; 3369 3370 if (offset > (int)skb->len - len) 3371 goto fault; 3372 3373 if ((copy = start - offset) > 0) { 3374 if (copy > len) 3375 copy = len; 3376 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3377 if ((len -= copy) == 0) 3378 return 0; 3379 offset += copy; 3380 from += copy; 3381 } 3382 3383 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3384 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3385 int end; 3386 3387 WARN_ON(start > offset + len); 3388 3389 end = start + skb_frag_size(frag); 3390 if ((copy = end - offset) > 0) { 3391 u32 p_off, p_len, copied; 3392 struct page *p; 3393 u8 *vaddr; 3394 3395 if (copy > len) 3396 copy = len; 3397 3398 skb_frag_foreach_page(frag, 3399 skb_frag_off(frag) + offset - start, 3400 copy, p, p_off, p_len, copied) { 3401 vaddr = kmap_atomic(p); 3402 memcpy(vaddr + p_off, from + copied, p_len); 3403 kunmap_atomic(vaddr); 3404 } 3405 3406 if ((len -= copy) == 0) 3407 return 0; 3408 offset += copy; 3409 from += copy; 3410 } 3411 start = end; 3412 } 3413 3414 skb_walk_frags(skb, frag_iter) { 3415 int end; 3416 3417 WARN_ON(start > offset + len); 3418 3419 end = start + frag_iter->len; 3420 if ((copy = end - offset) > 0) { 3421 if (copy > len) 3422 copy = len; 3423 if (skb_store_bits(frag_iter, offset - start, 3424 from, copy)) 3425 goto fault; 3426 if ((len -= copy) == 0) 3427 return 0; 3428 offset += copy; 3429 from += copy; 3430 } 3431 start = end; 3432 } 3433 if (!len) 3434 return 0; 3435 3436 fault: 3437 return -EFAULT; 3438 } 3439 EXPORT_SYMBOL(skb_store_bits); 3440 3441 /* Checksum skb data. */ 3442 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3443 __wsum csum, const struct skb_checksum_ops *ops) 3444 { 3445 int start = skb_headlen(skb); 3446 int i, copy = start - offset; 3447 struct sk_buff *frag_iter; 3448 int pos = 0; 3449 3450 /* Checksum header. 
*/ 3451 if (copy > 0) { 3452 if (copy > len) 3453 copy = len; 3454 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3455 skb->data + offset, copy, csum); 3456 if ((len -= copy) == 0) 3457 return csum; 3458 offset += copy; 3459 pos = copy; 3460 } 3461 3462 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3463 int end; 3464 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3465 3466 WARN_ON(start > offset + len); 3467 3468 end = start + skb_frag_size(frag); 3469 if ((copy = end - offset) > 0) { 3470 u32 p_off, p_len, copied; 3471 struct page *p; 3472 __wsum csum2; 3473 u8 *vaddr; 3474 3475 if (copy > len) 3476 copy = len; 3477 3478 skb_frag_foreach_page(frag, 3479 skb_frag_off(frag) + offset - start, 3480 copy, p, p_off, p_len, copied) { 3481 vaddr = kmap_atomic(p); 3482 csum2 = INDIRECT_CALL_1(ops->update, 3483 csum_partial_ext, 3484 vaddr + p_off, p_len, 0); 3485 kunmap_atomic(vaddr); 3486 csum = INDIRECT_CALL_1(ops->combine, 3487 csum_block_add_ext, csum, 3488 csum2, pos, p_len); 3489 pos += p_len; 3490 } 3491 3492 if (!(len -= copy)) 3493 return csum; 3494 offset += copy; 3495 } 3496 start = end; 3497 } 3498 3499 skb_walk_frags(skb, frag_iter) { 3500 int end; 3501 3502 WARN_ON(start > offset + len); 3503 3504 end = start + frag_iter->len; 3505 if ((copy = end - offset) > 0) { 3506 __wsum csum2; 3507 if (copy > len) 3508 copy = len; 3509 csum2 = __skb_checksum(frag_iter, offset - start, 3510 copy, 0, ops); 3511 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3512 csum, csum2, pos, copy); 3513 if ((len -= copy) == 0) 3514 return csum; 3515 offset += copy; 3516 pos += copy; 3517 } 3518 start = end; 3519 } 3520 BUG_ON(len); 3521 3522 return csum; 3523 } 3524 EXPORT_SYMBOL(__skb_checksum); 3525 3526 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3527 int len, __wsum csum) 3528 { 3529 const struct skb_checksum_ops ops = { 3530 .update = csum_partial_ext, 3531 .combine = csum_block_add_ext, 3532 }; 3533 3534 return __skb_checksum(skb, offset, len, csum, &ops); 3535 } 3536 EXPORT_SYMBOL(skb_checksum); 3537 3538 /* Both of above in one bottle. */ 3539 3540 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3541 u8 *to, int len) 3542 { 3543 int start = skb_headlen(skb); 3544 int i, copy = start - offset; 3545 struct sk_buff *frag_iter; 3546 int pos = 0; 3547 __wsum csum = 0; 3548 3549 /* Copy header. 
*/ 3550 if (copy > 0) { 3551 if (copy > len) 3552 copy = len; 3553 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3554 copy); 3555 if ((len -= copy) == 0) 3556 return csum; 3557 offset += copy; 3558 to += copy; 3559 pos = copy; 3560 } 3561 3562 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3563 int end; 3564 3565 WARN_ON(start > offset + len); 3566 3567 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3568 if ((copy = end - offset) > 0) { 3569 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3570 u32 p_off, p_len, copied; 3571 struct page *p; 3572 __wsum csum2; 3573 u8 *vaddr; 3574 3575 if (copy > len) 3576 copy = len; 3577 3578 skb_frag_foreach_page(frag, 3579 skb_frag_off(frag) + offset - start, 3580 copy, p, p_off, p_len, copied) { 3581 vaddr = kmap_atomic(p); 3582 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3583 to + copied, 3584 p_len); 3585 kunmap_atomic(vaddr); 3586 csum = csum_block_add(csum, csum2, pos); 3587 pos += p_len; 3588 } 3589 3590 if (!(len -= copy)) 3591 return csum; 3592 offset += copy; 3593 to += copy; 3594 } 3595 start = end; 3596 } 3597 3598 skb_walk_frags(skb, frag_iter) { 3599 __wsum csum2; 3600 int end; 3601 3602 WARN_ON(start > offset + len); 3603 3604 end = start + frag_iter->len; 3605 if ((copy = end - offset) > 0) { 3606 if (copy > len) 3607 copy = len; 3608 csum2 = skb_copy_and_csum_bits(frag_iter, 3609 offset - start, 3610 to, copy); 3611 csum = csum_block_add(csum, csum2, pos); 3612 if ((len -= copy) == 0) 3613 return csum; 3614 offset += copy; 3615 to += copy; 3616 pos += copy; 3617 } 3618 start = end; 3619 } 3620 BUG_ON(len); 3621 return csum; 3622 } 3623 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3624 3625 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3626 { 3627 __sum16 sum; 3628 3629 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3630 /* See comments in __skb_checksum_complete(). */ 3631 if (likely(!sum)) { 3632 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3633 !skb->csum_complete_sw) 3634 netdev_rx_csum_fault(skb->dev, skb); 3635 } 3636 if (!skb_shared(skb)) 3637 skb->csum_valid = !sum; 3638 return sum; 3639 } 3640 EXPORT_SYMBOL(__skb_checksum_complete_head); 3641 3642 /* This function assumes skb->csum already holds pseudo header's checksum, 3643 * which has been changed from the hardware checksum, for example, by 3644 * __skb_checksum_validate_complete(). And, the original skb->csum must 3645 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3646 * 3647 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3648 * zero. The new checksum is stored back into skb->csum unless the skb is 3649 * shared. 3650 */ 3651 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3652 { 3653 __wsum csum; 3654 __sum16 sum; 3655 3656 csum = skb_checksum(skb, 0, skb->len, 0); 3657 3658 sum = csum_fold(csum_add(skb->csum, csum)); 3659 /* This check is inverted, because we already knew the hardware 3660 * checksum is invalid before calling this function. So, if the 3661 * re-computed checksum is valid instead, then we have a mismatch 3662 * between the original skb->csum and skb_checksum(). This means either 3663 * the original hardware checksum is incorrect or we screw up skb->csum 3664 * when moving skb->data around. 
3665 */ 3666 if (likely(!sum)) { 3667 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3668 !skb->csum_complete_sw) 3669 netdev_rx_csum_fault(skb->dev, skb); 3670 } 3671 3672 if (!skb_shared(skb)) { 3673 /* Save full packet checksum */ 3674 skb->csum = csum; 3675 skb->ip_summed = CHECKSUM_COMPLETE; 3676 skb->csum_complete_sw = 1; 3677 skb->csum_valid = !sum; 3678 } 3679 3680 return sum; 3681 } 3682 EXPORT_SYMBOL(__skb_checksum_complete); 3683 3684 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3685 { 3686 net_warn_ratelimited( 3687 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3688 __func__); 3689 return 0; 3690 } 3691 3692 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3693 int offset, int len) 3694 { 3695 net_warn_ratelimited( 3696 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3697 __func__); 3698 return 0; 3699 } 3700 3701 static const struct skb_checksum_ops default_crc32c_ops = { 3702 .update = warn_crc32c_csum_update, 3703 .combine = warn_crc32c_csum_combine, 3704 }; 3705 3706 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3707 &default_crc32c_ops; 3708 EXPORT_SYMBOL(crc32c_csum_stub); 3709 3710 /** 3711 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3712 * @from: source buffer 3713 * 3714 * Calculates the amount of linear headroom needed in the 'to' skb passed 3715 * into skb_zerocopy(). 3716 */ 3717 unsigned int 3718 skb_zerocopy_headlen(const struct sk_buff *from) 3719 { 3720 unsigned int hlen = 0; 3721 3722 if (!from->head_frag || 3723 skb_headlen(from) < L1_CACHE_BYTES || 3724 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3725 hlen = skb_headlen(from); 3726 if (!hlen) 3727 hlen = from->len; 3728 } 3729 3730 if (skb_has_frag_list(from)) 3731 hlen = from->len; 3732 3733 return hlen; 3734 } 3735 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3736 3737 /** 3738 * skb_zerocopy - Zero copy skb to skb 3739 * @to: destination buffer 3740 * @from: source buffer 3741 * @len: number of bytes to copy from source buffer 3742 * @hlen: size of linear headroom in destination buffer 3743 * 3744 * Copies up to `len` bytes from `from` to `to` by creating references 3745 * to the frags in the source buffer. 3746 * 3747 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3748 * headroom in the `to` buffer. 
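 * A usage sketch (illustrative only), mirroring a packet into a newly
 * allocated skb while sharing its page fragments:
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (to)
 *		err = skb_zerocopy(to, from, from->len, hlen);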
3749 * 3750 * Return value: 3751 * 0: everything is OK 3752 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3753 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3754 */ 3755 int 3756 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3757 { 3758 int i, j = 0; 3759 int plen = 0; /* length of skb->head fragment */ 3760 int ret; 3761 struct page *page; 3762 unsigned int offset; 3763 3764 BUG_ON(!from->head_frag && !hlen); 3765 3766 /* dont bother with small payloads */ 3767 if (len <= skb_tailroom(to)) 3768 return skb_copy_bits(from, 0, skb_put(to, len), len); 3769 3770 if (hlen) { 3771 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3772 if (unlikely(ret)) 3773 return ret; 3774 len -= hlen; 3775 } else { 3776 plen = min_t(int, skb_headlen(from), len); 3777 if (plen) { 3778 page = virt_to_head_page(from->head); 3779 offset = from->data - (unsigned char *)page_address(page); 3780 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3781 offset, plen); 3782 get_page(page); 3783 j = 1; 3784 len -= plen; 3785 } 3786 } 3787 3788 skb_len_add(to, len + plen); 3789 3790 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3791 skb_tx_error(from); 3792 return -ENOMEM; 3793 } 3794 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3795 3796 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3797 int size; 3798 3799 if (!len) 3800 break; 3801 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3802 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3803 len); 3804 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3805 len -= size; 3806 skb_frag_ref(to, j); 3807 j++; 3808 } 3809 skb_shinfo(to)->nr_frags = j; 3810 3811 return 0; 3812 } 3813 EXPORT_SYMBOL_GPL(skb_zerocopy); 3814 3815 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3816 { 3817 __wsum csum; 3818 long csstart; 3819 3820 if (skb->ip_summed == CHECKSUM_PARTIAL) 3821 csstart = skb_checksum_start_offset(skb); 3822 else 3823 csstart = skb_headlen(skb); 3824 3825 BUG_ON(csstart > skb_headlen(skb)); 3826 3827 skb_copy_from_linear_data(skb, to, csstart); 3828 3829 csum = 0; 3830 if (csstart != skb->len) 3831 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3832 skb->len - csstart); 3833 3834 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3835 long csstuff = csstart + skb->csum_offset; 3836 3837 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3838 } 3839 } 3840 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3841 3842 /** 3843 * skb_dequeue - remove from the head of the queue 3844 * @list: list to dequeue from 3845 * 3846 * Remove the head of the list. The list lock is taken so the function 3847 * may be used safely with other locking list functions. The head item is 3848 * returned or %NULL if the list is empty. 3849 */ 3850 3851 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3852 { 3853 unsigned long flags; 3854 struct sk_buff *result; 3855 3856 spin_lock_irqsave(&list->lock, flags); 3857 result = __skb_dequeue(list); 3858 spin_unlock_irqrestore(&list->lock, flags); 3859 return result; 3860 } 3861 EXPORT_SYMBOL(skb_dequeue); 3862 3863 /** 3864 * skb_dequeue_tail - remove from the tail of the queue 3865 * @list: list to dequeue from 3866 * 3867 * Remove the tail of the list. The list lock is taken so the function 3868 * may be used safely with other locking list functions. The tail item is 3869 * returned or %NULL if the list is empty. 
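 *	For example, a private backlog can be drained in reverse arrival
 *	order on teardown (illustrative sketch):
 *
 *		while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *			kfree_skb(skb);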
3870 */ 3871 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3872 { 3873 unsigned long flags; 3874 struct sk_buff *result; 3875 3876 spin_lock_irqsave(&list->lock, flags); 3877 result = __skb_dequeue_tail(list); 3878 spin_unlock_irqrestore(&list->lock, flags); 3879 return result; 3880 } 3881 EXPORT_SYMBOL(skb_dequeue_tail); 3882 3883 /** 3884 * skb_queue_purge_reason - empty a list 3885 * @list: list to empty 3886 * @reason: drop reason 3887 * 3888 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3889 * the list and one reference dropped. This function takes the list 3890 * lock and is atomic with respect to other list locking functions. 3891 */ 3892 void skb_queue_purge_reason(struct sk_buff_head *list, 3893 enum skb_drop_reason reason) 3894 { 3895 struct sk_buff_head tmp; 3896 unsigned long flags; 3897 3898 if (skb_queue_empty_lockless(list)) 3899 return; 3900 3901 __skb_queue_head_init(&tmp); 3902 3903 spin_lock_irqsave(&list->lock, flags); 3904 skb_queue_splice_init(list, &tmp); 3905 spin_unlock_irqrestore(&list->lock, flags); 3906 3907 __skb_queue_purge_reason(&tmp, reason); 3908 } 3909 EXPORT_SYMBOL(skb_queue_purge_reason); 3910 3911 /** 3912 * skb_rbtree_purge - empty a skb rbtree 3913 * @root: root of the rbtree to empty 3914 * Return value: the sum of truesizes of all purged skbs. 3915 * 3916 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3917 * the list and one reference dropped. This function does not take 3918 * any lock. Synchronization should be handled by the caller (e.g., TCP 3919 * out-of-order queue is protected by the socket lock). 3920 */ 3921 unsigned int skb_rbtree_purge(struct rb_root *root) 3922 { 3923 struct rb_node *p = rb_first(root); 3924 unsigned int sum = 0; 3925 3926 while (p) { 3927 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3928 3929 p = rb_next(p); 3930 rb_erase(&skb->rbnode, root); 3931 sum += skb->truesize; 3932 kfree_skb(skb); 3933 } 3934 return sum; 3935 } 3936 3937 void skb_errqueue_purge(struct sk_buff_head *list) 3938 { 3939 struct sk_buff *skb, *next; 3940 struct sk_buff_head kill; 3941 unsigned long flags; 3942 3943 __skb_queue_head_init(&kill); 3944 3945 spin_lock_irqsave(&list->lock, flags); 3946 skb_queue_walk_safe(list, skb, next) { 3947 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3948 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3949 continue; 3950 __skb_unlink(skb, list); 3951 __skb_queue_tail(&kill, skb); 3952 } 3953 spin_unlock_irqrestore(&list->lock, flags); 3954 __skb_queue_purge(&kill); 3955 } 3956 EXPORT_SYMBOL(skb_errqueue_purge); 3957 3958 /** 3959 * skb_queue_head - queue a buffer at the list head 3960 * @list: list to use 3961 * @newsk: buffer to queue 3962 * 3963 * Queue a buffer at the start of the list. This function takes the 3964 * list lock and can be used safely with other locking &sk_buff functions 3965 * safely. 3966 * 3967 * A buffer cannot be placed on two lists at the same time. 3968 */ 3969 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3970 { 3971 unsigned long flags; 3972 3973 spin_lock_irqsave(&list->lock, flags); 3974 __skb_queue_head(list, newsk); 3975 spin_unlock_irqrestore(&list->lock, flags); 3976 } 3977 EXPORT_SYMBOL(skb_queue_head); 3978 3979 /** 3980 * skb_queue_tail - queue a buffer at the list tail 3981 * @list: list to use 3982 * @newsk: buffer to queue 3983 * 3984 * Queue a buffer at the tail of the list. 
This function takes the 3985 * list lock and can be used safely with other locking &sk_buff functions 3986 * safely. 3987 * 3988 * A buffer cannot be placed on two lists at the same time. 3989 */ 3990 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3991 { 3992 unsigned long flags; 3993 3994 spin_lock_irqsave(&list->lock, flags); 3995 __skb_queue_tail(list, newsk); 3996 spin_unlock_irqrestore(&list->lock, flags); 3997 } 3998 EXPORT_SYMBOL(skb_queue_tail); 3999 4000 /** 4001 * skb_unlink - remove a buffer from a list 4002 * @skb: buffer to remove 4003 * @list: list to use 4004 * 4005 * Remove a packet from a list. The list locks are taken and this 4006 * function is atomic with respect to other list locked calls 4007 * 4008 * You must know what list the SKB is on. 4009 */ 4010 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 4011 { 4012 unsigned long flags; 4013 4014 spin_lock_irqsave(&list->lock, flags); 4015 __skb_unlink(skb, list); 4016 spin_unlock_irqrestore(&list->lock, flags); 4017 } 4018 EXPORT_SYMBOL(skb_unlink); 4019 4020 /** 4021 * skb_append - append a buffer 4022 * @old: buffer to insert after 4023 * @newsk: buffer to insert 4024 * @list: list to use 4025 * 4026 * Place a packet after a given packet in a list. The list locks are taken 4027 * and this function is atomic with respect to other list locked calls. 4028 * A buffer cannot be placed on two lists at the same time. 4029 */ 4030 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 4031 { 4032 unsigned long flags; 4033 4034 spin_lock_irqsave(&list->lock, flags); 4035 __skb_queue_after(list, old, newsk); 4036 spin_unlock_irqrestore(&list->lock, flags); 4037 } 4038 EXPORT_SYMBOL(skb_append); 4039 4040 static inline void skb_split_inside_header(struct sk_buff *skb, 4041 struct sk_buff* skb1, 4042 const u32 len, const int pos) 4043 { 4044 int i; 4045 4046 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 4047 pos - len); 4048 /* And move data appendix as is. */ 4049 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 4050 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 4051 4052 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 4053 skb_shinfo(skb)->nr_frags = 0; 4054 skb1->data_len = skb->data_len; 4055 skb1->len += skb1->data_len; 4056 skb->data_len = 0; 4057 skb->len = len; 4058 skb_set_tail_pointer(skb, len); 4059 } 4060 4061 static inline void skb_split_no_header(struct sk_buff *skb, 4062 struct sk_buff* skb1, 4063 const u32 len, int pos) 4064 { 4065 int i, k = 0; 4066 const int nfrags = skb_shinfo(skb)->nr_frags; 4067 4068 skb_shinfo(skb)->nr_frags = 0; 4069 skb1->len = skb1->data_len = skb->len - len; 4070 skb->len = len; 4071 skb->data_len = len - pos; 4072 4073 for (i = 0; i < nfrags; i++) { 4074 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4075 4076 if (pos + size > len) { 4077 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 4078 4079 if (pos < len) { 4080 /* Split frag. 4081 * We have two variants in this case: 4082 * 1. Move all the frag to the second 4083 * part, if it is possible. F.e. 4084 * this approach is mandatory for TUX, 4085 * where splitting is expensive. 4086 * 2. Split is accurately. We make this. 
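 * The straddling frag ends up shared by both skbs: skb keeps its
 * first (len - pos) bytes, the copy already placed in skb1->frags[0]
 * is advanced past them, and an extra page reference is taken below.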
4087 */ 4088 skb_frag_ref(skb, i); 4089 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 4090 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 4091 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 4092 skb_shinfo(skb)->nr_frags++; 4093 } 4094 k++; 4095 } else 4096 skb_shinfo(skb)->nr_frags++; 4097 pos += size; 4098 } 4099 skb_shinfo(skb1)->nr_frags = k; 4100 } 4101 4102 /** 4103 * skb_split - Split fragmented skb to two parts at length len. 4104 * @skb: the buffer to split 4105 * @skb1: the buffer to receive the second part 4106 * @len: new length for skb 4107 */ 4108 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 4109 { 4110 int pos = skb_headlen(skb); 4111 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 4112 4113 skb_zcopy_downgrade_managed(skb); 4114 4115 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 4116 skb_zerocopy_clone(skb1, skb, 0); 4117 if (len < pos) /* Split line is inside header. */ 4118 skb_split_inside_header(skb, skb1, len, pos); 4119 else /* Second chunk has no header, nothing to copy. */ 4120 skb_split_no_header(skb, skb1, len, pos); 4121 } 4122 EXPORT_SYMBOL(skb_split); 4123 4124 /* Shifting from/to a cloned skb is a no-go. 4125 * 4126 * Caller cannot keep skb_shinfo related pointers past calling here! 4127 */ 4128 static int skb_prepare_for_shift(struct sk_buff *skb) 4129 { 4130 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 4131 } 4132 4133 /** 4134 * skb_shift - Shifts paged data partially from skb to another 4135 * @tgt: buffer into which tail data gets added 4136 * @skb: buffer from which the paged data comes from 4137 * @shiftlen: shift up to this many bytes 4138 * 4139 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4140 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4141 * It's up to caller to free skb if everything was shifted. 4142 * 4143 * If @tgt runs out of frags, the whole operation is aborted. 4144 * 4145 * Skb cannot include anything else but paged data while tgt is allowed 4146 * to have non-paged data as well. 4147 * 4148 * TODO: full sized shift could be optimized but that would need 4149 * specialized skb free'er to handle frags without up-to-date nr_frags. 4150 */ 4151 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4152 { 4153 int from, to, merge, todo; 4154 skb_frag_t *fragfrom, *fragto; 4155 4156 BUG_ON(shiftlen > skb->len); 4157 4158 if (skb_headlen(skb)) 4159 return 0; 4160 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4161 return 0; 4162 4163 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); 4164 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); 4165 4166 todo = shiftlen; 4167 from = 0; 4168 to = skb_shinfo(tgt)->nr_frags; 4169 fragfrom = &skb_shinfo(skb)->frags[from]; 4170 4171 /* Actual merge is delayed until the point when we know we can 4172 * commit all, so that we don't have to undo partial changes 4173 */ 4174 if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4175 skb_frag_off(fragfrom))) { 4176 merge = -1; 4177 } else { 4178 merge = to - 1; 4179 4180 todo -= skb_frag_size(fragfrom); 4181 if (todo < 0) { 4182 if (skb_prepare_for_shift(skb) || 4183 skb_prepare_for_shift(tgt)) 4184 return 0; 4185 4186 /* All previous frag pointers might be stale! 
*/ 4187 fragfrom = &skb_shinfo(skb)->frags[from]; 4188 fragto = &skb_shinfo(tgt)->frags[merge]; 4189 4190 skb_frag_size_add(fragto, shiftlen); 4191 skb_frag_size_sub(fragfrom, shiftlen); 4192 skb_frag_off_add(fragfrom, shiftlen); 4193 4194 goto onlymerged; 4195 } 4196 4197 from++; 4198 } 4199 4200 /* Skip full, not-fitting skb to avoid expensive operations */ 4201 if ((shiftlen == skb->len) && 4202 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4203 return 0; 4204 4205 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4206 return 0; 4207 4208 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4209 if (to == MAX_SKB_FRAGS) 4210 return 0; 4211 4212 fragfrom = &skb_shinfo(skb)->frags[from]; 4213 fragto = &skb_shinfo(tgt)->frags[to]; 4214 4215 if (todo >= skb_frag_size(fragfrom)) { 4216 *fragto = *fragfrom; 4217 todo -= skb_frag_size(fragfrom); 4218 from++; 4219 to++; 4220 4221 } else { 4222 __skb_frag_ref(fragfrom); 4223 skb_frag_page_copy(fragto, fragfrom); 4224 skb_frag_off_copy(fragto, fragfrom); 4225 skb_frag_size_set(fragto, todo); 4226 4227 skb_frag_off_add(fragfrom, todo); 4228 skb_frag_size_sub(fragfrom, todo); 4229 todo = 0; 4230 4231 to++; 4232 break; 4233 } 4234 } 4235 4236 /* Ready to "commit" this state change to tgt */ 4237 skb_shinfo(tgt)->nr_frags = to; 4238 4239 if (merge >= 0) { 4240 fragfrom = &skb_shinfo(skb)->frags[0]; 4241 fragto = &skb_shinfo(tgt)->frags[merge]; 4242 4243 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4244 __skb_frag_unref(fragfrom, skb->pp_recycle); 4245 } 4246 4247 /* Reposition in the original skb */ 4248 to = 0; 4249 while (from < skb_shinfo(skb)->nr_frags) 4250 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4251 skb_shinfo(skb)->nr_frags = to; 4252 4253 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4254 4255 onlymerged: 4256 /* Most likely the tgt won't ever need its checksum anymore, skb on 4257 * the other hand might need it if it needs to be resent 4258 */ 4259 tgt->ip_summed = CHECKSUM_PARTIAL; 4260 skb->ip_summed = CHECKSUM_PARTIAL; 4261 4262 skb_len_add(skb, -shiftlen); 4263 skb_len_add(tgt, shiftlen); 4264 4265 return shiftlen; 4266 } 4267 4268 /** 4269 * skb_prepare_seq_read - Prepare a sequential read of skb data 4270 * @skb: the buffer to read 4271 * @from: lower offset of data to be read 4272 * @to: upper offset of data to be read 4273 * @st: state variable 4274 * 4275 * Initializes the specified state variable. Must be called before 4276 * invoking skb_seq_read() for the first time. 4277 */ 4278 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4279 unsigned int to, struct skb_seq_state *st) 4280 { 4281 st->lower_offset = from; 4282 st->upper_offset = to; 4283 st->root_skb = st->cur_skb = skb; 4284 st->frag_idx = st->stepped_offset = 0; 4285 st->frag_data = NULL; 4286 st->frag_off = 0; 4287 } 4288 EXPORT_SYMBOL(skb_prepare_seq_read); 4289 4290 /** 4291 * skb_seq_read - Sequentially read skb data 4292 * @consumed: number of bytes consumed by the caller so far 4293 * @data: destination pointer for data to be returned 4294 * @st: state variable 4295 * 4296 * Reads a block of skb data at @consumed relative to the 4297 * lower offset specified to skb_prepare_seq_read(). Assigns 4298 * the head of the data block to @data and returns the length 4299 * of the block or 0 if the end of the skb data or the upper 4300 * offset has been reached. 4301 * 4302 * The caller is not required to consume all of the data 4303 * returned, i.e. 
@consumed is typically set to the number 4304 * of bytes already consumed and the next call to 4305 * skb_seq_read() will return the remaining part of the block. 4306 * 4307 * Note 1: The size of each block of data returned can be arbitrary, 4308 * this limitation is the cost for zerocopy sequential 4309 * reads of potentially non linear data. 4310 * 4311 * Note 2: Fragment lists within fragments are not implemented 4312 * at the moment, state->root_skb could be replaced with 4313 * a stack for this purpose. 4314 */ 4315 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4316 struct skb_seq_state *st) 4317 { 4318 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4319 skb_frag_t *frag; 4320 4321 if (unlikely(abs_offset >= st->upper_offset)) { 4322 if (st->frag_data) { 4323 kunmap_atomic(st->frag_data); 4324 st->frag_data = NULL; 4325 } 4326 return 0; 4327 } 4328 4329 next_skb: 4330 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4331 4332 if (abs_offset < block_limit && !st->frag_data) { 4333 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4334 return block_limit - abs_offset; 4335 } 4336 4337 if (st->frag_idx == 0 && !st->frag_data) 4338 st->stepped_offset += skb_headlen(st->cur_skb); 4339 4340 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4341 unsigned int pg_idx, pg_off, pg_sz; 4342 4343 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4344 4345 pg_idx = 0; 4346 pg_off = skb_frag_off(frag); 4347 pg_sz = skb_frag_size(frag); 4348 4349 if (skb_frag_must_loop(skb_frag_page(frag))) { 4350 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4351 pg_off = offset_in_page(pg_off + st->frag_off); 4352 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4353 PAGE_SIZE - pg_off); 4354 } 4355 4356 block_limit = pg_sz + st->stepped_offset; 4357 if (abs_offset < block_limit) { 4358 if (!st->frag_data) 4359 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4360 4361 *data = (u8 *)st->frag_data + pg_off + 4362 (abs_offset - st->stepped_offset); 4363 4364 return block_limit - abs_offset; 4365 } 4366 4367 if (st->frag_data) { 4368 kunmap_atomic(st->frag_data); 4369 st->frag_data = NULL; 4370 } 4371 4372 st->stepped_offset += pg_sz; 4373 st->frag_off += pg_sz; 4374 if (st->frag_off == skb_frag_size(frag)) { 4375 st->frag_off = 0; 4376 st->frag_idx++; 4377 } 4378 } 4379 4380 if (st->frag_data) { 4381 kunmap_atomic(st->frag_data); 4382 st->frag_data = NULL; 4383 } 4384 4385 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4386 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4387 st->frag_idx = 0; 4388 goto next_skb; 4389 } else if (st->cur_skb->next) { 4390 st->cur_skb = st->cur_skb->next; 4391 st->frag_idx = 0; 4392 goto next_skb; 4393 } 4394 4395 return 0; 4396 } 4397 EXPORT_SYMBOL(skb_seq_read); 4398 4399 /** 4400 * skb_abort_seq_read - Abort a sequential read of skb data 4401 * @st: state variable 4402 * 4403 * Must be called if skb_seq_read() was not called until it 4404 * returned 0. 4405 */ 4406 void skb_abort_seq_read(struct skb_seq_state *st) 4407 { 4408 if (st->frag_data) 4409 kunmap_atomic(st->frag_data); 4410 } 4411 EXPORT_SYMBOL(skb_abort_seq_read); 4412 4413 /** 4414 * skb_copy_seq_read() - copy from a skb_seq_state to a buffer 4415 * @st: source skb_seq_state 4416 * @offset: offset in source 4417 * @to: destination buffer 4418 * @len: number of bytes to copy 4419 * 4420 * Copy @len bytes from @offset bytes into the source @st to the destination 4421 * buffer @to. 
`offset` should increase (or be unchanged) with each subsequent 4422 * call to this function. If offset needs to decrease from the previous use `st` 4423 * should be reset first. 4424 * 4425 * Return: 0 on success or -EINVAL if the copy ended early 4426 */ 4427 int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len) 4428 { 4429 const u8 *data; 4430 u32 sqlen; 4431 4432 for (;;) { 4433 sqlen = skb_seq_read(offset, &data, st); 4434 if (sqlen == 0) 4435 return -EINVAL; 4436 if (sqlen >= len) { 4437 memcpy(to, data, len); 4438 return 0; 4439 } 4440 memcpy(to, data, sqlen); 4441 to += sqlen; 4442 offset += sqlen; 4443 len -= sqlen; 4444 } 4445 } 4446 EXPORT_SYMBOL(skb_copy_seq_read); 4447 4448 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4449 4450 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4451 struct ts_config *conf, 4452 struct ts_state *state) 4453 { 4454 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4455 } 4456 4457 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4458 { 4459 skb_abort_seq_read(TS_SKB_CB(state)); 4460 } 4461 4462 /** 4463 * skb_find_text - Find a text pattern in skb data 4464 * @skb: the buffer to look in 4465 * @from: search offset 4466 * @to: search limit 4467 * @config: textsearch configuration 4468 * 4469 * Finds a pattern in the skb data according to the specified 4470 * textsearch configuration. Use textsearch_next() to retrieve 4471 * subsequent occurrences of the pattern. Returns the offset 4472 * to the first occurrence or UINT_MAX if no match was found. 4473 */ 4474 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4475 unsigned int to, struct ts_config *config) 4476 { 4477 unsigned int patlen = config->ops->get_pattern_len(config); 4478 struct ts_state state; 4479 unsigned int ret; 4480 4481 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4482 4483 config->get_next_block = skb_ts_get_next_block; 4484 config->finish = skb_ts_finish; 4485 4486 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4487 4488 ret = textsearch_find(config, &state); 4489 return (ret + patlen <= to - from ? ret : UINT_MAX); 4490 } 4491 EXPORT_SYMBOL(skb_find_text); 4492 4493 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4494 int offset, size_t size, size_t max_frags) 4495 { 4496 int i = skb_shinfo(skb)->nr_frags; 4497 4498 if (skb_can_coalesce(skb, i, page, offset)) { 4499 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4500 } else if (i < max_frags) { 4501 skb_zcopy_downgrade_managed(skb); 4502 get_page(page); 4503 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4504 } else { 4505 return -EMSGSIZE; 4506 } 4507 4508 return 0; 4509 } 4510 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4511 4512 /** 4513 * skb_pull_rcsum - pull skb and update receive checksum 4514 * @skb: buffer to update 4515 * @len: length of data pulled 4516 * 4517 * This function performs an skb_pull on the packet and updates 4518 * the CHECKSUM_COMPLETE checksum. It should be used on 4519 * receive path processing instead of skb_pull unless you know 4520 * that the checksum difference is zero (e.g., a valid IP header) 4521 * or you are setting ip_summed to CHECKSUM_NONE. 
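 *
 * A minimal caller-side sketch (OUTER_HLEN is a placeholder for some
 * outer header length, and error handling is reduced): decapsulating
 * while keeping a CHECKSUM_COMPLETE value consistent might look like
 *
 *	if (!pskb_may_pull(skb, OUTER_HLEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, OUTER_HLEN);
 *	skb_reset_network_header(skb);
 *
 * The checksum adjustment itself is done by skb_postpull_rcsum() below.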
4522 */ 4523 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4524 { 4525 unsigned char *data = skb->data; 4526 4527 BUG_ON(len > skb->len); 4528 __skb_pull(skb, len); 4529 skb_postpull_rcsum(skb, data, len); 4530 return skb->data; 4531 } 4532 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4533 4534 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4535 { 4536 skb_frag_t head_frag; 4537 struct page *page; 4538 4539 page = virt_to_head_page(frag_skb->head); 4540 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4541 (unsigned char *)page_address(page), 4542 skb_headlen(frag_skb)); 4543 return head_frag; 4544 } 4545 4546 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4547 netdev_features_t features, 4548 unsigned int offset) 4549 { 4550 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4551 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4552 unsigned int delta_truesize = 0; 4553 unsigned int delta_len = 0; 4554 struct sk_buff *tail = NULL; 4555 struct sk_buff *nskb, *tmp; 4556 int len_diff, err; 4557 4558 skb_push(skb, -skb_network_offset(skb) + offset); 4559 4560 /* Ensure the head is writeable before touching the shared info */ 4561 err = skb_unclone(skb, GFP_ATOMIC); 4562 if (err) 4563 goto err_linearize; 4564 4565 skb_shinfo(skb)->frag_list = NULL; 4566 4567 while (list_skb) { 4568 nskb = list_skb; 4569 list_skb = list_skb->next; 4570 4571 err = 0; 4572 delta_truesize += nskb->truesize; 4573 if (skb_shared(nskb)) { 4574 tmp = skb_clone(nskb, GFP_ATOMIC); 4575 if (tmp) { 4576 consume_skb(nskb); 4577 nskb = tmp; 4578 err = skb_unclone(nskb, GFP_ATOMIC); 4579 } else { 4580 err = -ENOMEM; 4581 } 4582 } 4583 4584 if (!tail) 4585 skb->next = nskb; 4586 else 4587 tail->next = nskb; 4588 4589 if (unlikely(err)) { 4590 nskb->next = list_skb; 4591 goto err_linearize; 4592 } 4593 4594 tail = nskb; 4595 4596 delta_len += nskb->len; 4597 4598 skb_push(nskb, -skb_network_offset(nskb) + offset); 4599 4600 skb_release_head_state(nskb); 4601 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4602 __copy_skb_header(nskb, skb); 4603 4604 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4605 nskb->transport_header += len_diff; 4606 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4607 nskb->data - tnl_hlen, 4608 offset + tnl_hlen); 4609 4610 if (skb_needs_linearize(nskb, features) && 4611 __skb_linearize(nskb)) 4612 goto err_linearize; 4613 } 4614 4615 skb->truesize = skb->truesize - delta_truesize; 4616 skb->data_len = skb->data_len - delta_len; 4617 skb->len = skb->len - delta_len; 4618 4619 skb_gso_reset(skb); 4620 4621 skb->prev = tail; 4622 4623 if (skb_needs_linearize(skb, features) && 4624 __skb_linearize(skb)) 4625 goto err_linearize; 4626 4627 skb_get(skb); 4628 4629 return skb; 4630 4631 err_linearize: 4632 kfree_skb_list(skb->next); 4633 skb->next = NULL; 4634 return ERR_PTR(-ENOMEM); 4635 } 4636 EXPORT_SYMBOL_GPL(skb_segment_list); 4637 4638 /** 4639 * skb_segment - Perform protocol segmentation on skb. 4640 * @head_skb: buffer to segment 4641 * @features: features for the output path (see dev->features) 4642 * 4643 * This function performs segmentation on the given skb. It returns 4644 * a pointer to the first in a list of new skbs for the segments. 4645 * In case of error it returns ERR_PTR(err). 
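 *
 * A typical caller-side sketch (error handling reduced): the original
 * skb is normally consumed once the segment list has been built:
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *
 * after which each buffer on the list is transmitted by following the
 * ->next pointers (the last segment is also available as segs->prev).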
4646 */ 4647 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4648 netdev_features_t features) 4649 { 4650 struct sk_buff *segs = NULL; 4651 struct sk_buff *tail = NULL; 4652 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4653 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4654 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4655 unsigned int offset = doffset; 4656 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4657 unsigned int partial_segs = 0; 4658 unsigned int headroom; 4659 unsigned int len = head_skb->len; 4660 struct sk_buff *frag_skb; 4661 skb_frag_t *frag; 4662 __be16 proto; 4663 bool csum, sg; 4664 int err = -ENOMEM; 4665 int i = 0; 4666 int nfrags, pos; 4667 4668 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4669 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4670 struct sk_buff *check_skb; 4671 4672 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4673 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4674 /* gso_size is untrusted, and we have a frag_list with 4675 * a linear non head_frag item. 4676 * 4677 * If head_skb's headlen does not fit requested gso_size, 4678 * it means that the frag_list members do NOT terminate 4679 * on exact gso_size boundaries. Hence we cannot perform 4680 * skb_frag_t page sharing. Therefore we must fallback to 4681 * copying the frag_list skbs; we do so by disabling SG. 4682 */ 4683 features &= ~NETIF_F_SG; 4684 break; 4685 } 4686 } 4687 } 4688 4689 __skb_push(head_skb, doffset); 4690 proto = skb_network_protocol(head_skb, NULL); 4691 if (unlikely(!proto)) 4692 return ERR_PTR(-EINVAL); 4693 4694 sg = !!(features & NETIF_F_SG); 4695 csum = !!can_checksum_protocol(features, proto); 4696 4697 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4698 if (!(features & NETIF_F_GSO_PARTIAL)) { 4699 struct sk_buff *iter; 4700 unsigned int frag_len; 4701 4702 if (!list_skb || 4703 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4704 goto normal; 4705 4706 /* If we get here then all the required 4707 * GSO features except frag_list are supported. 4708 * Try to split the SKB to multiple GSO SKBs 4709 * with no frag_list. 4710 * Currently we can do that only when the buffers don't 4711 * have a linear part and all the buffers except 4712 * the last are of the same length. 4713 */ 4714 frag_len = list_skb->len; 4715 skb_walk_frags(head_skb, iter) { 4716 if (frag_len != iter->len && iter->next) 4717 goto normal; 4718 if (skb_headlen(iter) && !iter->head_frag) 4719 goto normal; 4720 4721 len -= iter->len; 4722 } 4723 4724 if (len != frag_len) 4725 goto normal; 4726 } 4727 4728 /* GSO partial only requires that we trim off any excess that 4729 * doesn't fit into an MSS sized block, so take care of that 4730 * now. 4731 * Cap len to not accidentally hit GSO_BY_FRAGS. 
4732 */ 4733 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4734 if (partial_segs > 1) 4735 mss *= partial_segs; 4736 else 4737 partial_segs = 0; 4738 } 4739 4740 normal: 4741 headroom = skb_headroom(head_skb); 4742 pos = skb_headlen(head_skb); 4743 4744 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4745 return ERR_PTR(-ENOMEM); 4746 4747 nfrags = skb_shinfo(head_skb)->nr_frags; 4748 frag = skb_shinfo(head_skb)->frags; 4749 frag_skb = head_skb; 4750 4751 do { 4752 struct sk_buff *nskb; 4753 skb_frag_t *nskb_frag; 4754 int hsize; 4755 int size; 4756 4757 if (unlikely(mss == GSO_BY_FRAGS)) { 4758 len = list_skb->len; 4759 } else { 4760 len = head_skb->len - offset; 4761 if (len > mss) 4762 len = mss; 4763 } 4764 4765 hsize = skb_headlen(head_skb) - offset; 4766 4767 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4768 (skb_headlen(list_skb) == len || sg)) { 4769 BUG_ON(skb_headlen(list_skb) > len); 4770 4771 nskb = skb_clone(list_skb, GFP_ATOMIC); 4772 if (unlikely(!nskb)) 4773 goto err; 4774 4775 i = 0; 4776 nfrags = skb_shinfo(list_skb)->nr_frags; 4777 frag = skb_shinfo(list_skb)->frags; 4778 frag_skb = list_skb; 4779 pos += skb_headlen(list_skb); 4780 4781 while (pos < offset + len) { 4782 BUG_ON(i >= nfrags); 4783 4784 size = skb_frag_size(frag); 4785 if (pos + size > offset + len) 4786 break; 4787 4788 i++; 4789 pos += size; 4790 frag++; 4791 } 4792 4793 list_skb = list_skb->next; 4794 4795 if (unlikely(pskb_trim(nskb, len))) { 4796 kfree_skb(nskb); 4797 goto err; 4798 } 4799 4800 hsize = skb_end_offset(nskb); 4801 if (skb_cow_head(nskb, doffset + headroom)) { 4802 kfree_skb(nskb); 4803 goto err; 4804 } 4805 4806 nskb->truesize += skb_end_offset(nskb) - hsize; 4807 skb_release_head_state(nskb); 4808 __skb_push(nskb, doffset); 4809 } else { 4810 if (hsize < 0) 4811 hsize = 0; 4812 if (hsize > len || !sg) 4813 hsize = len; 4814 4815 nskb = __alloc_skb(hsize + doffset + headroom, 4816 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4817 NUMA_NO_NODE); 4818 4819 if (unlikely(!nskb)) 4820 goto err; 4821 4822 skb_reserve(nskb, headroom); 4823 __skb_put(nskb, doffset); 4824 } 4825 4826 if (segs) 4827 tail->next = nskb; 4828 else 4829 segs = nskb; 4830 tail = nskb; 4831 4832 __copy_skb_header(nskb, head_skb); 4833 4834 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4835 skb_reset_mac_len(nskb); 4836 4837 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4838 nskb->data - tnl_hlen, 4839 doffset + tnl_hlen); 4840 4841 if (nskb->len == len + doffset) 4842 goto perform_csum_check; 4843 4844 if (!sg) { 4845 if (!csum) { 4846 if (!nskb->remcsum_offload) 4847 nskb->ip_summed = CHECKSUM_NONE; 4848 SKB_GSO_CB(nskb)->csum = 4849 skb_copy_and_csum_bits(head_skb, offset, 4850 skb_put(nskb, 4851 len), 4852 len); 4853 SKB_GSO_CB(nskb)->csum_start = 4854 skb_headroom(nskb) + doffset; 4855 } else { 4856 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4857 goto err; 4858 } 4859 continue; 4860 } 4861 4862 nskb_frag = skb_shinfo(nskb)->frags; 4863 4864 skb_copy_from_linear_data_offset(head_skb, offset, 4865 skb_put(nskb, hsize), hsize); 4866 4867 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4868 SKBFL_SHARED_FRAG; 4869 4870 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4871 goto err; 4872 4873 while (pos < offset + len) { 4874 if (i >= nfrags) { 4875 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4876 skb_zerocopy_clone(nskb, list_skb, 4877 GFP_ATOMIC)) 4878 goto err; 4879 4880 i = 0; 4881 nfrags = skb_shinfo(list_skb)->nr_frags; 4882 frag = 
skb_shinfo(list_skb)->frags; 4883 frag_skb = list_skb; 4884 if (!skb_headlen(list_skb)) { 4885 BUG_ON(!nfrags); 4886 } else { 4887 BUG_ON(!list_skb->head_frag); 4888 4889 /* to make room for head_frag. */ 4890 i--; 4891 frag--; 4892 } 4893 4894 list_skb = list_skb->next; 4895 } 4896 4897 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4898 MAX_SKB_FRAGS)) { 4899 net_warn_ratelimited( 4900 "skb_segment: too many frags: %u %u\n", 4901 pos, mss); 4902 err = -EINVAL; 4903 goto err; 4904 } 4905 4906 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4907 __skb_frag_ref(nskb_frag); 4908 size = skb_frag_size(nskb_frag); 4909 4910 if (pos < offset) { 4911 skb_frag_off_add(nskb_frag, offset - pos); 4912 skb_frag_size_sub(nskb_frag, offset - pos); 4913 } 4914 4915 skb_shinfo(nskb)->nr_frags++; 4916 4917 if (pos + size <= offset + len) { 4918 i++; 4919 frag++; 4920 pos += size; 4921 } else { 4922 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4923 goto skip_fraglist; 4924 } 4925 4926 nskb_frag++; 4927 } 4928 4929 skip_fraglist: 4930 nskb->data_len = len - hsize; 4931 nskb->len += nskb->data_len; 4932 nskb->truesize += nskb->data_len; 4933 4934 perform_csum_check: 4935 if (!csum) { 4936 if (skb_has_shared_frag(nskb) && 4937 __skb_linearize(nskb)) 4938 goto err; 4939 4940 if (!nskb->remcsum_offload) 4941 nskb->ip_summed = CHECKSUM_NONE; 4942 SKB_GSO_CB(nskb)->csum = 4943 skb_checksum(nskb, doffset, 4944 nskb->len - doffset, 0); 4945 SKB_GSO_CB(nskb)->csum_start = 4946 skb_headroom(nskb) + doffset; 4947 } 4948 } while ((offset += len) < head_skb->len); 4949 4950 /* Some callers want to get the end of the list. 4951 * Put it in segs->prev to avoid walking the list. 4952 * (see validate_xmit_skb_list() for example) 4953 */ 4954 segs->prev = tail; 4955 4956 if (partial_segs) { 4957 struct sk_buff *iter; 4958 int type = skb_shinfo(head_skb)->gso_type; 4959 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4960 4961 /* Update type to add partial and then remove dodgy if set */ 4962 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4963 type &= ~SKB_GSO_DODGY; 4964 4965 /* Update GSO info and prepare to start updating headers on 4966 * our way back down the stack of protocols. 4967 */ 4968 for (iter = segs; iter; iter = iter->next) { 4969 skb_shinfo(iter)->gso_size = gso_size; 4970 skb_shinfo(iter)->gso_segs = partial_segs; 4971 skb_shinfo(iter)->gso_type = type; 4972 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4973 } 4974 4975 if (tail->len - doffset <= gso_size) 4976 skb_shinfo(tail)->gso_size = 0; 4977 else if (tail != segs) 4978 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4979 } 4980 4981 /* Following permits correct backpressure, for protocols 4982 * using skb_set_owner_w(). 4983 * Idea is to tranfert ownership from head_skb to last segment. 
4984 */ 4985 if (head_skb->destructor == sock_wfree) { 4986 swap(tail->truesize, head_skb->truesize); 4987 swap(tail->destructor, head_skb->destructor); 4988 swap(tail->sk, head_skb->sk); 4989 } 4990 return segs; 4991 4992 err: 4993 kfree_skb_list(segs); 4994 return ERR_PTR(err); 4995 } 4996 EXPORT_SYMBOL_GPL(skb_segment); 4997 4998 #ifdef CONFIG_SKB_EXTENSIONS 4999 #define SKB_EXT_ALIGN_VALUE 8 5000 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 5001 5002 static const u8 skb_ext_type_len[] = { 5003 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 5004 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 5005 #endif 5006 #ifdef CONFIG_XFRM 5007 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 5008 #endif 5009 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 5010 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 5011 #endif 5012 #if IS_ENABLED(CONFIG_MPTCP) 5013 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 5014 #endif 5015 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 5016 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 5017 #endif 5018 }; 5019 5020 static __always_inline unsigned int skb_ext_total_length(void) 5021 { 5022 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 5023 int i; 5024 5025 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 5026 l += skb_ext_type_len[i]; 5027 5028 return l; 5029 } 5030 5031 static void skb_extensions_init(void) 5032 { 5033 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 5034 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 5035 BUILD_BUG_ON(skb_ext_total_length() > 255); 5036 #endif 5037 5038 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 5039 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 5040 0, 5041 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5042 NULL); 5043 } 5044 #else 5045 static void skb_extensions_init(void) {} 5046 #endif 5047 5048 /* The SKB kmem_cache slab is critical for network performance. Never 5049 * merge/alias the slab with similar sized objects. This avoids fragmentation 5050 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 5051 */ 5052 #ifndef CONFIG_SLUB_TINY 5053 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 5054 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 5055 #define FLAG_SKB_NO_MERGE 0 5056 #endif 5057 5058 void __init skb_init(void) 5059 { 5060 net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 5061 sizeof(struct sk_buff), 5062 0, 5063 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 5064 FLAG_SKB_NO_MERGE, 5065 offsetof(struct sk_buff, cb), 5066 sizeof_field(struct sk_buff, cb), 5067 NULL); 5068 net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 5069 sizeof(struct sk_buff_fclones), 5070 0, 5071 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5072 NULL); 5073 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 5074 * struct skb_shared_info is located at the end of skb->head, 5075 * and should not be copied to/from user. 
5076 */ 5077 net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 5078 SKB_SMALL_HEAD_CACHE_SIZE, 5079 0, 5080 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 5081 0, 5082 SKB_SMALL_HEAD_HEADROOM, 5083 NULL); 5084 skb_extensions_init(); 5085 } 5086 5087 static int 5088 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 5089 unsigned int recursion_level) 5090 { 5091 int start = skb_headlen(skb); 5092 int i, copy = start - offset; 5093 struct sk_buff *frag_iter; 5094 int elt = 0; 5095 5096 if (unlikely(recursion_level >= 24)) 5097 return -EMSGSIZE; 5098 5099 if (copy > 0) { 5100 if (copy > len) 5101 copy = len; 5102 sg_set_buf(sg, skb->data + offset, copy); 5103 elt++; 5104 if ((len -= copy) == 0) 5105 return elt; 5106 offset += copy; 5107 } 5108 5109 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5110 int end; 5111 5112 WARN_ON(start > offset + len); 5113 5114 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 5115 if ((copy = end - offset) > 0) { 5116 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5117 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5118 return -EMSGSIZE; 5119 5120 if (copy > len) 5121 copy = len; 5122 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 5123 skb_frag_off(frag) + offset - start); 5124 elt++; 5125 if (!(len -= copy)) 5126 return elt; 5127 offset += copy; 5128 } 5129 start = end; 5130 } 5131 5132 skb_walk_frags(skb, frag_iter) { 5133 int end, ret; 5134 5135 WARN_ON(start > offset + len); 5136 5137 end = start + frag_iter->len; 5138 if ((copy = end - offset) > 0) { 5139 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5140 return -EMSGSIZE; 5141 5142 if (copy > len) 5143 copy = len; 5144 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 5145 copy, recursion_level + 1); 5146 if (unlikely(ret < 0)) 5147 return ret; 5148 elt += ret; 5149 if ((len -= copy) == 0) 5150 return elt; 5151 offset += copy; 5152 } 5153 start = end; 5154 } 5155 BUG_ON(len); 5156 return elt; 5157 } 5158 5159 /** 5160 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 5161 * @skb: Socket buffer containing the buffers to be mapped 5162 * @sg: The scatter-gather list to map into 5163 * @offset: The offset into the buffer's contents to start mapping 5164 * @len: Length of buffer space to be mapped 5165 * 5166 * Fill the specified scatter-gather list with mappings/pointers into a 5167 * region of the buffer space attached to a socket buffer. Returns either 5168 * the number of scatterlist items used, or -EMSGSIZE if the contents 5169 * could not fit. 5170 */ 5171 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5172 { 5173 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5174 5175 if (nsg <= 0) 5176 return nsg; 5177 5178 sg_mark_end(&sg[nsg - 1]); 5179 5180 return nsg; 5181 } 5182 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5183 5184 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 5185 * sglist without mark the sg which contain last skb data as the end. 5186 * So the caller can mannipulate sg list as will when padding new data after 5187 * the first call without calling sg_unmark_end to expend sg list. 5188 * 5189 * Scenario to use skb_to_sgvec_nomark: 5190 * 1. sg_init_table 5191 * 2. skb_to_sgvec_nomark(payload1) 5192 * 3. skb_to_sgvec_nomark(payload2) 5193 * 5194 * This is equivalent to: 5195 * 1. sg_init_table 5196 * 2. skb_to_sgvec(payload1) 5197 * 3. sg_unmark_end 5198 * 4. 
skb_to_sgvec(payload2) 5199 * 5200 * When mapping multiple payload conditionally, skb_to_sgvec_nomark 5201 * is more preferable. 5202 */ 5203 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5204 int offset, int len) 5205 { 5206 return __skb_to_sgvec(skb, sg, offset, len, 0); 5207 } 5208 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5209 5210 5211 5212 /** 5213 * skb_cow_data - Check that a socket buffer's data buffers are writable 5214 * @skb: The socket buffer to check. 5215 * @tailbits: Amount of trailing space to be added 5216 * @trailer: Returned pointer to the skb where the @tailbits space begins 5217 * 5218 * Make sure that the data buffers attached to a socket buffer are 5219 * writable. If they are not, private copies are made of the data buffers 5220 * and the socket buffer is set to use these instead. 5221 * 5222 * If @tailbits is given, make sure that there is space to write @tailbits 5223 * bytes of data beyond current end of socket buffer. @trailer will be 5224 * set to point to the skb in which this space begins. 5225 * 5226 * The number of scatterlist elements required to completely map the 5227 * COW'd and extended socket buffer will be returned. 5228 */ 5229 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5230 { 5231 int copyflag; 5232 int elt; 5233 struct sk_buff *skb1, **skb_p; 5234 5235 /* If skb is cloned or its head is paged, reallocate 5236 * head pulling out all the pages (pages are considered not writable 5237 * at the moment even if they are anonymous). 5238 */ 5239 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5240 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5241 return -ENOMEM; 5242 5243 /* Easy case. Most of packets will go this way. */ 5244 if (!skb_has_frag_list(skb)) { 5245 /* A little of trouble, not enough of space for trailer. 5246 * This should not happen, when stack is tuned to generate 5247 * good frames. OK, on miss we reallocate and reserve even more 5248 * space, 128 bytes is fair. */ 5249 5250 if (skb_tailroom(skb) < tailbits && 5251 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5252 return -ENOMEM; 5253 5254 /* Voila! */ 5255 *trailer = skb; 5256 return 1; 5257 } 5258 5259 /* Misery. We are in troubles, going to mincer fragments... */ 5260 5261 elt = 1; 5262 skb_p = &skb_shinfo(skb)->frag_list; 5263 copyflag = 0; 5264 5265 while ((skb1 = *skb_p) != NULL) { 5266 int ntail = 0; 5267 5268 /* The fragment is partially pulled by someone, 5269 * this can happen on input. Copy it and everything 5270 * after it. */ 5271 5272 if (skb_shared(skb1)) 5273 copyflag = 1; 5274 5275 /* If the skb is the last, worry about trailer. */ 5276 5277 if (skb1->next == NULL && tailbits) { 5278 if (skb_shinfo(skb1)->nr_frags || 5279 skb_has_frag_list(skb1) || 5280 skb_tailroom(skb1) < tailbits) 5281 ntail = tailbits + 128; 5282 } 5283 5284 if (copyflag || 5285 skb_cloned(skb1) || 5286 ntail || 5287 skb_shinfo(skb1)->nr_frags || 5288 skb_has_frag_list(skb1)) { 5289 struct sk_buff *skb2; 5290 5291 /* Fuck, we are miserable poor guys... */ 5292 if (ntail == 0) 5293 skb2 = skb_copy(skb1, GFP_ATOMIC); 5294 else 5295 skb2 = skb_copy_expand(skb1, 5296 skb_headroom(skb1), 5297 ntail, 5298 GFP_ATOMIC); 5299 if (unlikely(skb2 == NULL)) 5300 return -ENOMEM; 5301 5302 if (skb1->sk) 5303 skb_set_owner_w(skb2, skb1->sk); 5304 5305 /* Looking around. Are we still alive? 
5306 * OK, link new skb, drop old one */ 5307 5308 skb2->next = skb1->next; 5309 *skb_p = skb2; 5310 kfree_skb(skb1); 5311 skb1 = skb2; 5312 } 5313 elt++; 5314 *trailer = skb1; 5315 skb_p = &skb1->next; 5316 } 5317 5318 return elt; 5319 } 5320 EXPORT_SYMBOL_GPL(skb_cow_data); 5321 5322 static void sock_rmem_free(struct sk_buff *skb) 5323 { 5324 struct sock *sk = skb->sk; 5325 5326 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5327 } 5328 5329 static void skb_set_err_queue(struct sk_buff *skb) 5330 { 5331 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5332 * So, it is safe to (mis)use it to mark skbs on the error queue. 5333 */ 5334 skb->pkt_type = PACKET_OUTGOING; 5335 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5336 } 5337 5338 /* 5339 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5340 */ 5341 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5342 { 5343 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5344 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5345 return -ENOMEM; 5346 5347 skb_orphan(skb); 5348 skb->sk = sk; 5349 skb->destructor = sock_rmem_free; 5350 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5351 skb_set_err_queue(skb); 5352 5353 /* before exiting rcu section, make sure dst is refcounted */ 5354 skb_dst_force(skb); 5355 5356 skb_queue_tail(&sk->sk_error_queue, skb); 5357 if (!sock_flag(sk, SOCK_DEAD)) 5358 sk_error_report(sk); 5359 return 0; 5360 } 5361 EXPORT_SYMBOL(sock_queue_err_skb); 5362 5363 static bool is_icmp_err_skb(const struct sk_buff *skb) 5364 { 5365 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5366 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5367 } 5368 5369 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5370 { 5371 struct sk_buff_head *q = &sk->sk_error_queue; 5372 struct sk_buff *skb, *skb_next = NULL; 5373 bool icmp_next = false; 5374 unsigned long flags; 5375 5376 if (skb_queue_empty_lockless(q)) 5377 return NULL; 5378 5379 spin_lock_irqsave(&q->lock, flags); 5380 skb = __skb_dequeue(q); 5381 if (skb && (skb_next = skb_peek(q))) { 5382 icmp_next = is_icmp_err_skb(skb_next); 5383 if (icmp_next) 5384 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5385 } 5386 spin_unlock_irqrestore(&q->lock, flags); 5387 5388 if (is_icmp_err_skb(skb) && !icmp_next) 5389 sk->sk_err = 0; 5390 5391 if (skb_next) 5392 sk_error_report(sk); 5393 5394 return skb; 5395 } 5396 EXPORT_SYMBOL(sock_dequeue_err_skb); 5397 5398 /** 5399 * skb_clone_sk - create clone of skb, and take reference to socket 5400 * @skb: the skb to clone 5401 * 5402 * This function creates a clone of a buffer that holds a reference on 5403 * sk_refcnt. Buffers created via this function are meant to be 5404 * returned using sock_queue_err_skb, or free via kfree_skb. 5405 * 5406 * When passing buffers allocated with this function to sock_queue_err_skb 5407 * it is necessary to wrap the call with sock_hold/sock_put in order to 5408 * prevent the socket from being released prior to being enqueued on 5409 * the sk_error_queue. 
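 *
 * A common usage sketch, e.g. for hardware TX timestamping in a driver:
 * clone at transmit time,
 *
 *	clone = skb_clone_sk(skb);
 *
 * and once the hardware timestamp is available, complete it with
 *
 *	if (clone)
 *		skb_complete_tx_timestamp(clone, &hwtstamps);
 *
 * where skb_complete_tx_timestamp() takes care of the socket reference.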
5410 */ 5411 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5412 { 5413 struct sock *sk = skb->sk; 5414 struct sk_buff *clone; 5415 5416 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5417 return NULL; 5418 5419 clone = skb_clone(skb, GFP_ATOMIC); 5420 if (!clone) { 5421 sock_put(sk); 5422 return NULL; 5423 } 5424 5425 clone->sk = sk; 5426 clone->destructor = sock_efree; 5427 5428 return clone; 5429 } 5430 EXPORT_SYMBOL(skb_clone_sk); 5431 5432 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5433 struct sock *sk, 5434 int tstype, 5435 bool opt_stats) 5436 { 5437 struct sock_exterr_skb *serr; 5438 int err; 5439 5440 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5441 5442 serr = SKB_EXT_ERR(skb); 5443 memset(serr, 0, sizeof(*serr)); 5444 serr->ee.ee_errno = ENOMSG; 5445 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5446 serr->ee.ee_info = tstype; 5447 serr->opt_stats = opt_stats; 5448 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5449 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5450 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5451 if (sk_is_tcp(sk)) 5452 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5453 } 5454 5455 err = sock_queue_err_skb(sk, skb); 5456 5457 if (err) 5458 kfree_skb(skb); 5459 } 5460 5461 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5462 { 5463 bool ret; 5464 5465 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) 5466 return true; 5467 5468 read_lock_bh(&sk->sk_callback_lock); 5469 ret = sk->sk_socket && sk->sk_socket->file && 5470 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5471 read_unlock_bh(&sk->sk_callback_lock); 5472 return ret; 5473 } 5474 5475 void skb_complete_tx_timestamp(struct sk_buff *skb, 5476 struct skb_shared_hwtstamps *hwtstamps) 5477 { 5478 struct sock *sk = skb->sk; 5479 5480 if (!skb_may_tx_timestamp(sk, false)) 5481 goto err; 5482 5483 /* Take a reference to prevent skb_orphan() from freeing the socket, 5484 * but only if the socket refcount is not zero. 
5485 */ 5486 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5487 *skb_hwtstamps(skb) = *hwtstamps; 5488 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5489 sock_put(sk); 5490 return; 5491 } 5492 5493 err: 5494 kfree_skb(skb); 5495 } 5496 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5497 5498 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5499 const struct sk_buff *ack_skb, 5500 struct skb_shared_hwtstamps *hwtstamps, 5501 struct sock *sk, int tstype) 5502 { 5503 struct sk_buff *skb; 5504 bool tsonly, opt_stats = false; 5505 u32 tsflags; 5506 5507 if (!sk) 5508 return; 5509 5510 tsflags = READ_ONCE(sk->sk_tsflags); 5511 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5512 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5513 return; 5514 5515 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5516 if (!skb_may_tx_timestamp(sk, tsonly)) 5517 return; 5518 5519 if (tsonly) { 5520 #ifdef CONFIG_INET 5521 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5522 sk_is_tcp(sk)) { 5523 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5524 ack_skb); 5525 opt_stats = true; 5526 } else 5527 #endif 5528 skb = alloc_skb(0, GFP_ATOMIC); 5529 } else { 5530 skb = skb_clone(orig_skb, GFP_ATOMIC); 5531 5532 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5533 kfree_skb(skb); 5534 return; 5535 } 5536 } 5537 if (!skb) 5538 return; 5539 5540 if (tsonly) { 5541 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5542 SKBTX_ANY_TSTAMP; 5543 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5544 } 5545 5546 if (hwtstamps) 5547 *skb_hwtstamps(skb) = *hwtstamps; 5548 else 5549 __net_timestamp(skb); 5550 5551 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5552 } 5553 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5554 5555 void skb_tstamp_tx(struct sk_buff *orig_skb, 5556 struct skb_shared_hwtstamps *hwtstamps) 5557 { 5558 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5559 SCM_TSTAMP_SND); 5560 } 5561 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5562 5563 #ifdef CONFIG_WIRELESS 5564 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5565 { 5566 struct sock *sk = skb->sk; 5567 struct sock_exterr_skb *serr; 5568 int err = 1; 5569 5570 skb->wifi_acked_valid = 1; 5571 skb->wifi_acked = acked; 5572 5573 serr = SKB_EXT_ERR(skb); 5574 memset(serr, 0, sizeof(*serr)); 5575 serr->ee.ee_errno = ENOMSG; 5576 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5577 5578 /* Take a reference to prevent skb_orphan() from freeing the socket, 5579 * but only if the socket refcount is not zero. 5580 */ 5581 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5582 err = sock_queue_err_skb(sk, skb); 5583 sock_put(sk); 5584 } 5585 if (err) 5586 kfree_skb(skb); 5587 } 5588 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5589 #endif /* CONFIG_WIRELESS */ 5590 5591 /** 5592 * skb_partial_csum_set - set up and verify partial csum values for packet 5593 * @skb: the skb to set 5594 * @start: the number of bytes after skb->data to start checksumming. 5595 * @off: the offset from start to place the checksum. 5596 * 5597 * For untrusted partially-checksummed packets, we need to make sure the values 5598 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5599 * 5600 * This function checks and sets those values and skb->ip_summed: if this 5601 * returns false you should drop the packet. 
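 *
 * A caller-side sketch for an untrusted metadata header carrying
 * csum_start/csum_offset style fields (the field names here are
 * placeholders):
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}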
5602 */ 5603 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5604 { 5605 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5606 u32 csum_start = skb_headroom(skb) + (u32)start; 5607 5608 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5609 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5610 start, off, skb_headroom(skb), skb_headlen(skb)); 5611 return false; 5612 } 5613 skb->ip_summed = CHECKSUM_PARTIAL; 5614 skb->csum_start = csum_start; 5615 skb->csum_offset = off; 5616 skb->transport_header = csum_start; 5617 return true; 5618 } 5619 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5620 5621 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5622 unsigned int max) 5623 { 5624 if (skb_headlen(skb) >= len) 5625 return 0; 5626 5627 /* If we need to pullup then pullup to the max, so we 5628 * won't need to do it again. 5629 */ 5630 if (max > skb->len) 5631 max = skb->len; 5632 5633 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5634 return -ENOMEM; 5635 5636 if (skb_headlen(skb) < len) 5637 return -EPROTO; 5638 5639 return 0; 5640 } 5641 5642 #define MAX_TCP_HDR_LEN (15 * 4) 5643 5644 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5645 typeof(IPPROTO_IP) proto, 5646 unsigned int off) 5647 { 5648 int err; 5649 5650 switch (proto) { 5651 case IPPROTO_TCP: 5652 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5653 off + MAX_TCP_HDR_LEN); 5654 if (!err && !skb_partial_csum_set(skb, off, 5655 offsetof(struct tcphdr, 5656 check))) 5657 err = -EPROTO; 5658 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5659 5660 case IPPROTO_UDP: 5661 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5662 off + sizeof(struct udphdr)); 5663 if (!err && !skb_partial_csum_set(skb, off, 5664 offsetof(struct udphdr, 5665 check))) 5666 err = -EPROTO; 5667 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5668 } 5669 5670 return ERR_PTR(-EPROTO); 5671 } 5672 5673 /* This value should be large enough to cover a tagged ethernet header plus 5674 * maximally sized IP and TCP or UDP headers. 5675 */ 5676 #define MAX_IP_HDR_LEN 128 5677 5678 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5679 { 5680 unsigned int off; 5681 bool fragment; 5682 __sum16 *csum; 5683 int err; 5684 5685 fragment = false; 5686 5687 err = skb_maybe_pull_tail(skb, 5688 sizeof(struct iphdr), 5689 MAX_IP_HDR_LEN); 5690 if (err < 0) 5691 goto out; 5692 5693 if (ip_is_fragment(ip_hdr(skb))) 5694 fragment = true; 5695 5696 off = ip_hdrlen(skb); 5697 5698 err = -EPROTO; 5699 5700 if (fragment) 5701 goto out; 5702 5703 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5704 if (IS_ERR(csum)) 5705 return PTR_ERR(csum); 5706 5707 if (recalculate) 5708 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5709 ip_hdr(skb)->daddr, 5710 skb->len - off, 5711 ip_hdr(skb)->protocol, 0); 5712 err = 0; 5713 5714 out: 5715 return err; 5716 } 5717 5718 /* This value should be large enough to cover a tagged ethernet header plus 5719 * an IPv6 header, all options, and a maximal TCP or UDP header. 
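 * For reference: a VLAN-tagged Ethernet header is 18 bytes, the fixed
 * IPv6 header is 40 bytes and a maximal TCP header is 60 bytes, so 256
 * still leaves well over a hundred bytes for extension headers.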
5720 */ 5721 #define MAX_IPV6_HDR_LEN 256 5722 5723 #define OPT_HDR(type, skb, off) \ 5724 (type *)(skb_network_header(skb) + (off)) 5725 5726 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5727 { 5728 int err; 5729 u8 nexthdr; 5730 unsigned int off; 5731 unsigned int len; 5732 bool fragment; 5733 bool done; 5734 __sum16 *csum; 5735 5736 fragment = false; 5737 done = false; 5738 5739 off = sizeof(struct ipv6hdr); 5740 5741 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5742 if (err < 0) 5743 goto out; 5744 5745 nexthdr = ipv6_hdr(skb)->nexthdr; 5746 5747 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5748 while (off <= len && !done) { 5749 switch (nexthdr) { 5750 case IPPROTO_DSTOPTS: 5751 case IPPROTO_HOPOPTS: 5752 case IPPROTO_ROUTING: { 5753 struct ipv6_opt_hdr *hp; 5754 5755 err = skb_maybe_pull_tail(skb, 5756 off + 5757 sizeof(struct ipv6_opt_hdr), 5758 MAX_IPV6_HDR_LEN); 5759 if (err < 0) 5760 goto out; 5761 5762 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5763 nexthdr = hp->nexthdr; 5764 off += ipv6_optlen(hp); 5765 break; 5766 } 5767 case IPPROTO_AH: { 5768 struct ip_auth_hdr *hp; 5769 5770 err = skb_maybe_pull_tail(skb, 5771 off + 5772 sizeof(struct ip_auth_hdr), 5773 MAX_IPV6_HDR_LEN); 5774 if (err < 0) 5775 goto out; 5776 5777 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5778 nexthdr = hp->nexthdr; 5779 off += ipv6_authlen(hp); 5780 break; 5781 } 5782 case IPPROTO_FRAGMENT: { 5783 struct frag_hdr *hp; 5784 5785 err = skb_maybe_pull_tail(skb, 5786 off + 5787 sizeof(struct frag_hdr), 5788 MAX_IPV6_HDR_LEN); 5789 if (err < 0) 5790 goto out; 5791 5792 hp = OPT_HDR(struct frag_hdr, skb, off); 5793 5794 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5795 fragment = true; 5796 5797 nexthdr = hp->nexthdr; 5798 off += sizeof(struct frag_hdr); 5799 break; 5800 } 5801 default: 5802 done = true; 5803 break; 5804 } 5805 } 5806 5807 err = -EPROTO; 5808 5809 if (!done || fragment) 5810 goto out; 5811 5812 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5813 if (IS_ERR(csum)) 5814 return PTR_ERR(csum); 5815 5816 if (recalculate) 5817 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5818 &ipv6_hdr(skb)->daddr, 5819 skb->len - off, nexthdr, 0); 5820 err = 0; 5821 5822 out: 5823 return err; 5824 } 5825 5826 /** 5827 * skb_checksum_setup - set up partial checksum offset 5828 * @skb: the skb to set up 5829 * @recalculate: if true the pseudo-header checksum will be recalculated 5830 */ 5831 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5832 { 5833 int err; 5834 5835 switch (skb->protocol) { 5836 case htons(ETH_P_IP): 5837 err = skb_checksum_setup_ipv4(skb, recalculate); 5838 break; 5839 5840 case htons(ETH_P_IPV6): 5841 err = skb_checksum_setup_ipv6(skb, recalculate); 5842 break; 5843 5844 default: 5845 err = -EPROTO; 5846 break; 5847 } 5848 5849 return err; 5850 } 5851 EXPORT_SYMBOL(skb_checksum_setup); 5852 5853 /** 5854 * skb_checksum_maybe_trim - maybe trims the given skb 5855 * @skb: the skb to check 5856 * @transport_len: the data length beyond the network header 5857 * 5858 * Checks whether the given skb has data beyond the given transport length. 5859 * If so, returns a cloned skb trimmed to this transport length. 5860 * Otherwise returns the provided skb. Returns NULL in error cases 5861 * (e.g. transport_len exceeds skb length or out-of-memory). 5862 * 5863 * Caller needs to set the skb transport header and free any returned skb if it 5864 * differs from the provided skb. 
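 *
 * Caller-side sketch (the checksum callback name is a placeholder),
 * matching the contract shared with skb_checksum_trimmed() below:
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_csum_check);
 *	if (!skb_chk)
 *		goto drop;
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);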
5865 */ 5866 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5867 unsigned int transport_len) 5868 { 5869 struct sk_buff *skb_chk; 5870 unsigned int len = skb_transport_offset(skb) + transport_len; 5871 int ret; 5872 5873 if (skb->len < len) 5874 return NULL; 5875 else if (skb->len == len) 5876 return skb; 5877 5878 skb_chk = skb_clone(skb, GFP_ATOMIC); 5879 if (!skb_chk) 5880 return NULL; 5881 5882 ret = pskb_trim_rcsum(skb_chk, len); 5883 if (ret) { 5884 kfree_skb(skb_chk); 5885 return NULL; 5886 } 5887 5888 return skb_chk; 5889 } 5890 5891 /** 5892 * skb_checksum_trimmed - validate checksum of an skb 5893 * @skb: the skb to check 5894 * @transport_len: the data length beyond the network header 5895 * @skb_chkf: checksum function to use 5896 * 5897 * Applies the given checksum function skb_chkf to the provided skb. 5898 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5899 * 5900 * If the skb has data beyond the given transport length, then a 5901 * trimmed & cloned skb is checked and returned. 5902 * 5903 * Caller needs to set the skb transport header and free any returned skb if it 5904 * differs from the provided skb. 5905 */ 5906 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5907 unsigned int transport_len, 5908 __sum16(*skb_chkf)(struct sk_buff *skb)) 5909 { 5910 struct sk_buff *skb_chk; 5911 unsigned int offset = skb_transport_offset(skb); 5912 __sum16 ret; 5913 5914 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5915 if (!skb_chk) 5916 goto err; 5917 5918 if (!pskb_may_pull(skb_chk, offset)) 5919 goto err; 5920 5921 skb_pull_rcsum(skb_chk, offset); 5922 ret = skb_chkf(skb_chk); 5923 skb_push_rcsum(skb_chk, offset); 5924 5925 if (ret) 5926 goto err; 5927 5928 return skb_chk; 5929 5930 err: 5931 if (skb_chk && skb_chk != skb) 5932 kfree_skb(skb_chk); 5933 5934 return NULL; 5935 5936 } 5937 EXPORT_SYMBOL(skb_checksum_trimmed); 5938 5939 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5940 { 5941 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5942 skb->dev->name); 5943 } 5944 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5945 5946 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5947 { 5948 if (head_stolen) { 5949 skb_release_head_state(skb); 5950 kmem_cache_free(net_hotdata.skbuff_cache, skb); 5951 } else { 5952 __kfree_skb(skb); 5953 } 5954 } 5955 EXPORT_SYMBOL(kfree_skb_partial); 5956 5957 /** 5958 * skb_try_coalesce - try to merge skb to prior one 5959 * @to: prior buffer 5960 * @from: buffer to add 5961 * @fragstolen: pointer to boolean 5962 * @delta_truesize: how much more was allocated than was requested 5963 */ 5964 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5965 bool *fragstolen, int *delta_truesize) 5966 { 5967 struct skb_shared_info *to_shinfo, *from_shinfo; 5968 int i, delta, len = from->len; 5969 5970 *fragstolen = false; 5971 5972 if (skb_cloned(to)) 5973 return false; 5974 5975 /* In general, avoid mixing page_pool and non-page_pool allocated 5976 * pages within the same SKB. In theory we could take full 5977 * references if @from is cloned and !@to->pp_recycle but its 5978 * tricky (due to potential race with the clone disappearing) and 5979 * rare, so not worth dealing with. 
5980 */ 5981 if (to->pp_recycle != from->pp_recycle) 5982 return false; 5983 5984 if (len <= skb_tailroom(to)) { 5985 if (len) 5986 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5987 *delta_truesize = 0; 5988 return true; 5989 } 5990 5991 to_shinfo = skb_shinfo(to); 5992 from_shinfo = skb_shinfo(from); 5993 if (to_shinfo->frag_list || from_shinfo->frag_list) 5994 return false; 5995 if (skb_zcopy(to) || skb_zcopy(from)) 5996 return false; 5997 5998 if (skb_headlen(from) != 0) { 5999 struct page *page; 6000 unsigned int offset; 6001 6002 if (to_shinfo->nr_frags + 6003 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 6004 return false; 6005 6006 if (skb_head_is_locked(from)) 6007 return false; 6008 6009 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 6010 6011 page = virt_to_head_page(from->head); 6012 offset = from->data - (unsigned char *)page_address(page); 6013 6014 skb_fill_page_desc(to, to_shinfo->nr_frags, 6015 page, offset, skb_headlen(from)); 6016 *fragstolen = true; 6017 } else { 6018 if (to_shinfo->nr_frags + 6019 from_shinfo->nr_frags > MAX_SKB_FRAGS) 6020 return false; 6021 6022 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 6023 } 6024 6025 WARN_ON_ONCE(delta < len); 6026 6027 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 6028 from_shinfo->frags, 6029 from_shinfo->nr_frags * sizeof(skb_frag_t)); 6030 to_shinfo->nr_frags += from_shinfo->nr_frags; 6031 6032 if (!skb_cloned(from)) 6033 from_shinfo->nr_frags = 0; 6034 6035 /* if the skb is not cloned this does nothing 6036 * since we set nr_frags to 0. 6037 */ 6038 if (skb_pp_frag_ref(from)) { 6039 for (i = 0; i < from_shinfo->nr_frags; i++) 6040 __skb_frag_ref(&from_shinfo->frags[i]); 6041 } 6042 6043 to->truesize += delta; 6044 to->len += len; 6045 to->data_len += len; 6046 6047 *delta_truesize = delta; 6048 return true; 6049 } 6050 EXPORT_SYMBOL(skb_try_coalesce); 6051 6052 /** 6053 * skb_scrub_packet - scrub an skb 6054 * 6055 * @skb: buffer to clean 6056 * @xnet: packet is crossing netns 6057 * 6058 * skb_scrub_packet can be used after encapsulating or decapsulating a packet 6059 * into/from a tunnel. Some information have to be cleared during these 6060 * operations. 6061 * skb_scrub_packet can also be used to clean a skb before injecting it in 6062 * another namespace (@xnet == true). We have to clear all information in the 6063 * skb that could impact namespace isolation. 
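 *
 * A caller-side sketch for a tunnel receive path that may hand the inner
 * packet to a device in another namespace (device pointers are
 * placeholders):
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(outer_dev), dev_net(inner_dev)));
 *	skb->dev = inner_dev;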
6064 */ 6065 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 6066 { 6067 skb->pkt_type = PACKET_HOST; 6068 skb->skb_iif = 0; 6069 skb->ignore_df = 0; 6070 skb_dst_drop(skb); 6071 skb_ext_reset(skb); 6072 nf_reset_ct(skb); 6073 nf_reset_trace(skb); 6074 6075 #ifdef CONFIG_NET_SWITCHDEV 6076 skb->offload_fwd_mark = 0; 6077 skb->offload_l3_fwd_mark = 0; 6078 #endif 6079 6080 if (!xnet) 6081 return; 6082 6083 ipvs_reset(skb); 6084 skb->mark = 0; 6085 skb_clear_tstamp(skb); 6086 } 6087 EXPORT_SYMBOL_GPL(skb_scrub_packet); 6088 6089 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 6090 { 6091 int mac_len, meta_len; 6092 void *meta; 6093 6094 if (skb_cow(skb, skb_headroom(skb)) < 0) { 6095 kfree_skb(skb); 6096 return NULL; 6097 } 6098 6099 mac_len = skb->data - skb_mac_header(skb); 6100 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 6101 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 6102 mac_len - VLAN_HLEN - ETH_TLEN); 6103 } 6104 6105 meta_len = skb_metadata_len(skb); 6106 if (meta_len) { 6107 meta = skb_metadata_end(skb) - meta_len; 6108 memmove(meta + VLAN_HLEN, meta, meta_len); 6109 } 6110 6111 skb->mac_header += VLAN_HLEN; 6112 return skb; 6113 } 6114 6115 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6116 { 6117 struct vlan_hdr *vhdr; 6118 u16 vlan_tci; 6119 6120 if (unlikely(skb_vlan_tag_present(skb))) { 6121 /* vlan_tci is already set-up so leave this for another time */ 6122 return skb; 6123 } 6124 6125 skb = skb_share_check(skb, GFP_ATOMIC); 6126 if (unlikely(!skb)) 6127 goto err_free; 6128 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6129 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6130 goto err_free; 6131 6132 vhdr = (struct vlan_hdr *)skb->data; 6133 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6134 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6135 6136 skb_pull_rcsum(skb, VLAN_HLEN); 6137 vlan_set_encap_proto(skb, vhdr); 6138 6139 skb = skb_reorder_vlan_header(skb); 6140 if (unlikely(!skb)) 6141 goto err_free; 6142 6143 skb_reset_network_header(skb); 6144 if (!skb_transport_header_was_set(skb)) 6145 skb_reset_transport_header(skb); 6146 skb_reset_mac_len(skb); 6147 6148 return skb; 6149 6150 err_free: 6151 kfree_skb(skb); 6152 return NULL; 6153 } 6154 EXPORT_SYMBOL(skb_vlan_untag); 6155 6156 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6157 { 6158 if (!pskb_may_pull(skb, write_len)) 6159 return -ENOMEM; 6160 6161 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6162 return 0; 6163 6164 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6165 } 6166 EXPORT_SYMBOL(skb_ensure_writable); 6167 6168 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6169 { 6170 int needed_headroom = dev->needed_headroom; 6171 int needed_tailroom = dev->needed_tailroom; 6172 6173 /* For tail taggers, we need to pad short frames ourselves, to ensure 6174 * that the tail tag does not fail at its role of being at the end of 6175 * the packet, once the conduit interface pads the frame. Account for 6176 * that pad length here, and pad later. 6177 */ 6178 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6179 needed_tailroom += ETH_ZLEN - skb->len; 6180 /* skb_headroom() returns unsigned int... 
*/ 6181 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); 6182 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); 6183 6184 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) 6185 /* No reallocation needed, yay! */ 6186 return 0; 6187 6188 return pskb_expand_head(skb, needed_headroom, needed_tailroom, 6189 GFP_ATOMIC); 6190 } 6191 EXPORT_SYMBOL(skb_ensure_writable_head_tail); 6192 6193 /* remove VLAN header from packet and update csum accordingly. 6194 * expects a non skb_vlan_tag_present skb with a vlan tag payload 6195 */ 6196 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 6197 { 6198 int offset = skb->data - skb_mac_header(skb); 6199 int err; 6200 6201 if (WARN_ONCE(offset, 6202 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 6203 offset)) { 6204 return -EINVAL; 6205 } 6206 6207 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 6208 if (unlikely(err)) 6209 return err; 6210 6211 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6212 6213 vlan_remove_tag(skb, vlan_tci); 6214 6215 skb->mac_header += VLAN_HLEN; 6216 6217 if (skb_network_offset(skb) < ETH_HLEN) 6218 skb_set_network_header(skb, ETH_HLEN); 6219 6220 skb_reset_mac_len(skb); 6221 6222 return err; 6223 } 6224 EXPORT_SYMBOL(__skb_vlan_pop); 6225 6226 /* Pop a vlan tag either from hwaccel or from payload. 6227 * Expects skb->data at mac header. 6228 */ 6229 int skb_vlan_pop(struct sk_buff *skb) 6230 { 6231 u16 vlan_tci; 6232 __be16 vlan_proto; 6233 int err; 6234 6235 if (likely(skb_vlan_tag_present(skb))) { 6236 __vlan_hwaccel_clear_tag(skb); 6237 } else { 6238 if (unlikely(!eth_type_vlan(skb->protocol))) 6239 return 0; 6240 6241 err = __skb_vlan_pop(skb, &vlan_tci); 6242 if (err) 6243 return err; 6244 } 6245 /* move next vlan tag to hw accel tag */ 6246 if (likely(!eth_type_vlan(skb->protocol))) 6247 return 0; 6248 6249 vlan_proto = skb->protocol; 6250 err = __skb_vlan_pop(skb, &vlan_tci); 6251 if (unlikely(err)) 6252 return err; 6253 6254 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6255 return 0; 6256 } 6257 EXPORT_SYMBOL(skb_vlan_pop); 6258 6259 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 6260 * Expects skb->data at mac header. 6261 */ 6262 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 6263 { 6264 if (skb_vlan_tag_present(skb)) { 6265 int offset = skb->data - skb_mac_header(skb); 6266 int err; 6267 6268 if (WARN_ONCE(offset, 6269 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 6270 offset)) { 6271 return -EINVAL; 6272 } 6273 6274 err = __vlan_insert_tag(skb, skb->vlan_proto, 6275 skb_vlan_tag_get(skb)); 6276 if (err) 6277 return err; 6278 6279 skb->protocol = skb->vlan_proto; 6280 skb->network_header -= VLAN_HLEN; 6281 6282 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6283 } 6284 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6285 return 0; 6286 } 6287 EXPORT_SYMBOL(skb_vlan_push); 6288 6289 /** 6290 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 6291 * 6292 * @skb: Socket buffer to modify 6293 * 6294 * Drop the Ethernet header of @skb. 6295 * 6296 * Expects that skb->data points to the mac header and that no VLAN tags are 6297 * present. 6298 * 6299 * Returns 0 on success, -errno otherwise. 
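 *
 * Hypothetical decap sketch (error handling abbreviated):
 *
 *	err = skb_eth_pop(skb);
 *	if (err)
 *		return err;
 *
 * skb->protocol is left untouched; a caller that knows the inner payload
 * type typically updates it (and, if needed, skb->pkt_type) afterwards.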
6300 */ 6301 int skb_eth_pop(struct sk_buff *skb) 6302 { 6303 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6304 skb_network_offset(skb) < ETH_HLEN) 6305 return -EPROTO; 6306 6307 skb_pull_rcsum(skb, ETH_HLEN); 6308 skb_reset_mac_header(skb); 6309 skb_reset_mac_len(skb); 6310 6311 return 0; 6312 } 6313 EXPORT_SYMBOL(skb_eth_pop); 6314 6315 /** 6316 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6317 * 6318 * @skb: Socket buffer to modify 6319 * @dst: Destination MAC address of the new header 6320 * @src: Source MAC address of the new header 6321 * 6322 * Prepend @skb with a new Ethernet header. 6323 * 6324 * Expects that skb->data points to the mac header, which must be empty. 6325 * 6326 * Returns 0 on success, -errno otherwise. 6327 */ 6328 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6329 const unsigned char *src) 6330 { 6331 struct ethhdr *eth; 6332 int err; 6333 6334 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6335 return -EPROTO; 6336 6337 err = skb_cow_head(skb, sizeof(*eth)); 6338 if (err < 0) 6339 return err; 6340 6341 skb_push(skb, sizeof(*eth)); 6342 skb_reset_mac_header(skb); 6343 skb_reset_mac_len(skb); 6344 6345 eth = eth_hdr(skb); 6346 ether_addr_copy(eth->h_dest, dst); 6347 ether_addr_copy(eth->h_source, src); 6348 eth->h_proto = skb->protocol; 6349 6350 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6351 6352 return 0; 6353 } 6354 EXPORT_SYMBOL(skb_eth_push); 6355 6356 /* Update the ethertype of hdr and the skb csum value if required. */ 6357 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6358 __be16 ethertype) 6359 { 6360 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6361 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6362 6363 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6364 } 6365 6366 hdr->h_proto = ethertype; 6367 } 6368 6369 /** 6370 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6371 * the packet 6372 * 6373 * @skb: buffer 6374 * @mpls_lse: MPLS label stack entry to push 6375 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6376 * @mac_len: length of the MAC header 6377 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6378 * ethernet 6379 * 6380 * Expects skb->data at mac header. 6381 * 6382 * Returns 0 on success, -errno otherwise. 6383 */ 6384 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6385 int mac_len, bool ethernet) 6386 { 6387 struct mpls_shim_hdr *lse; 6388 int err; 6389 6390 if (unlikely(!eth_p_mpls(mpls_proto))) 6391 return -EINVAL; 6392 6393 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
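 * An skb with skb->encapsulation set already uses the inner_* header
 * offsets for tunnel offload, and MPLS GSO needs them as well, so the
 * push is refused instead of corrupting the existing offload state.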
*/ 6394 if (skb->encapsulation) 6395 return -EINVAL; 6396 6397 err = skb_cow_head(skb, MPLS_HLEN); 6398 if (unlikely(err)) 6399 return err; 6400 6401 if (!skb->inner_protocol) { 6402 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6403 skb_set_inner_protocol(skb, skb->protocol); 6404 } 6405 6406 skb_push(skb, MPLS_HLEN); 6407 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6408 mac_len); 6409 skb_reset_mac_header(skb); 6410 skb_set_network_header(skb, mac_len); 6411 skb_reset_mac_len(skb); 6412 6413 lse = mpls_hdr(skb); 6414 lse->label_stack_entry = mpls_lse; 6415 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6416 6417 if (ethernet && mac_len >= ETH_HLEN) 6418 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6419 skb->protocol = mpls_proto; 6420 6421 return 0; 6422 } 6423 EXPORT_SYMBOL_GPL(skb_mpls_push); 6424 6425 /** 6426 * skb_mpls_pop() - pop the outermost MPLS header 6427 * 6428 * @skb: buffer 6429 * @next_proto: ethertype of header after popped MPLS header 6430 * @mac_len: length of the MAC header 6431 * @ethernet: flag to indicate if the packet is ethernet 6432 * 6433 * Expects skb->data at mac header. 6434 * 6435 * Returns 0 on success, -errno otherwise. 6436 */ 6437 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6438 bool ethernet) 6439 { 6440 int err; 6441 6442 if (unlikely(!eth_p_mpls(skb->protocol))) 6443 return 0; 6444 6445 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6446 if (unlikely(err)) 6447 return err; 6448 6449 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6450 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6451 mac_len); 6452 6453 __skb_pull(skb, MPLS_HLEN); 6454 skb_reset_mac_header(skb); 6455 skb_set_network_header(skb, mac_len); 6456 6457 if (ethernet && mac_len >= ETH_HLEN) { 6458 struct ethhdr *hdr; 6459 6460 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6461 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6462 skb_mod_eth_type(skb, hdr, next_proto); 6463 } 6464 skb->protocol = next_proto; 6465 6466 return 0; 6467 } 6468 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6469 6470 /** 6471 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6472 * 6473 * @skb: buffer 6474 * @mpls_lse: new MPLS label stack entry to update to 6475 * 6476 * Expects skb->data at mac header. 6477 * 6478 * Returns 0 on success, -errno otherwise. 6479 */ 6480 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6481 { 6482 int err; 6483 6484 if (unlikely(!eth_p_mpls(skb->protocol))) 6485 return -EINVAL; 6486 6487 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6488 if (unlikely(err)) 6489 return err; 6490 6491 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6492 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6493 6494 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6495 } 6496 6497 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6498 6499 return 0; 6500 } 6501 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6502 6503 /** 6504 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6505 * 6506 * @skb: buffer 6507 * 6508 * Expects skb->data at mac header. 6509 * 6510 * Returns 0 on success, -errno otherwise. 
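 *
 * When the decremented TTL reaches zero, the label stack entry is left
 * unmodified and -EINVAL is returned, so the caller can drop the packet
 * rather than forward it with an expired TTL.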
6511 */ 6512 int skb_mpls_dec_ttl(struct sk_buff *skb) 6513 { 6514 u32 lse; 6515 u8 ttl; 6516 6517 if (unlikely(!eth_p_mpls(skb->protocol))) 6518 return -EINVAL; 6519 6520 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6521 return -ENOMEM; 6522 6523 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6524 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6525 if (!--ttl) 6526 return -EINVAL; 6527 6528 lse &= ~MPLS_LS_TTL_MASK; 6529 lse |= ttl << MPLS_LS_TTL_SHIFT; 6530 6531 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6532 } 6533 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6534 6535 /** 6536 * alloc_skb_with_frags - allocate skb with page frags 6537 * 6538 * @header_len: size of linear part 6539 * @data_len: needed length in frags 6540 * @order: max page order desired. 6541 * @errcode: pointer to error code if any 6542 * @gfp_mask: allocation mask 6543 * 6544 * This can be used to allocate a paged skb, given a maximal order for frags. 6545 */ 6546 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6547 unsigned long data_len, 6548 int order, 6549 int *errcode, 6550 gfp_t gfp_mask) 6551 { 6552 unsigned long chunk; 6553 struct sk_buff *skb; 6554 struct page *page; 6555 int nr_frags = 0; 6556 6557 *errcode = -EMSGSIZE; 6558 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6559 return NULL; 6560 6561 *errcode = -ENOBUFS; 6562 skb = alloc_skb(header_len, gfp_mask); 6563 if (!skb) 6564 return NULL; 6565 6566 while (data_len) { 6567 if (nr_frags == MAX_SKB_FRAGS - 1) 6568 goto failure; 6569 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6570 order--; 6571 6572 if (order) { 6573 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6574 __GFP_COMP | 6575 __GFP_NOWARN, 6576 order); 6577 if (!page) { 6578 order--; 6579 continue; 6580 } 6581 } else { 6582 page = alloc_page(gfp_mask); 6583 if (!page) 6584 goto failure; 6585 } 6586 chunk = min_t(unsigned long, data_len, 6587 PAGE_SIZE << order); 6588 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6589 nr_frags++; 6590 skb->truesize += (PAGE_SIZE << order); 6591 data_len -= chunk; 6592 } 6593 return skb; 6594 6595 failure: 6596 kfree_skb(skb); 6597 return NULL; 6598 } 6599 EXPORT_SYMBOL(alloc_skb_with_frags); 6600 6601 /* carve out the first off bytes from skb when off < headlen */ 6602 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6603 const int headlen, gfp_t gfp_mask) 6604 { 6605 int i; 6606 unsigned int size = skb_end_offset(skb); 6607 int new_hlen = headlen - off; 6608 u8 *data; 6609 6610 if (skb_pfmemalloc(skb)) 6611 gfp_mask |= __GFP_MEMALLOC; 6612 6613 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6614 if (!data) 6615 return -ENOMEM; 6616 size = SKB_WITH_OVERHEAD(size); 6617 6618 /* Copy real data, and all frags */ 6619 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6620 skb->len -= off; 6621 6622 memcpy((struct skb_shared_info *)(data + size), 6623 skb_shinfo(skb), 6624 offsetof(struct skb_shared_info, 6625 frags[skb_shinfo(skb)->nr_frags])); 6626 if (skb_cloned(skb)) { 6627 /* drop the old head gracefully */ 6628 if (skb_orphan_frags(skb, gfp_mask)) { 6629 skb_kfree_head(data, size); 6630 return -ENOMEM; 6631 } 6632 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6633 skb_frag_ref(skb, i); 6634 if (skb_has_frag_list(skb)) 6635 skb_clone_fraglist(skb); 6636 skb_release_data(skb, SKB_CONSUMED); 6637 } else { 6638 /* we can reuse existing recount- all we did was 6639 * relocate values 6640 */ 6641 skb_free_head(skb); 6642 } 6643 
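	/* The old head is gone (or its reference dropped); commit the new one.
	 * The carved linear data lives at @data and the copied shared info at
	 * @data + @size, so rebuild the skb bookkeeping around that layout.
	 */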
6644 skb->head = data; 6645 skb->data = data; 6646 skb->head_frag = 0; 6647 skb_set_end_offset(skb, size); 6648 skb_set_tail_pointer(skb, skb_headlen(skb)); 6649 skb_headers_offset_update(skb, 0); 6650 skb->cloned = 0; 6651 skb->hdr_len = 0; 6652 skb->nohdr = 0; 6653 atomic_set(&skb_shinfo(skb)->dataref, 1); 6654 6655 return 0; 6656 } 6657 6658 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6659 6660 /* carve out the first eat bytes from skb's frag_list. May recurse into 6661 * pskb_carve() 6662 */ 6663 static int pskb_carve_frag_list(struct sk_buff *skb, 6664 struct skb_shared_info *shinfo, int eat, 6665 gfp_t gfp_mask) 6666 { 6667 struct sk_buff *list = shinfo->frag_list; 6668 struct sk_buff *clone = NULL; 6669 struct sk_buff *insp = NULL; 6670 6671 do { 6672 if (!list) { 6673 pr_err("Not enough bytes to eat. Want %d\n", eat); 6674 return -EFAULT; 6675 } 6676 if (list->len <= eat) { 6677 /* Eaten as whole. */ 6678 eat -= list->len; 6679 list = list->next; 6680 insp = list; 6681 } else { 6682 /* Eaten partially. */ 6683 if (skb_shared(list)) { 6684 clone = skb_clone(list, gfp_mask); 6685 if (!clone) 6686 return -ENOMEM; 6687 insp = list->next; 6688 list = clone; 6689 } else { 6690 /* This may be pulled without problems. */ 6691 insp = list; 6692 } 6693 if (pskb_carve(list, eat, gfp_mask) < 0) { 6694 kfree_skb(clone); 6695 return -ENOMEM; 6696 } 6697 break; 6698 } 6699 } while (eat); 6700 6701 /* Free pulled out fragments. */ 6702 while ((list = shinfo->frag_list) != insp) { 6703 shinfo->frag_list = list->next; 6704 consume_skb(list); 6705 } 6706 /* And insert new clone at head. */ 6707 if (clone) { 6708 clone->next = list; 6709 shinfo->frag_list = clone; 6710 } 6711 return 0; 6712 } 6713 6714 /* carve off first len bytes from skb. Split line (off) is in the 6715 * non-linear part of skb 6716 */ 6717 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6718 int pos, gfp_t gfp_mask) 6719 { 6720 int i, k = 0; 6721 unsigned int size = skb_end_offset(skb); 6722 u8 *data; 6723 const int nfrags = skb_shinfo(skb)->nr_frags; 6724 struct skb_shared_info *shinfo; 6725 6726 if (skb_pfmemalloc(skb)) 6727 gfp_mask |= __GFP_MEMALLOC; 6728 6729 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6730 if (!data) 6731 return -ENOMEM; 6732 size = SKB_WITH_OVERHEAD(size); 6733 6734 memcpy((struct skb_shared_info *)(data + size), 6735 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6736 if (skb_orphan_frags(skb, gfp_mask)) { 6737 skb_kfree_head(data, size); 6738 return -ENOMEM; 6739 } 6740 shinfo = (struct skb_shared_info *)(data + size); 6741 for (i = 0; i < nfrags; i++) { 6742 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6743 6744 if (pos + fsize > off) { 6745 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6746 6747 if (pos < off) { 6748 /* Split frag. 6749 * We have two variants in this case: 6750 * 1. Move all the frag to the second 6751 * part, if it is possible. F.e. 6752 * this approach is mandatory for TUX, 6753 * where splitting is expensive. 6754 * 2. Split is accurately. We make this. 
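 * Here the second variant is used: the first kept fragment is advanced
 * by (off - pos) so that the carved skb starts exactly at @off.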
6755 */ 6756 skb_frag_off_add(&shinfo->frags[0], off - pos); 6757 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6758 } 6759 skb_frag_ref(skb, i); 6760 k++; 6761 } 6762 pos += fsize; 6763 } 6764 shinfo->nr_frags = k; 6765 if (skb_has_frag_list(skb)) 6766 skb_clone_fraglist(skb); 6767 6768 /* split line is in frag list */ 6769 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6770 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6771 if (skb_has_frag_list(skb)) 6772 kfree_skb_list(skb_shinfo(skb)->frag_list); 6773 skb_kfree_head(data, size); 6774 return -ENOMEM; 6775 } 6776 skb_release_data(skb, SKB_CONSUMED); 6777 6778 skb->head = data; 6779 skb->head_frag = 0; 6780 skb->data = data; 6781 skb_set_end_offset(skb, size); 6782 skb_reset_tail_pointer(skb); 6783 skb_headers_offset_update(skb, 0); 6784 skb->cloned = 0; 6785 skb->hdr_len = 0; 6786 skb->nohdr = 0; 6787 skb->len -= off; 6788 skb->data_len = skb->len; 6789 atomic_set(&skb_shinfo(skb)->dataref, 1); 6790 return 0; 6791 } 6792 6793 /* remove len bytes from the beginning of the skb */ 6794 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6795 { 6796 int headlen = skb_headlen(skb); 6797 6798 if (len < headlen) 6799 return pskb_carve_inside_header(skb, len, headlen, gfp); 6800 else 6801 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6802 } 6803 6804 /* Extract to_copy bytes starting at off from skb, and return this in 6805 * a new skb 6806 */ 6807 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6808 int to_copy, gfp_t gfp) 6809 { 6810 struct sk_buff *clone = skb_clone(skb, gfp); 6811 6812 if (!clone) 6813 return NULL; 6814 6815 if (pskb_carve(clone, off, gfp) < 0 || 6816 pskb_trim(clone, to_copy)) { 6817 kfree_skb(clone); 6818 return NULL; 6819 } 6820 return clone; 6821 } 6822 EXPORT_SYMBOL(pskb_extract); 6823 6824 /** 6825 * skb_condense - try to get rid of fragments/frag_list if possible 6826 * @skb: buffer 6827 * 6828 * Can be used to save memory before skb is added to a busy queue. 6829 * If packet has bytes in frags and enough tail room in skb->head, 6830 * pull all of them, so that we can free the frags right now and adjust 6831 * truesize. 6832 * Notes: 6833 * We do not reallocate skb->head thus can not fail. 6834 * Caller must re-evaluate skb->truesize if needed. 6835 */ 6836 void skb_condense(struct sk_buff *skb) 6837 { 6838 if (skb->data_len) { 6839 if (skb->data_len > skb->end - skb->tail || 6840 skb_cloned(skb)) 6841 return; 6842 6843 /* Nice, we can free page frag(s) right now */ 6844 __pskb_pull_tail(skb, skb->data_len); 6845 } 6846 /* At this point, skb->truesize might be over estimated, 6847 * because skb had a fragment, and fragments do not tell 6848 * their truesize. 6849 * When we pulled its content into skb->head, fragment 6850 * was freed, but __pskb_pull_tail() could not possibly 6851 * adjust skb->truesize, not knowing the frag truesize. 6852 */ 6853 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6854 } 6855 EXPORT_SYMBOL(skb_condense); 6856 6857 #ifdef CONFIG_SKB_EXTENSIONS 6858 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6859 { 6860 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6861 } 6862 6863 /** 6864 * __skb_ext_alloc - allocate a new skb extensions storage 6865 * 6866 * @flags: See kmalloc(). 6867 * 6868 * Returns the newly allocated pointer. The pointer can later attached to a 6869 * skb via __skb_ext_set(). 6870 * Note: caller must handle the skb_ext as an opaque data. 
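 *
 * A hedged sketch of the allocate-then-attach pattern (@sp is an assumed
 * local; SKB_EXT_SEC_PATH serves purely as an example id):
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL);
 *
 *	if (!ext)
 *		return -ENOMEM;
 *	sp = __skb_ext_set(skb, SKB_EXT_SEC_PATH, ext);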
6871 */ 6872 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6873 { 6874 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6875 6876 if (new) { 6877 memset(new->offset, 0, sizeof(new->offset)); 6878 refcount_set(&new->refcnt, 1); 6879 } 6880 6881 return new; 6882 } 6883 6884 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6885 unsigned int old_active) 6886 { 6887 struct skb_ext *new; 6888 6889 if (refcount_read(&old->refcnt) == 1) 6890 return old; 6891 6892 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6893 if (!new) 6894 return NULL; 6895 6896 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6897 refcount_set(&new->refcnt, 1); 6898 6899 #ifdef CONFIG_XFRM 6900 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6901 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6902 unsigned int i; 6903 6904 for (i = 0; i < sp->len; i++) 6905 xfrm_state_hold(sp->xvec[i]); 6906 } 6907 #endif 6908 #ifdef CONFIG_MCTP_FLOWS 6909 if (old_active & (1 << SKB_EXT_MCTP)) { 6910 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6911 6912 if (flow->key) 6913 refcount_inc(&flow->key->refs); 6914 } 6915 #endif 6916 __skb_ext_put(old); 6917 return new; 6918 } 6919 6920 /** 6921 * __skb_ext_set - attach the specified extension storage to this skb 6922 * @skb: buffer 6923 * @id: extension id 6924 * @ext: extension storage previously allocated via __skb_ext_alloc() 6925 * 6926 * Existing extensions, if any, are cleared. 6927 * 6928 * Returns the pointer to the extension. 6929 */ 6930 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6931 struct skb_ext *ext) 6932 { 6933 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6934 6935 skb_ext_put(skb); 6936 newlen = newoff + skb_ext_type_len[id]; 6937 ext->chunks = newlen; 6938 ext->offset[id] = newoff; 6939 skb->extensions = ext; 6940 skb->active_extensions = 1 << id; 6941 return skb_ext_get_ptr(ext, id); 6942 } 6943 6944 /** 6945 * skb_ext_add - allocate space for given extension, COW if needed 6946 * @skb: buffer 6947 * @id: extension to allocate space for 6948 * 6949 * Allocates enough space for the given extension. 6950 * If the extension is already present, a pointer to that extension 6951 * is returned. 6952 * 6953 * If the skb was cloned, COW applies and the returned memory can be 6954 * modified without changing the extension space of clones buffers. 6955 * 6956 * Returns pointer to the extension or NULL on allocation failure. 
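 *
 * Minimal usage sketch (SKB_EXT_BRIDGE_NF and the drop label are only
 * examples):
 *
 *	struct nf_bridge_info *nf_bridge;
 *
 *	nf_bridge = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
 *	if (!nf_bridge)
 *		goto drop;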
6957 */ 6958 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6959 { 6960 struct skb_ext *new, *old = NULL; 6961 unsigned int newlen, newoff; 6962 6963 if (skb->active_extensions) { 6964 old = skb->extensions; 6965 6966 new = skb_ext_maybe_cow(old, skb->active_extensions); 6967 if (!new) 6968 return NULL; 6969 6970 if (__skb_ext_exist(new, id)) 6971 goto set_active; 6972 6973 newoff = new->chunks; 6974 } else { 6975 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6976 6977 new = __skb_ext_alloc(GFP_ATOMIC); 6978 if (!new) 6979 return NULL; 6980 } 6981 6982 newlen = newoff + skb_ext_type_len[id]; 6983 new->chunks = newlen; 6984 new->offset[id] = newoff; 6985 set_active: 6986 skb->slow_gro = 1; 6987 skb->extensions = new; 6988 skb->active_extensions |= 1 << id; 6989 return skb_ext_get_ptr(new, id); 6990 } 6991 EXPORT_SYMBOL(skb_ext_add); 6992 6993 #ifdef CONFIG_XFRM 6994 static void skb_ext_put_sp(struct sec_path *sp) 6995 { 6996 unsigned int i; 6997 6998 for (i = 0; i < sp->len; i++) 6999 xfrm_state_put(sp->xvec[i]); 7000 } 7001 #endif 7002 7003 #ifdef CONFIG_MCTP_FLOWS 7004 static void skb_ext_put_mctp(struct mctp_flow *flow) 7005 { 7006 if (flow->key) 7007 mctp_key_unref(flow->key); 7008 } 7009 #endif 7010 7011 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 7012 { 7013 struct skb_ext *ext = skb->extensions; 7014 7015 skb->active_extensions &= ~(1 << id); 7016 if (skb->active_extensions == 0) { 7017 skb->extensions = NULL; 7018 __skb_ext_put(ext); 7019 #ifdef CONFIG_XFRM 7020 } else if (id == SKB_EXT_SEC_PATH && 7021 refcount_read(&ext->refcnt) == 1) { 7022 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 7023 7024 skb_ext_put_sp(sp); 7025 sp->len = 0; 7026 #endif 7027 } 7028 } 7029 EXPORT_SYMBOL(__skb_ext_del); 7030 7031 void __skb_ext_put(struct skb_ext *ext) 7032 { 7033 /* If this is last clone, nothing can increment 7034 * it after check passes. Avoids one atomic op. 7035 */ 7036 if (refcount_read(&ext->refcnt) == 1) 7037 goto free_now; 7038 7039 if (!refcount_dec_and_test(&ext->refcnt)) 7040 return; 7041 free_now: 7042 #ifdef CONFIG_XFRM 7043 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 7044 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 7045 #endif 7046 #ifdef CONFIG_MCTP_FLOWS 7047 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 7048 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 7049 #endif 7050 7051 kmem_cache_free(skbuff_ext_cache, ext); 7052 } 7053 EXPORT_SYMBOL(__skb_ext_put); 7054 #endif /* CONFIG_SKB_EXTENSIONS */ 7055 7056 static void kfree_skb_napi_cache(struct sk_buff *skb) 7057 { 7058 /* if SKB is a clone, don't handle this case */ 7059 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 7060 __kfree_skb(skb); 7061 return; 7062 } 7063 7064 local_bh_disable(); 7065 __napi_kfree_skb(skb, SKB_CONSUMED); 7066 local_bh_enable(); 7067 } 7068 7069 /** 7070 * skb_attempt_defer_free - queue skb for remote freeing 7071 * @skb: buffer 7072 * 7073 * Put @skb in a per-cpu list, using the cpu which 7074 * allocated the skb/pages to reduce false sharing 7075 * and memory zone spinlock contention. 
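 *
 * If the skb was allocated on the local cpu, the owning cpu is offline, or
 * that cpu's defer list already holds sysctl_skb_defer_max entries, the skb
 * is freed immediately instead of being queued.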
7076 */ 7077 void skb_attempt_defer_free(struct sk_buff *skb) 7078 { 7079 int cpu = skb->alloc_cpu; 7080 struct softnet_data *sd; 7081 unsigned int defer_max; 7082 bool kick; 7083 7084 if (cpu == raw_smp_processor_id() || 7085 WARN_ON_ONCE(cpu >= nr_cpu_ids) || 7086 !cpu_online(cpu)) { 7087 nodefer: kfree_skb_napi_cache(skb); 7088 return; 7089 } 7090 7091 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 7092 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 7093 7094 sd = &per_cpu(softnet_data, cpu); 7095 defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max); 7096 if (READ_ONCE(sd->defer_count) >= defer_max) 7097 goto nodefer; 7098 7099 spin_lock_bh(&sd->defer_lock); 7100 /* Send an IPI every time queue reaches half capacity. */ 7101 kick = sd->defer_count == (defer_max >> 1); 7102 /* Paired with the READ_ONCE() few lines above */ 7103 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 7104 7105 skb->next = sd->defer_list; 7106 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 7107 WRITE_ONCE(sd->defer_list, skb); 7108 spin_unlock_bh(&sd->defer_lock); 7109 7110 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 7111 * if we are unlucky enough (this seems very unlikely). 7112 */ 7113 if (unlikely(kick)) 7114 kick_defer_list_purge(sd, cpu); 7115 } 7116 7117 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 7118 size_t offset, size_t len) 7119 { 7120 const char *kaddr; 7121 __wsum csum; 7122 7123 kaddr = kmap_local_page(page); 7124 csum = csum_partial(kaddr + offset, len, 0); 7125 kunmap_local(kaddr); 7126 skb->csum = csum_block_add(skb->csum, csum, skb->len); 7127 } 7128 7129 /** 7130 * skb_splice_from_iter - Splice (or copy) pages to skbuff 7131 * @skb: The buffer to add pages to 7132 * @iter: Iterator representing the pages to be added 7133 * @maxsize: Maximum amount of pages to be added 7134 * @gfp: Allocation flags 7135 * 7136 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 7137 * extracts pages from an iterator and adds them to the socket buffer if 7138 * possible, copying them to fragments if not possible (such as if they're slab 7139 * pages). 7140 * 7141 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 7142 * insufficient space in the buffer to transfer anything. 
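 *
 * Hedged caller sketch for a sendmsg() implementation handling
 * MSG_SPLICE_PAGES (@copy, the per-skb budget, and @copied are assumed
 * variables):
 *
 *	spliced = skb_splice_from_iter(skb, &msg->msg_iter, copy, GFP_KERNEL);
 *	if (spliced < 0)
 *		goto out_err;
 *	copied += spliced;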
7143 */ 7144 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 7145 ssize_t maxsize, gfp_t gfp) 7146 { 7147 size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags); 7148 struct page *pages[8], **ppages = pages; 7149 ssize_t spliced = 0, ret = 0; 7150 unsigned int i; 7151 7152 while (iter->count > 0) { 7153 ssize_t space, nr, len; 7154 size_t off; 7155 7156 ret = -EMSGSIZE; 7157 space = frag_limit - skb_shinfo(skb)->nr_frags; 7158 if (space < 0) 7159 break; 7160 7161 /* We might be able to coalesce without increasing nr_frags */ 7162 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 7163 7164 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 7165 if (len <= 0) { 7166 ret = len ?: -EIO; 7167 break; 7168 } 7169 7170 i = 0; 7171 do { 7172 struct page *page = pages[i++]; 7173 size_t part = min_t(size_t, PAGE_SIZE - off, len); 7174 7175 ret = -EIO; 7176 if (WARN_ON_ONCE(!sendpage_ok(page))) 7177 goto out; 7178 7179 ret = skb_append_pagefrags(skb, page, off, part, 7180 frag_limit); 7181 if (ret < 0) { 7182 iov_iter_revert(iter, len); 7183 goto out; 7184 } 7185 7186 if (skb->ip_summed == CHECKSUM_NONE) 7187 skb_splice_csum_page(skb, page, off, part); 7188 7189 off = 0; 7190 spliced += part; 7191 maxsize -= part; 7192 len -= part; 7193 } while (len > 0); 7194 7195 if (maxsize <= 0) 7196 break; 7197 } 7198 7199 out: 7200 skb_len_add(skb, spliced); 7201 return spliced ?: ret; 7202 } 7203 EXPORT_SYMBOL(skb_splice_from_iter); 7204 7205 static __always_inline 7206 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7207 size_t len, void *to, void *priv2) 7208 { 7209 __wsum *csum = priv2; 7210 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7211 7212 *csum = csum_block_add(*csum, next, progress); 7213 return 0; 7214 } 7215 7216 static __always_inline 7217 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7218 size_t len, void *to, void *priv2) 7219 { 7220 __wsum next, *csum = priv2; 7221 7222 next = csum_and_copy_from_user(iter_from, to + progress, len); 7223 *csum = csum_block_add(*csum, next, progress); 7224 return next ? 0 : len; 7225 } 7226 7227 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7228 __wsum *csum, struct iov_iter *i) 7229 { 7230 size_t copied; 7231 7232 if (WARN_ON_ONCE(!i->data_source)) 7233 return false; 7234 copied = iterate_and_advance2(i, bytes, addr, csum, 7235 copy_from_user_iter_csum, 7236 memcpy_from_iter_csum); 7237 if (likely(copied == bytes)) 7238 return true; 7239 iov_iter_revert(i, copied); 7240 return false; 7241 } 7242 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7243
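/* Hedged usage note for csum_and_copy_from_iter_full() (identifiers are
 * assumptions): callers that copy user data into an skb while checksumming
 * in one pass typically seed a zero csum, let the helper fold the copied
 * bytes into it, and treat a false return as -EFAULT:
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(skb_put(skb, copy), copy, &csum, from))
 *		return -EFAULT;
 *	skb->csum = csum_block_add(skb->csum, csum, odd);
 *
 * where @odd is the offset of the new data within the checksummed region.
 * On failure the iterator has already been reverted, so no fixup of @from
 * is needed.
 */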