// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE					\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM						\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)

int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long as the
 * netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
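 *
 * For reference, this is roughly how the fast-path helpers reach it (an
 * illustrative sketch of the overflow check, not a verbatim copy of the
 * skb_put() implementation found later in this file):
 *
 *	void *skb_put(struct sk_buff *skb, unsigned int len)
 *	{
 *		void *tmp = skb_tail_pointer(skb);
 *
 *		skb->tail += len;
 *		skb->len  += len;
 *		if (unlikely(skb->tail > skb->end))
 *			skb_over_panic(skb, len, __builtin_return_address(0));
 *		return tmp;
 *	}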
199 */ 200 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, 201 const char msg[]) 202 { 203 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n", 204 msg, addr, skb->len, sz, skb->head, skb->data, 205 (unsigned long)skb->tail, (unsigned long)skb->end, 206 skb->dev ? skb->dev->name : "<NULL>"); 207 BUG(); 208 } 209 210 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) 211 { 212 skb_panic(skb, sz, addr, __func__); 213 } 214 215 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) 216 { 217 skb_panic(skb, sz, addr, __func__); 218 } 219 220 #define NAPI_SKB_CACHE_SIZE 64 221 #define NAPI_SKB_CACHE_BULK 16 222 #define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2) 223 224 #if PAGE_SIZE == SZ_4K 225 226 #define NAPI_HAS_SMALL_PAGE_FRAG 1 227 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc) 228 229 /* specialized page frag allocator using a single order 0 page 230 * and slicing it into 1K sized fragment. Constrained to systems 231 * with a very limited amount of 1K fragments fitting a single 232 * page - to avoid excessive truesize underestimation 233 */ 234 235 struct page_frag_1k { 236 void *va; 237 u16 offset; 238 bool pfmemalloc; 239 }; 240 241 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp) 242 { 243 struct page *page; 244 int offset; 245 246 offset = nc->offset - SZ_1K; 247 if (likely(offset >= 0)) 248 goto use_frag; 249 250 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 251 if (!page) 252 return NULL; 253 254 nc->va = page_address(page); 255 nc->pfmemalloc = page_is_pfmemalloc(page); 256 offset = PAGE_SIZE - SZ_1K; 257 page_ref_add(page, offset / SZ_1K); 258 259 use_frag: 260 nc->offset = offset; 261 return nc->va + offset; 262 } 263 #else 264 265 /* the small page is actually unused in this build; add dummy helpers 266 * to please the compiler and avoid later preprocessor's conditionals 267 */ 268 #define NAPI_HAS_SMALL_PAGE_FRAG 0 269 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false 270 271 struct page_frag_1k { 272 }; 273 274 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask) 275 { 276 return NULL; 277 } 278 279 #endif 280 281 struct napi_alloc_cache { 282 struct page_frag_cache page; 283 struct page_frag_1k page_small; 284 unsigned int skb_count; 285 void *skb_cache[NAPI_SKB_CACHE_SIZE]; 286 }; 287 288 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); 289 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); 290 291 /* Double check that napi_get_frags() allocates skbs with 292 * skb->head being backed by slab, not a page fragment. 293 * This is to make sure bug fixed in 3226b158e67c 294 * ("net: avoid 32 x truesize under-estimation for tiny skbs") 295 * does not accidentally come back. 
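 *
 * When NAPI_HAS_SMALL_PAGE_FRAG is set, the check below intentionally
 * tolerates page-fragment heads, since tiny heads then come from the
 * per-CPU 1K slicer above rather than a shared page_frag_cache. As a
 * worked example of that slicer (assuming 4 KiB pages):
 * page_frag_alloc_1k() starts at offset = PAGE_SIZE - SZ_1K = 3072 and
 * does page_ref_add(page, 3), so together with the reference taken by
 * alloc_pages_node() the page carries exactly one reference per 1K
 * slice it will hand out.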
296 */ 297 void napi_get_frags_check(struct napi_struct *napi) 298 { 299 struct sk_buff *skb; 300 301 local_bh_disable(); 302 skb = napi_get_frags(napi); 303 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); 304 napi_free_frags(napi); 305 local_bh_enable(); 306 } 307 308 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 309 { 310 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 311 312 fragsz = SKB_DATA_ALIGN(fragsz); 313 314 return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 315 align_mask); 316 } 317 EXPORT_SYMBOL(__napi_alloc_frag_align); 318 319 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 320 { 321 void *data; 322 323 fragsz = SKB_DATA_ALIGN(fragsz); 324 if (in_hardirq() || irqs_disabled()) { 325 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); 326 327 data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, 328 align_mask); 329 } else { 330 struct napi_alloc_cache *nc; 331 332 local_bh_disable(); 333 nc = this_cpu_ptr(&napi_alloc_cache); 334 data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 335 align_mask); 336 local_bh_enable(); 337 } 338 return data; 339 } 340 EXPORT_SYMBOL(__netdev_alloc_frag_align); 341 342 static struct sk_buff *napi_skb_cache_get(void) 343 { 344 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 345 struct sk_buff *skb; 346 347 if (unlikely(!nc->skb_count)) { 348 nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, 349 GFP_ATOMIC, 350 NAPI_SKB_CACHE_BULK, 351 nc->skb_cache); 352 if (unlikely(!nc->skb_count)) 353 return NULL; 354 } 355 356 skb = nc->skb_cache[--nc->skb_count]; 357 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); 358 359 return skb; 360 } 361 362 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, 363 unsigned int size) 364 { 365 struct skb_shared_info *shinfo; 366 367 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 368 369 /* Assumes caller memset cleared SKB */ 370 skb->truesize = SKB_TRUESIZE(size); 371 refcount_set(&skb->users, 1); 372 skb->head = data; 373 skb->data = data; 374 skb_reset_tail_pointer(skb); 375 skb_set_end_offset(skb, size); 376 skb->mac_header = (typeof(skb->mac_header))~0U; 377 skb->transport_header = (typeof(skb->transport_header))~0U; 378 skb->alloc_cpu = raw_smp_processor_id(); 379 /* make sure we initialize shinfo sequentially */ 380 shinfo = skb_shinfo(skb); 381 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 382 atomic_set(&shinfo->dataref, 1); 383 384 skb_set_kcov_handle(skb, kcov_common_handle()); 385 } 386 387 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, 388 unsigned int *size) 389 { 390 void *resized; 391 392 /* Must find the allocation size (and grow it to match). */ 393 *size = ksize(data); 394 /* krealloc() will immediately return "data" when 395 * "ksize(data)" is requested: it is the existing upper 396 * bounds. As a result, GFP_ATOMIC will be ignored. Note 397 * that this "new" pointer needs to be passed back to the 398 * caller for use so the __alloc_size hinting will be 399 * tracked correctly. 400 */ 401 resized = krealloc(data, *size, GFP_ATOMIC); 402 WARN_ON_ONCE(resized != data); 403 return resized; 404 } 405 406 /* build_skb() variant which can operate on slab buffers. 407 * Note that this should be used sparingly as slab buffers 408 * cannot be combined efficiently by GRO! 
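 *
 * Illustrative usage sketch (hypothetical names, not taken from a real
 * caller): the buffer must be kmalloc()'ed with room for the payload
 * plus struct skb_shared_info, and it is not freed here on failure:
 *
 *	buf = kmalloc(SKB_HEAD_ALIGN(len), GFP_ATOMIC);
 *	...
 *	skb = slab_build_skb(buf);
 *	if (!skb)
 *		kfree(buf);
 *	else
 *		skb_put(skb, len);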
409 */ 410 struct sk_buff *slab_build_skb(void *data) 411 { 412 struct sk_buff *skb; 413 unsigned int size; 414 415 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC); 416 if (unlikely(!skb)) 417 return NULL; 418 419 memset(skb, 0, offsetof(struct sk_buff, tail)); 420 data = __slab_build_skb(skb, data, &size); 421 __finalize_skb_around(skb, data, size); 422 423 return skb; 424 } 425 EXPORT_SYMBOL(slab_build_skb); 426 427 /* Caller must provide SKB that is memset cleared */ 428 static void __build_skb_around(struct sk_buff *skb, void *data, 429 unsigned int frag_size) 430 { 431 unsigned int size = frag_size; 432 433 /* frag_size == 0 is considered deprecated now. Callers 434 * using slab buffer should use slab_build_skb() instead. 435 */ 436 if (WARN_ONCE(size == 0, "Use slab_build_skb() instead")) 437 data = __slab_build_skb(skb, data, &size); 438 439 __finalize_skb_around(skb, data, size); 440 } 441 442 /** 443 * __build_skb - build a network buffer 444 * @data: data buffer provided by caller 445 * @frag_size: size of data (must not be 0) 446 * 447 * Allocate a new &sk_buff. Caller provides space holding head and 448 * skb_shared_info. @data must have been allocated from the page 449 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc() 450 * allocation is deprecated, and callers should use slab_build_skb() 451 * instead.) 452 * The return is the new skb buffer. 453 * On a failure the return is %NULL, and @data is not freed. 454 * Notes : 455 * Before IO, driver allocates only data buffer where NIC put incoming frame 456 * Driver should add room at head (NET_SKB_PAD) and 457 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) 458 * After IO, driver calls build_skb(), to allocate sk_buff and populate it 459 * before giving packet to stack. 460 * RX rings only contains data buffers, not full skbs. 461 */ 462 struct sk_buff *__build_skb(void *data, unsigned int frag_size) 463 { 464 struct sk_buff *skb; 465 466 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC); 467 if (unlikely(!skb)) 468 return NULL; 469 470 memset(skb, 0, offsetof(struct sk_buff, tail)); 471 __build_skb_around(skb, data, frag_size); 472 473 return skb; 474 } 475 476 /* build_skb() is wrapper over __build_skb(), that specifically 477 * takes care of skb->head and skb->pfmemalloc 478 */ 479 struct sk_buff *build_skb(void *data, unsigned int frag_size) 480 { 481 struct sk_buff *skb = __build_skb(data, frag_size); 482 483 if (likely(skb && frag_size)) { 484 skb->head_frag = 1; 485 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 486 } 487 return skb; 488 } 489 EXPORT_SYMBOL(build_skb); 490 491 /** 492 * build_skb_around - build a network buffer around provided skb 493 * @skb: sk_buff provide by caller, must be memset cleared 494 * @data: data buffer provided by caller 495 * @frag_size: size of data 496 */ 497 struct sk_buff *build_skb_around(struct sk_buff *skb, 498 void *data, unsigned int frag_size) 499 { 500 if (unlikely(!skb)) 501 return NULL; 502 503 __build_skb_around(skb, data, frag_size); 504 505 if (frag_size) { 506 skb->head_frag = 1; 507 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 508 } 509 return skb; 510 } 511 EXPORT_SYMBOL(build_skb_around); 512 513 /** 514 * __napi_build_skb - build a network buffer 515 * @data: data buffer provided by caller 516 * @frag_size: size of data 517 * 518 * Version of __build_skb() that uses NAPI percpu caches to obtain 519 * skbuff_head instead of inplace allocation. 
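 *
 * The usual receive-path pattern for the build_skb() family is (a hedged
 * sketch; "rx_buf" and "frame_len" are hypothetical driver state, and the
 * buffer must carry the headroom/tailroom described at __build_skb()):
 *
 *	skb = napi_build_skb(rx_buf->data, rx_buf->frag_size);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}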
520 * 521 * Returns a new &sk_buff on success, %NULL on allocation failure. 522 */ 523 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size) 524 { 525 struct sk_buff *skb; 526 527 skb = napi_skb_cache_get(); 528 if (unlikely(!skb)) 529 return NULL; 530 531 memset(skb, 0, offsetof(struct sk_buff, tail)); 532 __build_skb_around(skb, data, frag_size); 533 534 return skb; 535 } 536 537 /** 538 * napi_build_skb - build a network buffer 539 * @data: data buffer provided by caller 540 * @frag_size: size of data 541 * 542 * Version of __napi_build_skb() that takes care of skb->head_frag 543 * and skb->pfmemalloc when the data is a page or page fragment. 544 * 545 * Returns a new &sk_buff on success, %NULL on allocation failure. 546 */ 547 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) 548 { 549 struct sk_buff *skb = __napi_build_skb(data, frag_size); 550 551 if (likely(skb) && frag_size) { 552 skb->head_frag = 1; 553 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 554 } 555 556 return skb; 557 } 558 EXPORT_SYMBOL(napi_build_skb); 559 560 /* 561 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 562 * the caller if emergency pfmemalloc reserves are being used. If it is and 563 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 564 * may be used. Otherwise, the packet data may be discarded until enough 565 * memory is free 566 */ 567 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, 568 bool *pfmemalloc) 569 { 570 bool ret_pfmemalloc = false; 571 size_t obj_size; 572 void *obj; 573 574 obj_size = SKB_HEAD_ALIGN(*size); 575 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && 576 !(flags & KMALLOC_NOT_NORMAL_BITS)) { 577 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, 578 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 579 node); 580 *size = SKB_SMALL_HEAD_CACHE_SIZE; 581 if (obj || !(gfp_pfmemalloc_allowed(flags))) 582 goto out; 583 /* Try again but now we are using pfmemalloc reserves */ 584 ret_pfmemalloc = true; 585 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node); 586 goto out; 587 } 588 589 obj_size = kmalloc_size_roundup(obj_size); 590 /* The following cast might truncate high-order bits of obj_size, this 591 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway. 592 */ 593 *size = (unsigned int)obj_size; 594 595 /* 596 * Try a regular allocation, when that fails and we're not entitled 597 * to the reserves, fail. 598 */ 599 obj = kmalloc_node_track_caller(obj_size, 600 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 601 node); 602 if (obj || !(gfp_pfmemalloc_allowed(flags))) 603 goto out; 604 605 /* Try again but now we are using pfmemalloc reserves */ 606 ret_pfmemalloc = true; 607 obj = kmalloc_node_track_caller(obj_size, flags, node); 608 609 out: 610 if (pfmemalloc) 611 *pfmemalloc = ret_pfmemalloc; 612 613 return obj; 614 } 615 616 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 617 * 'private' fields and also do memory statistics to find all the 618 * [BEEP] leaks. 619 * 620 */ 621 622 /** 623 * __alloc_skb - allocate a network buffer 624 * @size: size to allocate 625 * @gfp_mask: allocation mask 626 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 627 * instead of head cache and allocate a cloned (child) skb. 
628 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 629 * allocations in case the data is required for writeback 630 * @node: numa node to allocate memory on 631 * 632 * Allocate a new &sk_buff. The returned buffer has no headroom and a 633 * tail room of at least size bytes. The object has a reference count 634 * of one. The return is the buffer. On a failure the return is %NULL. 635 * 636 * Buffers may only be allocated from interrupts using a @gfp_mask of 637 * %GFP_ATOMIC. 638 */ 639 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 640 int flags, int node) 641 { 642 struct kmem_cache *cache; 643 struct sk_buff *skb; 644 bool pfmemalloc; 645 u8 *data; 646 647 cache = (flags & SKB_ALLOC_FCLONE) 648 ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache; 649 650 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 651 gfp_mask |= __GFP_MEMALLOC; 652 653 /* Get the HEAD */ 654 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI && 655 likely(node == NUMA_NO_NODE || node == numa_mem_id())) 656 skb = napi_skb_cache_get(); 657 else 658 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); 659 if (unlikely(!skb)) 660 return NULL; 661 prefetchw(skb); 662 663 /* We do our best to align skb_shared_info on a separate cache 664 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 665 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 666 * Both skb->head and skb_shared_info are cache line aligned. 667 */ 668 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc); 669 if (unlikely(!data)) 670 goto nodata; 671 /* kmalloc_size_roundup() might give us more room than requested. 672 * Put skb_shared_info exactly at the end of allocated zone, 673 * to allow max possible filling before reallocation. 674 */ 675 prefetchw(data + SKB_WITH_OVERHEAD(size)); 676 677 /* 678 * Only clear those fields we need to clear, not those that we will 679 * actually initialise below. Hence, don't put any more fields after 680 * the tail pointer in struct sk_buff! 681 */ 682 memset(skb, 0, offsetof(struct sk_buff, tail)); 683 __build_skb_around(skb, data, size); 684 skb->pfmemalloc = pfmemalloc; 685 686 if (flags & SKB_ALLOC_FCLONE) { 687 struct sk_buff_fclones *fclones; 688 689 fclones = container_of(skb, struct sk_buff_fclones, skb1); 690 691 skb->fclone = SKB_FCLONE_ORIG; 692 refcount_set(&fclones->fclone_ref, 1); 693 } 694 695 return skb; 696 697 nodata: 698 kmem_cache_free(cache, skb); 699 return NULL; 700 } 701 EXPORT_SYMBOL(__alloc_skb); 702 703 /** 704 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 705 * @dev: network device to receive on 706 * @len: length to allocate 707 * @gfp_mask: get_free_pages mask, passed to alloc_skb 708 * 709 * Allocate a new &sk_buff and assign it a usage count of one. The 710 * buffer has NET_SKB_PAD headroom built in. Users should allocate 711 * the headroom they think they need without accounting for the 712 * built in space. The built in space is used for optimisations. 713 * 714 * %NULL is returned if there is no free memory. 715 */ 716 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 717 gfp_t gfp_mask) 718 { 719 struct page_frag_cache *nc; 720 struct sk_buff *skb; 721 bool pfmemalloc; 722 void *data; 723 724 len += NET_SKB_PAD; 725 726 /* If requested length is either too small or too big, 727 * we use kmalloc() for skb->head allocation. 
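 *
 * Rough worked example (assuming 4 KiB pages and a 320 byte aligned
 * struct skb_shared_info, as on a common x86_64 build): with len already
 * including NET_SKB_PAD, the page-frag path below is used only for
 * 1024 - 320 < len <= 4096 - 320, i.e. roughly 705..3776 bytes; smaller
 * or larger requests, or ones with __GFP_DIRECT_RECLAIM/GFP_DMA set, go
 * through __alloc_skb() and get a kmalloc()-backed head instead.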
728 */ 729 if (len <= SKB_WITH_OVERHEAD(1024) || 730 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 731 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 732 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 733 if (!skb) 734 goto skb_fail; 735 goto skb_success; 736 } 737 738 len = SKB_HEAD_ALIGN(len); 739 740 if (sk_memalloc_socks()) 741 gfp_mask |= __GFP_MEMALLOC; 742 743 if (in_hardirq() || irqs_disabled()) { 744 nc = this_cpu_ptr(&netdev_alloc_cache); 745 data = page_frag_alloc(nc, len, gfp_mask); 746 pfmemalloc = nc->pfmemalloc; 747 } else { 748 local_bh_disable(); 749 nc = this_cpu_ptr(&napi_alloc_cache.page); 750 data = page_frag_alloc(nc, len, gfp_mask); 751 pfmemalloc = nc->pfmemalloc; 752 local_bh_enable(); 753 } 754 755 if (unlikely(!data)) 756 return NULL; 757 758 skb = __build_skb(data, len); 759 if (unlikely(!skb)) { 760 skb_free_frag(data); 761 return NULL; 762 } 763 764 if (pfmemalloc) 765 skb->pfmemalloc = 1; 766 skb->head_frag = 1; 767 768 skb_success: 769 skb_reserve(skb, NET_SKB_PAD); 770 skb->dev = dev; 771 772 skb_fail: 773 return skb; 774 } 775 EXPORT_SYMBOL(__netdev_alloc_skb); 776 777 /** 778 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 779 * @napi: napi instance this buffer was allocated for 780 * @len: length to allocate 781 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages 782 * 783 * Allocate a new sk_buff for use in NAPI receive. This buffer will 784 * attempt to allocate the head from a special reserved region used 785 * only for NAPI Rx allocation. By doing this we can save several 786 * CPU cycles by avoiding having to disable and re-enable IRQs. 787 * 788 * %NULL is returned if there is no free memory. 789 */ 790 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, 791 gfp_t gfp_mask) 792 { 793 struct napi_alloc_cache *nc; 794 struct sk_buff *skb; 795 bool pfmemalloc; 796 void *data; 797 798 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 799 len += NET_SKB_PAD + NET_IP_ALIGN; 800 801 /* If requested length is either too small or too big, 802 * we use kmalloc() for skb->head allocation. 803 * When the small frag allocator is available, prefer it over kmalloc 804 * for small fragments 805 */ 806 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || 807 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 808 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 809 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, 810 NUMA_NO_NODE); 811 if (!skb) 812 goto skb_fail; 813 goto skb_success; 814 } 815 816 nc = this_cpu_ptr(&napi_alloc_cache); 817 818 if (sk_memalloc_socks()) 819 gfp_mask |= __GFP_MEMALLOC; 820 821 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) { 822 /* we are artificially inflating the allocation size, but 823 * that is not as bad as it may look like, as: 824 * - 'len' less than GRO_MAX_HEAD makes little sense 825 * - On most systems, larger 'len' values lead to fragment 826 * size above 512 bytes 827 * - kmalloc would use the kmalloc-1k slab for such values 828 * - Builds with smaller GRO_MAX_HEAD will very likely do 829 * little networking, as that implies no WiFi and no 830 * tunnels support, and 32 bits arches. 
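 *	 - Using fixed 1K slices also bounds how many heads can share a
 *	   single page (PAGE_SIZE / SZ_1K of them), which is what keeps
 *	   the truesize under-estimation discussed near
 *	   napi_get_frags_check() within reason.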
831 */ 832 len = SZ_1K; 833 834 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); 835 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); 836 } else { 837 len = SKB_HEAD_ALIGN(len); 838 839 data = page_frag_alloc(&nc->page, len, gfp_mask); 840 pfmemalloc = nc->page.pfmemalloc; 841 } 842 843 if (unlikely(!data)) 844 return NULL; 845 846 skb = __napi_build_skb(data, len); 847 if (unlikely(!skb)) { 848 skb_free_frag(data); 849 return NULL; 850 } 851 852 if (pfmemalloc) 853 skb->pfmemalloc = 1; 854 skb->head_frag = 1; 855 856 skb_success: 857 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 858 skb->dev = napi->dev; 859 860 skb_fail: 861 return skb; 862 } 863 EXPORT_SYMBOL(__napi_alloc_skb); 864 865 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, 866 int off, int size, unsigned int truesize) 867 { 868 DEBUG_NET_WARN_ON_ONCE(size > truesize); 869 870 skb_fill_netmem_desc(skb, i, netmem, off, size); 871 skb->len += size; 872 skb->data_len += size; 873 skb->truesize += truesize; 874 } 875 EXPORT_SYMBOL(skb_add_rx_frag_netmem); 876 877 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 878 unsigned int truesize) 879 { 880 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 881 882 DEBUG_NET_WARN_ON_ONCE(size > truesize); 883 884 skb_frag_size_add(frag, size); 885 skb->len += size; 886 skb->data_len += size; 887 skb->truesize += truesize; 888 } 889 EXPORT_SYMBOL(skb_coalesce_rx_frag); 890 891 static void skb_drop_list(struct sk_buff **listp) 892 { 893 kfree_skb_list(*listp); 894 *listp = NULL; 895 } 896 897 static inline void skb_drop_fraglist(struct sk_buff *skb) 898 { 899 skb_drop_list(&skb_shinfo(skb)->frag_list); 900 } 901 902 static void skb_clone_fraglist(struct sk_buff *skb) 903 { 904 struct sk_buff *list; 905 906 skb_walk_frags(skb, list) 907 skb_get(list); 908 } 909 910 static bool is_pp_page(struct page *page) 911 { 912 return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; 913 } 914 915 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, 916 unsigned int headroom) 917 { 918 #if IS_ENABLED(CONFIG_PAGE_POOL) 919 u32 size, truesize, len, max_head_size, off; 920 struct sk_buff *skb = *pskb, *nskb; 921 int err, i, head_off; 922 void *data; 923 924 /* XDP does not support fraglist so we need to linearize 925 * the skb. 
926 */ 927 if (skb_has_frag_list(skb)) 928 return -EOPNOTSUPP; 929 930 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); 931 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) 932 return -ENOMEM; 933 934 size = min_t(u32, skb->len, max_head_size); 935 truesize = SKB_HEAD_ALIGN(size) + headroom; 936 data = page_pool_dev_alloc_va(pool, &truesize); 937 if (!data) 938 return -ENOMEM; 939 940 nskb = napi_build_skb(data, truesize); 941 if (!nskb) { 942 page_pool_free_va(pool, data, true); 943 return -ENOMEM; 944 } 945 946 skb_reserve(nskb, headroom); 947 skb_copy_header(nskb, skb); 948 skb_mark_for_recycle(nskb); 949 950 err = skb_copy_bits(skb, 0, nskb->data, size); 951 if (err) { 952 consume_skb(nskb); 953 return err; 954 } 955 skb_put(nskb, size); 956 957 head_off = skb_headroom(nskb) - skb_headroom(skb); 958 skb_headers_offset_update(nskb, head_off); 959 960 off = size; 961 len = skb->len - off; 962 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { 963 struct page *page; 964 u32 page_off; 965 966 size = min_t(u32, len, PAGE_SIZE); 967 truesize = size; 968 969 page = page_pool_dev_alloc(pool, &page_off, &truesize); 970 if (!page) { 971 consume_skb(nskb); 972 return -ENOMEM; 973 } 974 975 skb_add_rx_frag(nskb, i, page, page_off, size, truesize); 976 err = skb_copy_bits(skb, off, page_address(page) + page_off, 977 size); 978 if (err) { 979 consume_skb(nskb); 980 return err; 981 } 982 983 len -= size; 984 off += size; 985 } 986 987 consume_skb(skb); 988 *pskb = nskb; 989 990 return 0; 991 #else 992 return -EOPNOTSUPP; 993 #endif 994 } 995 EXPORT_SYMBOL(skb_pp_cow_data); 996 997 int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, 998 struct bpf_prog *prog) 999 { 1000 if (!prog->aux->xdp_has_frags) 1001 return -EINVAL; 1002 1003 return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM); 1004 } 1005 EXPORT_SYMBOL(skb_cow_data_for_xdp); 1006 1007 #if IS_ENABLED(CONFIG_PAGE_POOL) 1008 bool napi_pp_put_page(struct page *page, bool napi_safe) 1009 { 1010 bool allow_direct = false; 1011 struct page_pool *pp; 1012 1013 page = compound_head(page); 1014 1015 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 1016 * in order to preserve any existing bits, such as bit 0 for the 1017 * head page of compound page and bit 1 for pfmemalloc page, so 1018 * mask those bits for freeing side when doing below checking, 1019 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 1020 * to avoid recycling the pfmemalloc page. 1021 */ 1022 if (unlikely(!is_pp_page(page))) 1023 return false; 1024 1025 pp = page->pp; 1026 1027 /* Allow direct recycle if we have reasons to believe that we are 1028 * in the same context as the consumer would run, so there's 1029 * no possible race. 1030 * __page_pool_put_page() makes sure we're not in hardirq context 1031 * and interrupts are enabled prior to accessing the cache. 1032 */ 1033 if (napi_safe || in_softirq()) { 1034 const struct napi_struct *napi = READ_ONCE(pp->p.napi); 1035 unsigned int cpuid = smp_processor_id(); 1036 1037 allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid; 1038 allow_direct |= READ_ONCE(pp->cpuid) == cpuid; 1039 } 1040 1041 /* Driver set this to memory recycling info. Reset it on recycle. 1042 * This will *not* work for NIC using a split-page memory model. 1043 * The page will be returned to the pool here regardless of the 1044 * 'flipped' fragment being in use or not. 
	 */
	page_pool_put_full_page(pp, page, allow_direct);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(virt_to_page(data), napi_safe);
}

/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb: page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of a skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct page *head_page;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
		if (likely(is_pp_page(head_page)))
			page_pool_ref_page(head_page);
		else
			page_ref_inc(head_page);
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb, bool napi_safe)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head, napi_safe))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
			     bool napi_safe)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb, napi_safe);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling
	 */
	skb->pp_recycle = 0;
}

/*
 * Free an skbuff by memory without cleaning the state.
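 *
 * (Context for the fclone cases below: SKB_ALLOC_FCLONE skbs are carved
 * out of a struct sk_buff_fclones, where skb1 is the original, skb2 the
 * pre-allocated clone slot and fclone_ref counts how many of the two are
 * still live; the pair is returned to skbuff_fclone_cache only once
 * fclone_ref drops to zero.)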
1152 */ 1153 static void kfree_skbmem(struct sk_buff *skb) 1154 { 1155 struct sk_buff_fclones *fclones; 1156 1157 switch (skb->fclone) { 1158 case SKB_FCLONE_UNAVAILABLE: 1159 kmem_cache_free(net_hotdata.skbuff_cache, skb); 1160 return; 1161 1162 case SKB_FCLONE_ORIG: 1163 fclones = container_of(skb, struct sk_buff_fclones, skb1); 1164 1165 /* We usually free the clone (TX completion) before original skb 1166 * This test would have no chance to be true for the clone, 1167 * while here, branch prediction will be good. 1168 */ 1169 if (refcount_read(&fclones->fclone_ref) == 1) 1170 goto fastpath; 1171 break; 1172 1173 default: /* SKB_FCLONE_CLONE */ 1174 fclones = container_of(skb, struct sk_buff_fclones, skb2); 1175 break; 1176 } 1177 if (!refcount_dec_and_test(&fclones->fclone_ref)) 1178 return; 1179 fastpath: 1180 kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones); 1181 } 1182 1183 void skb_release_head_state(struct sk_buff *skb) 1184 { 1185 skb_dst_drop(skb); 1186 if (skb->destructor) { 1187 DEBUG_NET_WARN_ON_ONCE(in_hardirq()); 1188 skb->destructor(skb); 1189 } 1190 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 1191 nf_conntrack_put(skb_nfct(skb)); 1192 #endif 1193 skb_ext_put(skb); 1194 } 1195 1196 /* Free everything but the sk_buff shell. */ 1197 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason, 1198 bool napi_safe) 1199 { 1200 skb_release_head_state(skb); 1201 if (likely(skb->head)) 1202 skb_release_data(skb, reason, napi_safe); 1203 } 1204 1205 /** 1206 * __kfree_skb - private function 1207 * @skb: buffer 1208 * 1209 * Free an sk_buff. Release anything attached to the buffer. 1210 * Clean the state. This is an internal helper function. Users should 1211 * always call kfree_skb 1212 */ 1213 1214 void __kfree_skb(struct sk_buff *skb) 1215 { 1216 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false); 1217 kfree_skbmem(skb); 1218 } 1219 EXPORT_SYMBOL(__kfree_skb); 1220 1221 static __always_inline 1222 bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1223 { 1224 if (unlikely(!skb_unref(skb))) 1225 return false; 1226 1227 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET || 1228 u32_get_bits(reason, 1229 SKB_DROP_REASON_SUBSYS_MASK) >= 1230 SKB_DROP_REASON_SUBSYS_NUM); 1231 1232 if (reason == SKB_CONSUMED) 1233 trace_consume_skb(skb, __builtin_return_address(0)); 1234 else 1235 trace_kfree_skb(skb, __builtin_return_address(0), reason); 1236 return true; 1237 } 1238 1239 /** 1240 * kfree_skb_reason - free an sk_buff with special reason 1241 * @skb: buffer to free 1242 * @reason: reason why this skb is dropped 1243 * 1244 * Drop a reference to the buffer and free it if the usage count has 1245 * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' 1246 * tracepoint. 
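 *
 * Most call sites use the plain kfree_skb() wrapper from
 * <linux/skbuff.h>, which is simply kfree_skb_reason(skb,
 * SKB_DROP_REASON_NOT_SPECIFIED). Passing a precise reason, e.g.
 *
 *	kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
 *
 * lets tracing and drop monitors attribute the drop to its cause.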
1247 */ 1248 void __fix_address 1249 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1250 { 1251 if (__kfree_skb_reason(skb, reason)) 1252 __kfree_skb(skb); 1253 } 1254 EXPORT_SYMBOL(kfree_skb_reason); 1255 1256 #define KFREE_SKB_BULK_SIZE 16 1257 1258 struct skb_free_array { 1259 unsigned int skb_count; 1260 void *skb_array[KFREE_SKB_BULK_SIZE]; 1261 }; 1262 1263 static void kfree_skb_add_bulk(struct sk_buff *skb, 1264 struct skb_free_array *sa, 1265 enum skb_drop_reason reason) 1266 { 1267 /* if SKB is a clone, don't handle this case */ 1268 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { 1269 __kfree_skb(skb); 1270 return; 1271 } 1272 1273 skb_release_all(skb, reason, false); 1274 sa->skb_array[sa->skb_count++] = skb; 1275 1276 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { 1277 kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE, 1278 sa->skb_array); 1279 sa->skb_count = 0; 1280 } 1281 } 1282 1283 void __fix_address 1284 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) 1285 { 1286 struct skb_free_array sa; 1287 1288 sa.skb_count = 0; 1289 1290 while (segs) { 1291 struct sk_buff *next = segs->next; 1292 1293 if (__kfree_skb_reason(segs, reason)) { 1294 skb_poison_list(segs); 1295 kfree_skb_add_bulk(segs, &sa, reason); 1296 } 1297 1298 segs = next; 1299 } 1300 1301 if (sa.skb_count) 1302 kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array); 1303 } 1304 EXPORT_SYMBOL(kfree_skb_list_reason); 1305 1306 /* Dump skb information and contents. 1307 * 1308 * Must only be called from net_ratelimit()-ed paths. 1309 * 1310 * Dumps whole packets if full_pkt, only headers otherwise. 1311 */ 1312 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 1313 { 1314 struct skb_shared_info *sh = skb_shinfo(skb); 1315 struct net_device *dev = skb->dev; 1316 struct sock *sk = skb->sk; 1317 struct sk_buff *list_skb; 1318 bool has_mac, has_trans; 1319 int headroom, tailroom; 1320 int i, len, seg_len; 1321 1322 if (full_pkt) 1323 len = skb->len; 1324 else 1325 len = min_t(int, skb->len, MAX_HEADER + 128); 1326 1327 headroom = skb_headroom(skb); 1328 tailroom = skb_tailroom(skb); 1329 1330 has_mac = skb_mac_header_was_set(skb); 1331 has_trans = skb_transport_header_was_set(skb); 1332 1333 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 1334 "mac=(%d,%d) net=(%d,%d) trans=%d\n" 1335 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 1336 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 1337 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", 1338 level, skb->len, headroom, skb_headlen(skb), tailroom, 1339 has_mac ? skb->mac_header : -1, 1340 has_mac ? skb_mac_header_len(skb) : -1, 1341 skb->network_header, 1342 has_trans ? skb_network_header_len(skb) : -1, 1343 has_trans ? 
skb->transport_header : -1, 1344 sh->tx_flags, sh->nr_frags, 1345 sh->gso_size, sh->gso_type, sh->gso_segs, 1346 skb->csum, skb->ip_summed, skb->csum_complete_sw, 1347 skb->csum_valid, skb->csum_level, 1348 skb->hash, skb->sw_hash, skb->l4_hash, 1349 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); 1350 1351 if (dev) 1352 printk("%sdev name=%s feat=%pNF\n", 1353 level, dev->name, &dev->features); 1354 if (sk) 1355 printk("%ssk family=%hu type=%u proto=%u\n", 1356 level, sk->sk_family, sk->sk_type, sk->sk_protocol); 1357 1358 if (full_pkt && headroom) 1359 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, 1360 16, 1, skb->head, headroom, false); 1361 1362 seg_len = min_t(int, skb_headlen(skb), len); 1363 if (seg_len) 1364 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 1365 16, 1, skb->data, seg_len, false); 1366 len -= seg_len; 1367 1368 if (full_pkt && tailroom) 1369 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, 1370 16, 1, skb_tail_pointer(skb), tailroom, false); 1371 1372 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { 1373 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1374 u32 p_off, p_len, copied; 1375 struct page *p; 1376 u8 *vaddr; 1377 1378 skb_frag_foreach_page(frag, skb_frag_off(frag), 1379 skb_frag_size(frag), p, p_off, p_len, 1380 copied) { 1381 seg_len = min_t(int, p_len, len); 1382 vaddr = kmap_atomic(p); 1383 print_hex_dump(level, "skb frag: ", 1384 DUMP_PREFIX_OFFSET, 1385 16, 1, vaddr + p_off, seg_len, false); 1386 kunmap_atomic(vaddr); 1387 len -= seg_len; 1388 if (!len) 1389 break; 1390 } 1391 } 1392 1393 if (full_pkt && skb_has_frag_list(skb)) { 1394 printk("skb fraglist:\n"); 1395 skb_walk_frags(skb, list_skb) 1396 skb_dump(level, list_skb, true); 1397 } 1398 } 1399 EXPORT_SYMBOL(skb_dump); 1400 1401 /** 1402 * skb_tx_error - report an sk_buff xmit error 1403 * @skb: buffer that triggered an error 1404 * 1405 * Report xmit error if a device callback is tracking this skb. 1406 * skb must be freed afterwards. 
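 *
 * A typical error-path sketch in a driver looks like (hypothetical, not
 * taken from this file):
 *
 *	if (dma_mapping_error(dev, addr)) {
 *		skb_tx_error(skb);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}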
1407 */ 1408 void skb_tx_error(struct sk_buff *skb) 1409 { 1410 if (skb) { 1411 skb_zcopy_downgrade_managed(skb); 1412 skb_zcopy_clear(skb, true); 1413 } 1414 } 1415 EXPORT_SYMBOL(skb_tx_error); 1416 1417 #ifdef CONFIG_TRACEPOINTS 1418 /** 1419 * consume_skb - free an skbuff 1420 * @skb: buffer to free 1421 * 1422 * Drop a ref to the buffer and free it if the usage count has hit zero 1423 * Functions identically to kfree_skb, but kfree_skb assumes that the frame 1424 * is being dropped after a failure and notes that 1425 */ 1426 void consume_skb(struct sk_buff *skb) 1427 { 1428 if (!skb_unref(skb)) 1429 return; 1430 1431 trace_consume_skb(skb, __builtin_return_address(0)); 1432 __kfree_skb(skb); 1433 } 1434 EXPORT_SYMBOL(consume_skb); 1435 #endif 1436 1437 /** 1438 * __consume_stateless_skb - free an skbuff, assuming it is stateless 1439 * @skb: buffer to free 1440 * 1441 * Alike consume_skb(), but this variant assumes that this is the last 1442 * skb reference and all the head states have been already dropped 1443 */ 1444 void __consume_stateless_skb(struct sk_buff *skb) 1445 { 1446 trace_consume_skb(skb, __builtin_return_address(0)); 1447 skb_release_data(skb, SKB_CONSUMED, false); 1448 kfree_skbmem(skb); 1449 } 1450 1451 static void napi_skb_cache_put(struct sk_buff *skb) 1452 { 1453 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 1454 u32 i; 1455 1456 if (!kasan_mempool_poison_object(skb)) 1457 return; 1458 1459 nc->skb_cache[nc->skb_count++] = skb; 1460 1461 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { 1462 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) 1463 kasan_mempool_unpoison_object(nc->skb_cache[i], 1464 kmem_cache_size(net_hotdata.skbuff_cache)); 1465 1466 kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF, 1467 nc->skb_cache + NAPI_SKB_CACHE_HALF); 1468 nc->skb_count = NAPI_SKB_CACHE_HALF; 1469 } 1470 } 1471 1472 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) 1473 { 1474 skb_release_all(skb, reason, true); 1475 napi_skb_cache_put(skb); 1476 } 1477 1478 void napi_skb_free_stolen_head(struct sk_buff *skb) 1479 { 1480 if (unlikely(skb->slow_gro)) { 1481 nf_reset_ct(skb); 1482 skb_dst_drop(skb); 1483 skb_ext_put(skb); 1484 skb_orphan(skb); 1485 skb->slow_gro = 0; 1486 } 1487 napi_skb_cache_put(skb); 1488 } 1489 1490 void napi_consume_skb(struct sk_buff *skb, int budget) 1491 { 1492 /* Zero budget indicate non-NAPI context called us, like netpoll */ 1493 if (unlikely(!budget)) { 1494 dev_consume_skb_any(skb); 1495 return; 1496 } 1497 1498 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 1499 1500 if (!skb_unref(skb)) 1501 return; 1502 1503 /* if reaching here SKB is ready to free */ 1504 trace_consume_skb(skb, __builtin_return_address(0)); 1505 1506 /* if SKB is a clone, don't handle this case */ 1507 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 1508 __kfree_skb(skb); 1509 return; 1510 } 1511 1512 skb_release_all(skb, SKB_CONSUMED, !!budget); 1513 napi_skb_cache_put(skb); 1514 } 1515 EXPORT_SYMBOL(napi_consume_skb); 1516 1517 /* Make sure a field is contained by headers group */ 1518 #define CHECK_SKB_FIELD(field) \ 1519 BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ 1520 offsetof(struct sk_buff, headers.field)); \ 1521 1522 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 1523 { 1524 new->tstamp = old->tstamp; 1525 /* We do not copy old->sk */ 1526 new->dev = old->dev; 1527 memcpy(new->cb, old->cb, sizeof(old->cb)); 1528 skb_dst_copy(new, old); 1529 __skb_ext_copy(new, old); 1530 
__nf_copy(new, old, false); 1531 1532 /* Note : this field could be in the headers group. 1533 * It is not yet because we do not want to have a 16 bit hole 1534 */ 1535 new->queue_mapping = old->queue_mapping; 1536 1537 memcpy(&new->headers, &old->headers, sizeof(new->headers)); 1538 CHECK_SKB_FIELD(protocol); 1539 CHECK_SKB_FIELD(csum); 1540 CHECK_SKB_FIELD(hash); 1541 CHECK_SKB_FIELD(priority); 1542 CHECK_SKB_FIELD(skb_iif); 1543 CHECK_SKB_FIELD(vlan_proto); 1544 CHECK_SKB_FIELD(vlan_tci); 1545 CHECK_SKB_FIELD(transport_header); 1546 CHECK_SKB_FIELD(network_header); 1547 CHECK_SKB_FIELD(mac_header); 1548 CHECK_SKB_FIELD(inner_protocol); 1549 CHECK_SKB_FIELD(inner_transport_header); 1550 CHECK_SKB_FIELD(inner_network_header); 1551 CHECK_SKB_FIELD(inner_mac_header); 1552 CHECK_SKB_FIELD(mark); 1553 #ifdef CONFIG_NETWORK_SECMARK 1554 CHECK_SKB_FIELD(secmark); 1555 #endif 1556 #ifdef CONFIG_NET_RX_BUSY_POLL 1557 CHECK_SKB_FIELD(napi_id); 1558 #endif 1559 CHECK_SKB_FIELD(alloc_cpu); 1560 #ifdef CONFIG_XPS 1561 CHECK_SKB_FIELD(sender_cpu); 1562 #endif 1563 #ifdef CONFIG_NET_SCHED 1564 CHECK_SKB_FIELD(tc_index); 1565 #endif 1566 1567 } 1568 1569 /* 1570 * You should not add any new code to this function. Add it to 1571 * __copy_skb_header above instead. 1572 */ 1573 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 1574 { 1575 #define C(x) n->x = skb->x 1576 1577 n->next = n->prev = NULL; 1578 n->sk = NULL; 1579 __copy_skb_header(n, skb); 1580 1581 C(len); 1582 C(data_len); 1583 C(mac_len); 1584 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 1585 n->cloned = 1; 1586 n->nohdr = 0; 1587 n->peeked = 0; 1588 C(pfmemalloc); 1589 C(pp_recycle); 1590 n->destructor = NULL; 1591 C(tail); 1592 C(end); 1593 C(head); 1594 C(head_frag); 1595 C(data); 1596 C(truesize); 1597 refcount_set(&n->users, 1); 1598 1599 atomic_inc(&(skb_shinfo(skb)->dataref)); 1600 skb->cloned = 1; 1601 1602 return n; 1603 #undef C 1604 } 1605 1606 /** 1607 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg 1608 * @first: first sk_buff of the msg 1609 */ 1610 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) 1611 { 1612 struct sk_buff *n; 1613 1614 n = alloc_skb(0, GFP_ATOMIC); 1615 if (!n) 1616 return NULL; 1617 1618 n->len = first->len; 1619 n->data_len = first->len; 1620 n->truesize = first->truesize; 1621 1622 skb_shinfo(n)->frag_list = first; 1623 1624 __copy_skb_header(n, first); 1625 n->destructor = NULL; 1626 1627 return n; 1628 } 1629 EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1630 1631 /** 1632 * skb_morph - morph one skb into another 1633 * @dst: the skb to receive the contents 1634 * @src: the skb to supply the contents 1635 * 1636 * This is identical to skb_clone except that the target skb is 1637 * supplied by the user. 1638 * 1639 * The target skb is returned upon exit. 1640 */ 1641 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1642 { 1643 skb_release_all(dst, SKB_CONSUMED, false); 1644 return __skb_clone(dst, src); 1645 } 1646 EXPORT_SYMBOL_GPL(skb_morph); 1647 1648 int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1649 { 1650 unsigned long max_pg, num_pg, new_pg, old_pg, rlim; 1651 struct user_struct *user; 1652 1653 if (capable(CAP_IPC_LOCK) || !size) 1654 return 0; 1655 1656 rlim = rlimit(RLIMIT_MEMLOCK); 1657 if (rlim == RLIM_INFINITY) 1658 return 0; 1659 1660 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1661 max_pg = rlim >> PAGE_SHIFT; 1662 user = mmp->user ? 
: current_user(); 1663 1664 old_pg = atomic_long_read(&user->locked_vm); 1665 do { 1666 new_pg = old_pg + num_pg; 1667 if (new_pg > max_pg) 1668 return -ENOBUFS; 1669 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); 1670 1671 if (!mmp->user) { 1672 mmp->user = get_uid(user); 1673 mmp->num_pg = num_pg; 1674 } else { 1675 mmp->num_pg += num_pg; 1676 } 1677 1678 return 0; 1679 } 1680 EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1681 1682 void mm_unaccount_pinned_pages(struct mmpin *mmp) 1683 { 1684 if (mmp->user) { 1685 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1686 free_uid(mmp->user); 1687 } 1688 } 1689 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1690 1691 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 1692 { 1693 struct ubuf_info_msgzc *uarg; 1694 struct sk_buff *skb; 1695 1696 WARN_ON_ONCE(!in_task()); 1697 1698 skb = sock_omalloc(sk, 0, GFP_KERNEL); 1699 if (!skb) 1700 return NULL; 1701 1702 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 1703 uarg = (void *)skb->cb; 1704 uarg->mmp.user = NULL; 1705 1706 if (mm_account_pinned_pages(&uarg->mmp, size)) { 1707 kfree_skb(skb); 1708 return NULL; 1709 } 1710 1711 uarg->ubuf.callback = msg_zerocopy_callback; 1712 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 1713 uarg->len = 1; 1714 uarg->bytelen = size; 1715 uarg->zerocopy = 1; 1716 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; 1717 refcount_set(&uarg->ubuf.refcnt, 1); 1718 sock_hold(sk); 1719 1720 return &uarg->ubuf; 1721 } 1722 1723 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) 1724 { 1725 return container_of((void *)uarg, struct sk_buff, cb); 1726 } 1727 1728 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1729 struct ubuf_info *uarg) 1730 { 1731 if (uarg) { 1732 struct ubuf_info_msgzc *uarg_zc; 1733 const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 1734 u32 bytelen, next; 1735 1736 /* there might be non MSG_ZEROCOPY users */ 1737 if (uarg->callback != msg_zerocopy_callback) 1738 return NULL; 1739 1740 /* realloc only when socket is locked (TCP, UDP cork), 1741 * so uarg->len and sk_zckey access is serialized 1742 */ 1743 if (!sock_owned_by_user(sk)) { 1744 WARN_ON_ONCE(1); 1745 return NULL; 1746 } 1747 1748 uarg_zc = uarg_to_msgzc(uarg); 1749 bytelen = uarg_zc->bytelen + size; 1750 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { 1751 /* TCP can create new skb to attach new uarg */ 1752 if (sk->sk_type == SOCK_STREAM) 1753 goto new_alloc; 1754 return NULL; 1755 } 1756 1757 next = (u32)atomic_read(&sk->sk_zckey); 1758 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { 1759 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) 1760 return NULL; 1761 uarg_zc->len++; 1762 uarg_zc->bytelen = bytelen; 1763 atomic_set(&sk->sk_zckey, ++next); 1764 1765 /* no extra ref when appending to datagram (MSG_MORE) */ 1766 if (sk->sk_type == SOCK_STREAM) 1767 net_zcopy_get(uarg); 1768 1769 return uarg; 1770 } 1771 } 1772 1773 new_alloc: 1774 return msg_zerocopy_alloc(sk, size); 1775 } 1776 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 1777 1778 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 1779 { 1780 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 1781 u32 old_lo, old_hi; 1782 u64 sum_len; 1783 1784 old_lo = serr->ee.ee_info; 1785 old_hi = serr->ee.ee_data; 1786 sum_len = old_hi - old_lo + 1ULL + len; 1787 1788 if (sum_len >= (1ULL << 32)) 1789 return false; 1790 1791 if (lo != old_hi + 1) 1792 return false; 1793 1794 serr->ee.ee_data 
+= len; 1795 return true; 1796 } 1797 1798 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) 1799 { 1800 struct sk_buff *tail, *skb = skb_from_uarg(uarg); 1801 struct sock_exterr_skb *serr; 1802 struct sock *sk = skb->sk; 1803 struct sk_buff_head *q; 1804 unsigned long flags; 1805 bool is_zerocopy; 1806 u32 lo, hi; 1807 u16 len; 1808 1809 mm_unaccount_pinned_pages(&uarg->mmp); 1810 1811 /* if !len, there was only 1 call, and it was aborted 1812 * so do not queue a completion notification 1813 */ 1814 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 1815 goto release; 1816 1817 len = uarg->len; 1818 lo = uarg->id; 1819 hi = uarg->id + len - 1; 1820 is_zerocopy = uarg->zerocopy; 1821 1822 serr = SKB_EXT_ERR(skb); 1823 memset(serr, 0, sizeof(*serr)); 1824 serr->ee.ee_errno = 0; 1825 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 1826 serr->ee.ee_data = hi; 1827 serr->ee.ee_info = lo; 1828 if (!is_zerocopy) 1829 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 1830 1831 q = &sk->sk_error_queue; 1832 spin_lock_irqsave(&q->lock, flags); 1833 tail = skb_peek_tail(q); 1834 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 1835 !skb_zerocopy_notify_extend(tail, lo, len)) { 1836 __skb_queue_tail(q, skb); 1837 skb = NULL; 1838 } 1839 spin_unlock_irqrestore(&q->lock, flags); 1840 1841 sk_error_report(sk); 1842 1843 release: 1844 consume_skb(skb); 1845 sock_put(sk); 1846 } 1847 1848 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 1849 bool success) 1850 { 1851 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); 1852 1853 uarg_zc->zerocopy = uarg_zc->zerocopy & success; 1854 1855 if (refcount_dec_and_test(&uarg->refcnt)) 1856 __msg_zerocopy_callback(uarg_zc); 1857 } 1858 EXPORT_SYMBOL_GPL(msg_zerocopy_callback); 1859 1860 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1861 { 1862 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; 1863 1864 atomic_dec(&sk->sk_zckey); 1865 uarg_to_msgzc(uarg)->len--; 1866 1867 if (have_uref) 1868 msg_zerocopy_callback(NULL, uarg, true); 1869 } 1870 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 1871 1872 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1873 struct msghdr *msg, int len, 1874 struct ubuf_info *uarg) 1875 { 1876 struct ubuf_info *orig_uarg = skb_zcopy(skb); 1877 int err, orig_len = skb->len; 1878 1879 /* An skb can only point to one uarg. This edge case happens when 1880 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. 1881 */ 1882 if (orig_uarg && uarg != orig_uarg) 1883 return -EEXIST; 1884 1885 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); 1886 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1887 struct sock *save_sk = skb->sk; 1888 1889 /* Streams do not free skb on error. Reset to prev state. 
*/ 1890 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); 1891 skb->sk = sk; 1892 ___pskb_trim(skb, orig_len); 1893 skb->sk = save_sk; 1894 return err; 1895 } 1896 1897 skb_zcopy_set(skb, uarg, NULL); 1898 return skb->len - orig_len; 1899 } 1900 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 1901 1902 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) 1903 { 1904 int i; 1905 1906 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; 1907 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1908 skb_frag_ref(skb, i); 1909 } 1910 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); 1911 1912 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 1913 gfp_t gfp_mask) 1914 { 1915 if (skb_zcopy(orig)) { 1916 if (skb_zcopy(nskb)) { 1917 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 1918 if (!gfp_mask) { 1919 WARN_ON_ONCE(1); 1920 return -ENOMEM; 1921 } 1922 if (skb_uarg(nskb) == skb_uarg(orig)) 1923 return 0; 1924 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 1925 return -EIO; 1926 } 1927 skb_zcopy_set(nskb, skb_uarg(orig), NULL); 1928 } 1929 return 0; 1930 } 1931 1932 /** 1933 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 1934 * @skb: the skb to modify 1935 * @gfp_mask: allocation priority 1936 * 1937 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 1938 * It will copy all frags into kernel and drop the reference 1939 * to userspace pages. 1940 * 1941 * If this function is called from an interrupt gfp_mask() must be 1942 * %GFP_ATOMIC. 1943 * 1944 * Returns 0 on success or a negative error code on failure 1945 * to allocate kernel memory to copy to. 1946 */ 1947 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1948 { 1949 int num_frags = skb_shinfo(skb)->nr_frags; 1950 struct page *page, *head = NULL; 1951 int i, order, psize, new_frags; 1952 u32 d_off; 1953 1954 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1955 return -EINVAL; 1956 1957 if (!num_frags) 1958 goto release; 1959 1960 /* We might have to allocate high order pages, so compute what minimum 1961 * page order is needed. 
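 *
 * Worked example (assuming 4 KiB pages and MAX_SKB_FRAGS == 17): for
 * __skb_pagelen() of 100 KiB, order 0 only covers 17 * 4 KiB = 68 KiB,
 * so the loop below picks order 1 (17 * 8 KiB = 136 KiB) and new_frags
 * becomes DIV_ROUND_UP(100 KiB, 8 KiB) = 13 destination pages.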
1962 */ 1963 order = 0; 1964 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1965 order++; 1966 psize = (PAGE_SIZE << order); 1967 1968 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1969 for (i = 0; i < new_frags; i++) { 1970 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1971 if (!page) { 1972 while (head) { 1973 struct page *next = (struct page *)page_private(head); 1974 put_page(head); 1975 head = next; 1976 } 1977 return -ENOMEM; 1978 } 1979 set_page_private(page, (unsigned long)head); 1980 head = page; 1981 } 1982 1983 page = head; 1984 d_off = 0; 1985 for (i = 0; i < num_frags; i++) { 1986 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1987 u32 p_off, p_len, copied; 1988 struct page *p; 1989 u8 *vaddr; 1990 1991 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1992 p, p_off, p_len, copied) { 1993 u32 copy, done = 0; 1994 vaddr = kmap_atomic(p); 1995 1996 while (done < p_len) { 1997 if (d_off == psize) { 1998 d_off = 0; 1999 page = (struct page *)page_private(page); 2000 } 2001 copy = min_t(u32, psize - d_off, p_len - done); 2002 memcpy(page_address(page) + d_off, 2003 vaddr + p_off + done, copy); 2004 done += copy; 2005 d_off += copy; 2006 } 2007 kunmap_atomic(vaddr); 2008 } 2009 } 2010 2011 /* skb frags release userspace buffers */ 2012 for (i = 0; i < num_frags; i++) 2013 skb_frag_unref(skb, i); 2014 2015 /* skb frags point to kernel buffers */ 2016 for (i = 0; i < new_frags - 1; i++) { 2017 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 2018 head = (struct page *)page_private(head); 2019 } 2020 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 2021 d_off); 2022 skb_shinfo(skb)->nr_frags = new_frags; 2023 2024 release: 2025 skb_zcopy_clear(skb, false); 2026 return 0; 2027 } 2028 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 2029 2030 /** 2031 * skb_clone - duplicate an sk_buff 2032 * @skb: buffer to clone 2033 * @gfp_mask: allocation priority 2034 * 2035 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 2036 * copies share the same packet data but not structure. The new 2037 * buffer has a reference count of 1. If the allocation fails the 2038 * function returns %NULL otherwise the new buffer is returned. 2039 * 2040 * If this function is called from an interrupt gfp_mask() must be 2041 * %GFP_ATOMIC. 
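 *
 * Minimal usage sketch (illustrative only; the consumer function is
 * hypothetical):
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	deliver_somewhere(nskb);
 *
 * Because the clone shares the payload with the original, callers that
 * intend to modify the data should use skb_copy() or pskb_copy()
 * instead.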
2042 */ 2043 2044 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2045 { 2046 struct sk_buff_fclones *fclones = container_of(skb, 2047 struct sk_buff_fclones, 2048 skb1); 2049 struct sk_buff *n; 2050 2051 if (skb_orphan_frags(skb, gfp_mask)) 2052 return NULL; 2053 2054 if (skb->fclone == SKB_FCLONE_ORIG && 2055 refcount_read(&fclones->fclone_ref) == 1) { 2056 n = &fclones->skb2; 2057 refcount_set(&fclones->fclone_ref, 2); 2058 n->fclone = SKB_FCLONE_CLONE; 2059 } else { 2060 if (skb_pfmemalloc(skb)) 2061 gfp_mask |= __GFP_MEMALLOC; 2062 2063 n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); 2064 if (!n) 2065 return NULL; 2066 2067 n->fclone = SKB_FCLONE_UNAVAILABLE; 2068 } 2069 2070 return __skb_clone(n, skb); 2071 } 2072 EXPORT_SYMBOL(skb_clone); 2073 2074 void skb_headers_offset_update(struct sk_buff *skb, int off) 2075 { 2076 /* Only adjust this if it actually is csum_start rather than csum */ 2077 if (skb->ip_summed == CHECKSUM_PARTIAL) 2078 skb->csum_start += off; 2079 /* {transport,network,mac}_header and tail are relative to skb->head */ 2080 skb->transport_header += off; 2081 skb->network_header += off; 2082 if (skb_mac_header_was_set(skb)) 2083 skb->mac_header += off; 2084 skb->inner_transport_header += off; 2085 skb->inner_network_header += off; 2086 skb->inner_mac_header += off; 2087 } 2088 EXPORT_SYMBOL(skb_headers_offset_update); 2089 2090 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2091 { 2092 __copy_skb_header(new, old); 2093 2094 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2095 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2096 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2097 } 2098 EXPORT_SYMBOL(skb_copy_header); 2099 2100 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2101 { 2102 if (skb_pfmemalloc(skb)) 2103 return SKB_ALLOC_RX; 2104 return 0; 2105 } 2106 2107 /** 2108 * skb_copy - create private copy of an sk_buff 2109 * @skb: buffer to copy 2110 * @gfp_mask: allocation priority 2111 * 2112 * Make a copy of both an &sk_buff and its data. This is used when the 2113 * caller wishes to modify the data and needs a private copy of the 2114 * data to alter. Returns %NULL on failure or the pointer to the buffer 2115 * on success. The returned buffer has a reference count of 1. 2116 * 2117 * As by-product this function converts non-linear &sk_buff to linear 2118 * one, so that &sk_buff becomes completely private and caller is allowed 2119 * to modify all the data of returned buffer. This means that this 2120 * function is not recommended for use in circumstances when only 2121 * header is going to be modified. Use pskb_copy() instead. 2122 */ 2123 2124 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2125 { 2126 struct sk_buff *n; 2127 unsigned int size; 2128 int headerlen; 2129 2130 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2131 return NULL; 2132 2133 headerlen = skb_headroom(skb); 2134 size = skb_end_offset(skb) + skb->data_len; 2135 n = __alloc_skb(size, gfp_mask, 2136 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2137 if (!n) 2138 return NULL; 2139 2140 /* Set the data pointer */ 2141 skb_reserve(n, headerlen); 2142 /* Set the tail pointer and length */ 2143 skb_put(n, skb->len); 2144 2145 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2146 2147 skb_copy_header(n, skb); 2148 return n; 2149 } 2150 EXPORT_SYMBOL(skb_copy); 2151 2152 /** 2153 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2154 * @skb: buffer to copy 2155 * @headroom: headroom of new skb 2156 * @gfp_mask: allocation priority 2157 * @fclone: if true allocate the copy of the skb from the fclone 2158 * cache instead of the head cache; it is recommended to set this 2159 * to true for the cases where the copy will likely be cloned 2160 * 2161 * Make a copy of both an &sk_buff and part of its data, located 2162 * in header. Fragmented data remain shared. This is used when 2163 * the caller wishes to modify only header of &sk_buff and needs 2164 * private copy of the header to alter. Returns %NULL on failure 2165 * or the pointer to the buffer on success. 2166 * The returned buffer has a reference count of 1. 2167 */ 2168 2169 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2170 gfp_t gfp_mask, bool fclone) 2171 { 2172 unsigned int size = skb_headlen(skb) + headroom; 2173 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2174 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2175 2176 if (!n) 2177 goto out; 2178 2179 /* Set the data pointer */ 2180 skb_reserve(n, headroom); 2181 /* Set the tail pointer and length */ 2182 skb_put(n, skb_headlen(skb)); 2183 /* Copy the bytes */ 2184 skb_copy_from_linear_data(skb, n->data, n->len); 2185 2186 n->truesize += skb->data_len; 2187 n->data_len = skb->data_len; 2188 n->len = skb->len; 2189 2190 if (skb_shinfo(skb)->nr_frags) { 2191 int i; 2192 2193 if (skb_orphan_frags(skb, gfp_mask) || 2194 skb_zerocopy_clone(n, skb, gfp_mask)) { 2195 kfree_skb(n); 2196 n = NULL; 2197 goto out; 2198 } 2199 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2200 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2201 skb_frag_ref(skb, i); 2202 } 2203 skb_shinfo(n)->nr_frags = i; 2204 } 2205 2206 if (skb_has_frag_list(skb)) { 2207 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2208 skb_clone_fraglist(n); 2209 } 2210 2211 skb_copy_header(n, skb); 2212 out: 2213 return n; 2214 } 2215 EXPORT_SYMBOL(__pskb_copy_fclone); 2216 2217 /** 2218 * pskb_expand_head - reallocate header of &sk_buff 2219 * @skb: buffer to reallocate 2220 * @nhead: room to add at head 2221 * @ntail: room to add at tail 2222 * @gfp_mask: allocation priority 2223 * 2224 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2225 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2226 * reference count of 1. Returns zero in the case of success or error, 2227 * if expansion failed. In the last case, &sk_buff is not changed. 2228 * 2229 * All the pointers pointing into skb header may change and must be 2230 * reloaded after call to this function. 2231 */ 2232 2233 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2234 gfp_t gfp_mask) 2235 { 2236 unsigned int osize = skb_end_offset(skb); 2237 unsigned int size = osize + nhead + ntail; 2238 long off; 2239 u8 *data; 2240 int i; 2241 2242 BUG_ON(nhead < 0); 2243 2244 BUG_ON(skb_shared(skb)); 2245 2246 skb_zcopy_downgrade_managed(skb); 2247 2248 if (skb_pfmemalloc(skb)) 2249 gfp_mask |= __GFP_MEMALLOC; 2250 2251 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2252 if (!data) 2253 goto nodata; 2254 size = SKB_WITH_OVERHEAD(size); 2255 2256 /* Copy only real data... and, alas, header. This should be 2257 * optimized for the cases when header is void. 
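 * The first memcpy below copies everything from skb->head up to the
 * tail pointer (the old headroom plus the in-use linear data) into the
 * new buffer at offset nhead; the second copies struct skb_shared_info,
 * including only the frag descriptors that are actually in use, to the
 * new end of the buffer.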
2258 */ 2259 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2260 2261 memcpy((struct skb_shared_info *)(data + size), 2262 skb_shinfo(skb), 2263 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2264 2265 /* 2266 * if shinfo is shared we must drop the old head gracefully, but if it 2267 * is not we can just drop the old head and let the existing refcount 2268 * be since all we did is relocate the values 2269 */ 2270 if (skb_cloned(skb)) { 2271 if (skb_orphan_frags(skb, gfp_mask)) 2272 goto nofrags; 2273 if (skb_zcopy(skb)) 2274 refcount_inc(&skb_uarg(skb)->refcnt); 2275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2276 skb_frag_ref(skb, i); 2277 2278 if (skb_has_frag_list(skb)) 2279 skb_clone_fraglist(skb); 2280 2281 skb_release_data(skb, SKB_CONSUMED, false); 2282 } else { 2283 skb_free_head(skb, false); 2284 } 2285 off = (data + nhead) - skb->head; 2286 2287 skb->head = data; 2288 skb->head_frag = 0; 2289 skb->data += off; 2290 2291 skb_set_end_offset(skb, size); 2292 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2293 off = nhead; 2294 #endif 2295 skb->tail += off; 2296 skb_headers_offset_update(skb, nhead); 2297 skb->cloned = 0; 2298 skb->hdr_len = 0; 2299 skb->nohdr = 0; 2300 atomic_set(&skb_shinfo(skb)->dataref, 1); 2301 2302 skb_metadata_clear(skb); 2303 2304 /* It is not generally safe to change skb->truesize. 2305 * For the moment, we really care of rx path, or 2306 * when skb is orphaned (not attached to a socket). 2307 */ 2308 if (!skb->sk || skb->destructor == sock_edemux) 2309 skb->truesize += size - osize; 2310 2311 return 0; 2312 2313 nofrags: 2314 skb_kfree_head(data, size); 2315 nodata: 2316 return -ENOMEM; 2317 } 2318 EXPORT_SYMBOL(pskb_expand_head); 2319 2320 /* Make private copy of skb with writable head and some headroom */ 2321 2322 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2323 { 2324 struct sk_buff *skb2; 2325 int delta = headroom - skb_headroom(skb); 2326 2327 if (delta <= 0) 2328 skb2 = pskb_copy(skb, GFP_ATOMIC); 2329 else { 2330 skb2 = skb_clone(skb, GFP_ATOMIC); 2331 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2332 GFP_ATOMIC)) { 2333 kfree_skb(skb2); 2334 skb2 = NULL; 2335 } 2336 } 2337 return skb2; 2338 } 2339 EXPORT_SYMBOL(skb_realloc_headroom); 2340 2341 /* Note: We plan to rework this in linux-6.4 */ 2342 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2343 { 2344 unsigned int saved_end_offset, saved_truesize; 2345 struct skb_shared_info *shinfo; 2346 int res; 2347 2348 saved_end_offset = skb_end_offset(skb); 2349 saved_truesize = skb->truesize; 2350 2351 res = pskb_expand_head(skb, 0, 0, pri); 2352 if (res) 2353 return res; 2354 2355 skb->truesize = saved_truesize; 2356 2357 if (likely(skb_end_offset(skb) == saved_end_offset)) 2358 return 0; 2359 2360 /* We can not change skb->end if the original or new value 2361 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2362 */ 2363 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2364 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2365 /* We think this path should not be taken. 2366 * Add a temporary trace to warn us just in case. 2367 */ 2368 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2369 saved_end_offset, skb_end_offset(skb)); 2370 WARN_ON_ONCE(1); 2371 return 0; 2372 } 2373 2374 shinfo = skb_shinfo(skb); 2375 2376 /* We are about to change back skb->end, 2377 * we need to move skb_shinfo() to its new location. 
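 * memmove() rather than memcpy(): the shared info is shifted within the
 * same head buffer, and when the distance between the current and the
 * saved end offset is smaller than the shared info itself the two
 * regions overlap.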
2378 */ 2379 memmove(skb->head + saved_end_offset, 2380 shinfo, 2381 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2382 2383 skb_set_end_offset(skb, saved_end_offset); 2384 2385 return 0; 2386 } 2387 2388 /** 2389 * skb_expand_head - reallocate header of &sk_buff 2390 * @skb: buffer to reallocate 2391 * @headroom: needed headroom 2392 * 2393 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2394 * if possible; copies skb->sk to new skb as needed 2395 * and frees original skb in case of failures. 2396 * 2397 * It expect increased headroom and generates warning otherwise. 2398 */ 2399 2400 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2401 { 2402 int delta = headroom - skb_headroom(skb); 2403 int osize = skb_end_offset(skb); 2404 struct sock *sk = skb->sk; 2405 2406 if (WARN_ONCE(delta <= 0, 2407 "%s is expecting an increase in the headroom", __func__)) 2408 return skb; 2409 2410 delta = SKB_DATA_ALIGN(delta); 2411 /* pskb_expand_head() might crash, if skb is shared. */ 2412 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2413 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2414 2415 if (unlikely(!nskb)) 2416 goto fail; 2417 2418 if (sk) 2419 skb_set_owner_w(nskb, sk); 2420 consume_skb(skb); 2421 skb = nskb; 2422 } 2423 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2424 goto fail; 2425 2426 if (sk && is_skb_wmem(skb)) { 2427 delta = skb_end_offset(skb) - osize; 2428 refcount_add(delta, &sk->sk_wmem_alloc); 2429 skb->truesize += delta; 2430 } 2431 return skb; 2432 2433 fail: 2434 kfree_skb(skb); 2435 return NULL; 2436 } 2437 EXPORT_SYMBOL(skb_expand_head); 2438 2439 /** 2440 * skb_copy_expand - copy and expand sk_buff 2441 * @skb: buffer to copy 2442 * @newheadroom: new free bytes at head 2443 * @newtailroom: new free bytes at tail 2444 * @gfp_mask: allocation priority 2445 * 2446 * Make a copy of both an &sk_buff and its data and while doing so 2447 * allocate additional space. 2448 * 2449 * This is used when the caller wishes to modify the data and needs a 2450 * private copy of the data to alter as well as more space for new fields. 2451 * Returns %NULL on failure or the pointer to the buffer 2452 * on success. The returned buffer has a reference count of 1. 2453 * 2454 * You must pass %GFP_ATOMIC as the allocation priority if this function 2455 * is called from an interrupt. 2456 */ 2457 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2458 int newheadroom, int newtailroom, 2459 gfp_t gfp_mask) 2460 { 2461 /* 2462 * Allocate the copy buffer 2463 */ 2464 int head_copy_len, head_copy_off; 2465 struct sk_buff *n; 2466 int oldheadroom; 2467 2468 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2469 return NULL; 2470 2471 oldheadroom = skb_headroom(skb); 2472 n = __alloc_skb(newheadroom + skb->len + newtailroom, 2473 gfp_mask, skb_alloc_rx_flag(skb), 2474 NUMA_NO_NODE); 2475 if (!n) 2476 return NULL; 2477 2478 skb_reserve(n, newheadroom); 2479 2480 /* Set the tail pointer and length */ 2481 skb_put(n, skb->len); 2482 2483 head_copy_len = oldheadroom; 2484 head_copy_off = 0; 2485 if (newheadroom <= head_copy_len) 2486 head_copy_len = newheadroom; 2487 else 2488 head_copy_off = newheadroom - head_copy_len; 2489 2490 /* Copy the linear header and data. 
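 * The negative offset passed to skb_copy_bits() below makes the copy
 * start inside the old headroom, so a single call transfers both the
 * preserved part of the header (head_copy_len bytes) and all skb->len
 * bytes of payload, landing at head_copy_off in the new buffer when the
 * new headroom is larger than the old one.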
*/ 2491 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2492 skb->len + head_copy_len)); 2493 2494 skb_copy_header(n, skb); 2495 2496 skb_headers_offset_update(n, newheadroom - oldheadroom); 2497 2498 return n; 2499 } 2500 EXPORT_SYMBOL(skb_copy_expand); 2501 2502 /** 2503 * __skb_pad - zero pad the tail of an skb 2504 * @skb: buffer to pad 2505 * @pad: space to pad 2506 * @free_on_error: free buffer on error 2507 * 2508 * Ensure that a buffer is followed by a padding area that is zero 2509 * filled. Used by network drivers which may DMA or transfer data 2510 * beyond the buffer end onto the wire. 2511 * 2512 * May return error in out of memory cases. The skb is freed on error 2513 * if @free_on_error is true. 2514 */ 2515 2516 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2517 { 2518 int err; 2519 int ntail; 2520 2521 /* If the skbuff is non linear tailroom is always zero.. */ 2522 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2523 memset(skb->data+skb->len, 0, pad); 2524 return 0; 2525 } 2526 2527 ntail = skb->data_len + pad - (skb->end - skb->tail); 2528 if (likely(skb_cloned(skb) || ntail > 0)) { 2529 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2530 if (unlikely(err)) 2531 goto free_skb; 2532 } 2533 2534 /* FIXME: The use of this function with non-linear skb's really needs 2535 * to be audited. 2536 */ 2537 err = skb_linearize(skb); 2538 if (unlikely(err)) 2539 goto free_skb; 2540 2541 memset(skb->data + skb->len, 0, pad); 2542 return 0; 2543 2544 free_skb: 2545 if (free_on_error) 2546 kfree_skb(skb); 2547 return err; 2548 } 2549 EXPORT_SYMBOL(__skb_pad); 2550 2551 /** 2552 * pskb_put - add data to the tail of a potentially fragmented buffer 2553 * @skb: start of the buffer to use 2554 * @tail: tail fragment of the buffer to use 2555 * @len: amount of data to add 2556 * 2557 * This function extends the used data area of the potentially 2558 * fragmented buffer. @tail must be the last fragment of @skb -- or 2559 * @skb itself. If this would exceed the total buffer size the kernel 2560 * will panic. A pointer to the first byte of the extra data is 2561 * returned. 2562 */ 2563 2564 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2565 { 2566 if (tail != skb) { 2567 skb->data_len += len; 2568 skb->len += len; 2569 } 2570 return skb_put(tail, len); 2571 } 2572 EXPORT_SYMBOL_GPL(pskb_put); 2573 2574 /** 2575 * skb_put - add data to a buffer 2576 * @skb: buffer to use 2577 * @len: amount of data to add 2578 * 2579 * This function extends the used data area of the buffer. If this would 2580 * exceed the total buffer size the kernel will panic. A pointer to the 2581 * first byte of the extra data is returned. 2582 */ 2583 void *skb_put(struct sk_buff *skb, unsigned int len) 2584 { 2585 void *tmp = skb_tail_pointer(skb); 2586 SKB_LINEAR_ASSERT(skb); 2587 skb->tail += len; 2588 skb->len += len; 2589 if (unlikely(skb->tail > skb->end)) 2590 skb_over_panic(skb, len, __builtin_return_address(0)); 2591 return tmp; 2592 } 2593 EXPORT_SYMBOL(skb_put); 2594 2595 /** 2596 * skb_push - add data to the start of a buffer 2597 * @skb: buffer to use 2598 * @len: amount of data to add 2599 * 2600 * This function extends the used data area of the buffer at the buffer 2601 * start. If this would exceed the total buffer headroom the kernel will 2602 * panic. A pointer to the first byte of the extra data is returned. 
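 *
 * A typical pattern (illustrative sketch; assumes headroom was reserved
 * up front and eth_hdr_template is a hypothetical source buffer):
 *
 *	skb_reserve(skb, ETH_HLEN);
 *	...
 *	memcpy(skb_push(skb, ETH_HLEN), eth_hdr_template, ETH_HLEN);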
2603 */ 2604 void *skb_push(struct sk_buff *skb, unsigned int len) 2605 { 2606 skb->data -= len; 2607 skb->len += len; 2608 if (unlikely(skb->data < skb->head)) 2609 skb_under_panic(skb, len, __builtin_return_address(0)); 2610 return skb->data; 2611 } 2612 EXPORT_SYMBOL(skb_push); 2613 2614 /** 2615 * skb_pull - remove data from the start of a buffer 2616 * @skb: buffer to use 2617 * @len: amount of data to remove 2618 * 2619 * This function removes data from the start of a buffer, returning 2620 * the memory to the headroom. A pointer to the next data in the buffer 2621 * is returned. Once the data has been pulled future pushes will overwrite 2622 * the old data. 2623 */ 2624 void *skb_pull(struct sk_buff *skb, unsigned int len) 2625 { 2626 return skb_pull_inline(skb, len); 2627 } 2628 EXPORT_SYMBOL(skb_pull); 2629 2630 /** 2631 * skb_pull_data - remove data from the start of a buffer returning its 2632 * original position. 2633 * @skb: buffer to use 2634 * @len: amount of data to remove 2635 * 2636 * This function removes data from the start of a buffer, returning 2637 * the memory to the headroom. A pointer to the original data in the buffer 2638 * is returned after checking if there is enough data to pull. Once the 2639 * data has been pulled future pushes will overwrite the old data. 2640 */ 2641 void *skb_pull_data(struct sk_buff *skb, size_t len) 2642 { 2643 void *data = skb->data; 2644 2645 if (skb->len < len) 2646 return NULL; 2647 2648 skb_pull(skb, len); 2649 2650 return data; 2651 } 2652 EXPORT_SYMBOL(skb_pull_data); 2653 2654 /** 2655 * skb_trim - remove end from a buffer 2656 * @skb: buffer to alter 2657 * @len: new length 2658 * 2659 * Cut the length of a buffer down by removing data from the tail. If 2660 * the buffer is already under the length specified it is not modified. 2661 * The skb must be linear. 2662 */ 2663 void skb_trim(struct sk_buff *skb, unsigned int len) 2664 { 2665 if (skb->len > len) 2666 __skb_trim(skb, len); 2667 } 2668 EXPORT_SYMBOL(skb_trim); 2669 2670 /* Trims skb to length len. It can change skb pointers. 
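 * Unlike skb_trim(), which only handles linear buffers, this path copes
 * with paged data and frag lists; callers normally go through the
 * pskb_trim() or pskb_trim_rcsum() wrappers, e.g. (illustrative):
 *
 *	if (pskb_trim(skb, new_len))
 *		goto drop;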
2671 */ 2672 2673 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2674 { 2675 struct sk_buff **fragp; 2676 struct sk_buff *frag; 2677 int offset = skb_headlen(skb); 2678 int nfrags = skb_shinfo(skb)->nr_frags; 2679 int i; 2680 int err; 2681 2682 if (skb_cloned(skb) && 2683 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2684 return err; 2685 2686 i = 0; 2687 if (offset >= len) 2688 goto drop_pages; 2689 2690 for (; i < nfrags; i++) { 2691 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2692 2693 if (end < len) { 2694 offset = end; 2695 continue; 2696 } 2697 2698 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2699 2700 drop_pages: 2701 skb_shinfo(skb)->nr_frags = i; 2702 2703 for (; i < nfrags; i++) 2704 skb_frag_unref(skb, i); 2705 2706 if (skb_has_frag_list(skb)) 2707 skb_drop_fraglist(skb); 2708 goto done; 2709 } 2710 2711 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2712 fragp = &frag->next) { 2713 int end = offset + frag->len; 2714 2715 if (skb_shared(frag)) { 2716 struct sk_buff *nfrag; 2717 2718 nfrag = skb_clone(frag, GFP_ATOMIC); 2719 if (unlikely(!nfrag)) 2720 return -ENOMEM; 2721 2722 nfrag->next = frag->next; 2723 consume_skb(frag); 2724 frag = nfrag; 2725 *fragp = frag; 2726 } 2727 2728 if (end < len) { 2729 offset = end; 2730 continue; 2731 } 2732 2733 if (end > len && 2734 unlikely((err = pskb_trim(frag, len - offset)))) 2735 return err; 2736 2737 if (frag->next) 2738 skb_drop_list(&frag->next); 2739 break; 2740 } 2741 2742 done: 2743 if (len > skb_headlen(skb)) { 2744 skb->data_len -= skb->len - len; 2745 skb->len = len; 2746 } else { 2747 skb->len = len; 2748 skb->data_len = 0; 2749 skb_set_tail_pointer(skb, len); 2750 } 2751 2752 if (!skb->sk || skb->destructor == sock_edemux) 2753 skb_condense(skb); 2754 return 0; 2755 } 2756 EXPORT_SYMBOL(___pskb_trim); 2757 2758 /* Note : use pskb_trim_rcsum() instead of calling this directly 2759 */ 2760 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2761 { 2762 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2763 int delta = skb->len - len; 2764 2765 skb->csum = csum_block_sub(skb->csum, 2766 skb_checksum(skb, len, delta, 0), 2767 len); 2768 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2769 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2770 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2771 2772 if (offset + sizeof(__sum16) > hdlen) 2773 return -EINVAL; 2774 } 2775 return __pskb_trim(skb, len); 2776 } 2777 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2778 2779 /** 2780 * __pskb_pull_tail - advance tail of skb header 2781 * @skb: buffer to reallocate 2782 * @delta: number of bytes to advance tail 2783 * 2784 * The function makes a sense only on a fragmented &sk_buff, 2785 * it expands header moving its tail forward and copying necessary 2786 * data from fragmented part. 2787 * 2788 * &sk_buff MUST have reference count of 1. 2789 * 2790 * Returns %NULL (and &sk_buff does not change) if pull failed 2791 * or value of new tail of skb in the case of success. 2792 * 2793 * All the pointers pointing into skb header may change and must be 2794 * reloaded after call to this function. 2795 */ 2796 2797 /* Moves tail of skb head forward, copying data from fragmented part, 2798 * when it is necessary. 2799 * 1. It may fail due to malloc failure. 2800 * 2. It may change skb pointers. 2801 * 2802 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
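 * Most code does not call this directly; helpers such as pskb_may_pull()
 * invoke it only when the bytes they need are not already in the linear
 * area, e.g. (illustrative):
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;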
2803 */ 2804 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2805 { 2806 /* If skb has not enough free space at tail, get new one 2807 * plus 128 bytes for future expansions. If we have enough 2808 * room at tail, reallocate without expansion only if skb is cloned. 2809 */ 2810 int i, k, eat = (skb->tail + delta) - skb->end; 2811 2812 if (eat > 0 || skb_cloned(skb)) { 2813 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2814 GFP_ATOMIC)) 2815 return NULL; 2816 } 2817 2818 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2819 skb_tail_pointer(skb), delta)); 2820 2821 /* Optimization: no fragments, no reasons to preestimate 2822 * size of pulled pages. Superb. 2823 */ 2824 if (!skb_has_frag_list(skb)) 2825 goto pull_pages; 2826 2827 /* Estimate size of pulled pages. */ 2828 eat = delta; 2829 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2830 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2831 2832 if (size >= eat) 2833 goto pull_pages; 2834 eat -= size; 2835 } 2836 2837 /* If we need update frag list, we are in troubles. 2838 * Certainly, it is possible to add an offset to skb data, 2839 * but taking into account that pulling is expected to 2840 * be very rare operation, it is worth to fight against 2841 * further bloating skb head and crucify ourselves here instead. 2842 * Pure masohism, indeed. 8)8) 2843 */ 2844 if (eat) { 2845 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2846 struct sk_buff *clone = NULL; 2847 struct sk_buff *insp = NULL; 2848 2849 do { 2850 if (list->len <= eat) { 2851 /* Eaten as whole. */ 2852 eat -= list->len; 2853 list = list->next; 2854 insp = list; 2855 } else { 2856 /* Eaten partially. */ 2857 if (skb_is_gso(skb) && !list->head_frag && 2858 skb_headlen(list)) 2859 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2860 2861 if (skb_shared(list)) { 2862 /* Sucks! We need to fork list. :-( */ 2863 clone = skb_clone(list, GFP_ATOMIC); 2864 if (!clone) 2865 return NULL; 2866 insp = list->next; 2867 list = clone; 2868 } else { 2869 /* This may be pulled without 2870 * problems. */ 2871 insp = list; 2872 } 2873 if (!pskb_pull(list, eat)) { 2874 kfree_skb(clone); 2875 return NULL; 2876 } 2877 break; 2878 } 2879 } while (eat); 2880 2881 /* Free pulled out fragments. */ 2882 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2883 skb_shinfo(skb)->frag_list = list->next; 2884 consume_skb(list); 2885 } 2886 /* And insert new clone at head. */ 2887 if (clone) { 2888 clone->next = list; 2889 skb_shinfo(skb)->frag_list = clone; 2890 } 2891 } 2892 /* Success! Now we may commit changes to skb data. 
*/ 2893 2894 pull_pages: 2895 eat = delta; 2896 k = 0; 2897 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2898 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2899 2900 if (size <= eat) { 2901 skb_frag_unref(skb, i); 2902 eat -= size; 2903 } else { 2904 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2905 2906 *frag = skb_shinfo(skb)->frags[i]; 2907 if (eat) { 2908 skb_frag_off_add(frag, eat); 2909 skb_frag_size_sub(frag, eat); 2910 if (!i) 2911 goto end; 2912 eat = 0; 2913 } 2914 k++; 2915 } 2916 } 2917 skb_shinfo(skb)->nr_frags = k; 2918 2919 end: 2920 skb->tail += delta; 2921 skb->data_len -= delta; 2922 2923 if (!skb->data_len) 2924 skb_zcopy_clear(skb, false); 2925 2926 return skb_tail_pointer(skb); 2927 } 2928 EXPORT_SYMBOL(__pskb_pull_tail); 2929 2930 /** 2931 * skb_copy_bits - copy bits from skb to kernel buffer 2932 * @skb: source skb 2933 * @offset: offset in source 2934 * @to: destination buffer 2935 * @len: number of bytes to copy 2936 * 2937 * Copy the specified number of bytes from the source skb to the 2938 * destination buffer. 2939 * 2940 * CAUTION ! : 2941 * If its prototype is ever changed, 2942 * check arch/{*}/net/{*}.S files, 2943 * since it is called from BPF assembly code. 2944 */ 2945 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2946 { 2947 int start = skb_headlen(skb); 2948 struct sk_buff *frag_iter; 2949 int i, copy; 2950 2951 if (offset > (int)skb->len - len) 2952 goto fault; 2953 2954 /* Copy header. */ 2955 if ((copy = start - offset) > 0) { 2956 if (copy > len) 2957 copy = len; 2958 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2959 if ((len -= copy) == 0) 2960 return 0; 2961 offset += copy; 2962 to += copy; 2963 } 2964 2965 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2966 int end; 2967 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2968 2969 WARN_ON(start > offset + len); 2970 2971 end = start + skb_frag_size(f); 2972 if ((copy = end - offset) > 0) { 2973 u32 p_off, p_len, copied; 2974 struct page *p; 2975 u8 *vaddr; 2976 2977 if (copy > len) 2978 copy = len; 2979 2980 skb_frag_foreach_page(f, 2981 skb_frag_off(f) + offset - start, 2982 copy, p, p_off, p_len, copied) { 2983 vaddr = kmap_atomic(p); 2984 memcpy(to + copied, vaddr + p_off, p_len); 2985 kunmap_atomic(vaddr); 2986 } 2987 2988 if ((len -= copy) == 0) 2989 return 0; 2990 offset += copy; 2991 to += copy; 2992 } 2993 start = end; 2994 } 2995 2996 skb_walk_frags(skb, frag_iter) { 2997 int end; 2998 2999 WARN_ON(start > offset + len); 3000 3001 end = start + frag_iter->len; 3002 if ((copy = end - offset) > 0) { 3003 if (copy > len) 3004 copy = len; 3005 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 3006 goto fault; 3007 if ((len -= copy) == 0) 3008 return 0; 3009 offset += copy; 3010 to += copy; 3011 } 3012 start = end; 3013 } 3014 3015 if (!len) 3016 return 0; 3017 3018 fault: 3019 return -EFAULT; 3020 } 3021 EXPORT_SYMBOL(skb_copy_bits); 3022 3023 /* 3024 * Callback from splice_to_pipe(), if we need to release some pages 3025 * at the end of the spd in case we error'ed out in filling the pipe. 
3026 */ 3027 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3028 { 3029 put_page(spd->pages[i]); 3030 } 3031 3032 static struct page *linear_to_page(struct page *page, unsigned int *len, 3033 unsigned int *offset, 3034 struct sock *sk) 3035 { 3036 struct page_frag *pfrag = sk_page_frag(sk); 3037 3038 if (!sk_page_frag_refill(sk, pfrag)) 3039 return NULL; 3040 3041 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 3042 3043 memcpy(page_address(pfrag->page) + pfrag->offset, 3044 page_address(page) + *offset, *len); 3045 *offset = pfrag->offset; 3046 pfrag->offset += *len; 3047 3048 return pfrag->page; 3049 } 3050 3051 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 3052 struct page *page, 3053 unsigned int offset) 3054 { 3055 return spd->nr_pages && 3056 spd->pages[spd->nr_pages - 1] == page && 3057 (spd->partial[spd->nr_pages - 1].offset + 3058 spd->partial[spd->nr_pages - 1].len == offset); 3059 } 3060 3061 /* 3062 * Fill page/offset/length into spd, if it can hold more pages. 3063 */ 3064 static bool spd_fill_page(struct splice_pipe_desc *spd, 3065 struct pipe_inode_info *pipe, struct page *page, 3066 unsigned int *len, unsigned int offset, 3067 bool linear, 3068 struct sock *sk) 3069 { 3070 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3071 return true; 3072 3073 if (linear) { 3074 page = linear_to_page(page, len, &offset, sk); 3075 if (!page) 3076 return true; 3077 } 3078 if (spd_can_coalesce(spd, page, offset)) { 3079 spd->partial[spd->nr_pages - 1].len += *len; 3080 return false; 3081 } 3082 get_page(page); 3083 spd->pages[spd->nr_pages] = page; 3084 spd->partial[spd->nr_pages].len = *len; 3085 spd->partial[spd->nr_pages].offset = offset; 3086 spd->nr_pages++; 3087 3088 return false; 3089 } 3090 3091 static bool __splice_segment(struct page *page, unsigned int poff, 3092 unsigned int plen, unsigned int *off, 3093 unsigned int *len, 3094 struct splice_pipe_desc *spd, bool linear, 3095 struct sock *sk, 3096 struct pipe_inode_info *pipe) 3097 { 3098 if (!*len) 3099 return true; 3100 3101 /* skip this segment if already processed */ 3102 if (*off >= plen) { 3103 *off -= plen; 3104 return false; 3105 } 3106 3107 /* ignore any bits we already processed */ 3108 poff += *off; 3109 plen -= *off; 3110 *off = 0; 3111 3112 do { 3113 unsigned int flen = min(*len, plen); 3114 3115 if (spd_fill_page(spd, pipe, page, &flen, poff, 3116 linear, sk)) 3117 return true; 3118 poff += flen; 3119 plen -= flen; 3120 *len -= flen; 3121 } while (*len && plen); 3122 3123 return false; 3124 } 3125 3126 /* 3127 * Map linear and fragment data from the skb to spd. It reports true if the 3128 * pipe is full or if we already spliced the requested length. 3129 */ 3130 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3131 unsigned int *offset, unsigned int *len, 3132 struct splice_pipe_desc *spd, struct sock *sk) 3133 { 3134 int seg; 3135 struct sk_buff *iter; 3136 3137 /* map the linear part : 3138 * If skb->head_frag is set, this 'linear' part is backed by a 3139 * fragment, and if the head is not shared with any clones then 3140 * we can avoid a copy since we own the head portion of this page. 
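 * The skb_head_is_locked() result below is passed as the 'linear' flag:
 * when the head cannot simply be handed to the pipe (for instance it is
 * not backed by a page fragment, or it is shared with clones), the data
 * is first copied into the socket's page frag via linear_to_page()
 * instead of being referenced in place.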
3141 */ 3142 if (__splice_segment(virt_to_page(skb->data), 3143 (unsigned long) skb->data & (PAGE_SIZE - 1), 3144 skb_headlen(skb), 3145 offset, len, spd, 3146 skb_head_is_locked(skb), 3147 sk, pipe)) 3148 return true; 3149 3150 /* 3151 * then map the fragments 3152 */ 3153 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3154 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3155 3156 if (__splice_segment(skb_frag_page(f), 3157 skb_frag_off(f), skb_frag_size(f), 3158 offset, len, spd, false, sk, pipe)) 3159 return true; 3160 } 3161 3162 skb_walk_frags(skb, iter) { 3163 if (*offset >= iter->len) { 3164 *offset -= iter->len; 3165 continue; 3166 } 3167 /* __skb_splice_bits() only fails if the output has no room 3168 * left, so no point in going over the frag_list for the error 3169 * case. 3170 */ 3171 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3172 return true; 3173 } 3174 3175 return false; 3176 } 3177 3178 /* 3179 * Map data from the skb to a pipe. Should handle both the linear part, 3180 * the fragments, and the frag list. 3181 */ 3182 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3183 struct pipe_inode_info *pipe, unsigned int tlen, 3184 unsigned int flags) 3185 { 3186 struct partial_page partial[MAX_SKB_FRAGS]; 3187 struct page *pages[MAX_SKB_FRAGS]; 3188 struct splice_pipe_desc spd = { 3189 .pages = pages, 3190 .partial = partial, 3191 .nr_pages_max = MAX_SKB_FRAGS, 3192 .ops = &nosteal_pipe_buf_ops, 3193 .spd_release = sock_spd_release, 3194 }; 3195 int ret = 0; 3196 3197 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3198 3199 if (spd.nr_pages) 3200 ret = splice_to_pipe(pipe, &spd); 3201 3202 return ret; 3203 } 3204 EXPORT_SYMBOL_GPL(skb_splice_bits); 3205 3206 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3207 { 3208 struct socket *sock = sk->sk_socket; 3209 size_t size = msg_data_left(msg); 3210 3211 if (!sock) 3212 return -EINVAL; 3213 3214 if (!sock->ops->sendmsg_locked) 3215 return sock_no_sendmsg_locked(sk, msg, size); 3216 3217 return sock->ops->sendmsg_locked(sk, msg, size); 3218 } 3219 3220 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3221 { 3222 struct socket *sock = sk->sk_socket; 3223 3224 if (!sock) 3225 return -EINVAL; 3226 return sock_sendmsg(sock, msg); 3227 } 3228 3229 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3230 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3231 int len, sendmsg_func sendmsg) 3232 { 3233 unsigned int orig_len = len; 3234 struct sk_buff *head = skb; 3235 unsigned short fragidx; 3236 int slen, ret; 3237 3238 do_frag_list: 3239 3240 /* Deal with head data */ 3241 while (offset < skb_headlen(skb) && len) { 3242 struct kvec kv; 3243 struct msghdr msg; 3244 3245 slen = min_t(int, len, skb_headlen(skb) - offset); 3246 kv.iov_base = skb->data + offset; 3247 kv.iov_len = slen; 3248 memset(&msg, 0, sizeof(msg)); 3249 msg.msg_flags = MSG_DONTWAIT; 3250 3251 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3252 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3253 sendmsg_unlocked, sk, &msg); 3254 if (ret <= 0) 3255 goto error; 3256 3257 offset += ret; 3258 len -= ret; 3259 } 3260 3261 /* All the data was skb head? 
*/ 3262 if (!len) 3263 goto out; 3264 3265 /* Make offset relative to start of frags */ 3266 offset -= skb_headlen(skb); 3267 3268 /* Find where we are in frag list */ 3269 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3270 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3271 3272 if (offset < skb_frag_size(frag)) 3273 break; 3274 3275 offset -= skb_frag_size(frag); 3276 } 3277 3278 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3279 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3280 3281 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3282 3283 while (slen) { 3284 struct bio_vec bvec; 3285 struct msghdr msg = { 3286 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3287 }; 3288 3289 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3290 skb_frag_off(frag) + offset); 3291 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3292 slen); 3293 3294 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3295 sendmsg_unlocked, sk, &msg); 3296 if (ret <= 0) 3297 goto error; 3298 3299 len -= ret; 3300 offset += ret; 3301 slen -= ret; 3302 } 3303 3304 offset = 0; 3305 } 3306 3307 if (len) { 3308 /* Process any frag lists */ 3309 3310 if (skb == head) { 3311 if (skb_has_frag_list(skb)) { 3312 skb = skb_shinfo(skb)->frag_list; 3313 goto do_frag_list; 3314 } 3315 } else if (skb->next) { 3316 skb = skb->next; 3317 goto do_frag_list; 3318 } 3319 } 3320 3321 out: 3322 return orig_len - len; 3323 3324 error: 3325 return orig_len == len ? ret : orig_len - len; 3326 } 3327 3328 /* Send skb data on a socket. Socket must be locked. */ 3329 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3330 int len) 3331 { 3332 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3333 } 3334 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3335 3336 /* Send skb data on a socket. Socket must be unlocked. */ 3337 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3338 { 3339 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3340 } 3341 3342 /** 3343 * skb_store_bits - store bits from kernel buffer to skb 3344 * @skb: destination buffer 3345 * @offset: offset in destination 3346 * @from: source buffer 3347 * @len: number of bytes to copy 3348 * 3349 * Copy the specified number of bytes from the source buffer to the 3350 * destination skb. This function handles all the messy bits of 3351 * traversing fragment lists and such. 
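 *
 * This is the write-side counterpart of skb_copy_bits(); it does not
 * grow the buffer, so @offset + @len must already lie within skb->len.
 * A sketch of typical use (illustrative):
 *
 *	if (skb_store_bits(skb, offset, buf, len))
 *		return -EFAULT;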
3352 */ 3353 3354 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3355 { 3356 int start = skb_headlen(skb); 3357 struct sk_buff *frag_iter; 3358 int i, copy; 3359 3360 if (offset > (int)skb->len - len) 3361 goto fault; 3362 3363 if ((copy = start - offset) > 0) { 3364 if (copy > len) 3365 copy = len; 3366 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3367 if ((len -= copy) == 0) 3368 return 0; 3369 offset += copy; 3370 from += copy; 3371 } 3372 3373 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3374 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3375 int end; 3376 3377 WARN_ON(start > offset + len); 3378 3379 end = start + skb_frag_size(frag); 3380 if ((copy = end - offset) > 0) { 3381 u32 p_off, p_len, copied; 3382 struct page *p; 3383 u8 *vaddr; 3384 3385 if (copy > len) 3386 copy = len; 3387 3388 skb_frag_foreach_page(frag, 3389 skb_frag_off(frag) + offset - start, 3390 copy, p, p_off, p_len, copied) { 3391 vaddr = kmap_atomic(p); 3392 memcpy(vaddr + p_off, from + copied, p_len); 3393 kunmap_atomic(vaddr); 3394 } 3395 3396 if ((len -= copy) == 0) 3397 return 0; 3398 offset += copy; 3399 from += copy; 3400 } 3401 start = end; 3402 } 3403 3404 skb_walk_frags(skb, frag_iter) { 3405 int end; 3406 3407 WARN_ON(start > offset + len); 3408 3409 end = start + frag_iter->len; 3410 if ((copy = end - offset) > 0) { 3411 if (copy > len) 3412 copy = len; 3413 if (skb_store_bits(frag_iter, offset - start, 3414 from, copy)) 3415 goto fault; 3416 if ((len -= copy) == 0) 3417 return 0; 3418 offset += copy; 3419 from += copy; 3420 } 3421 start = end; 3422 } 3423 if (!len) 3424 return 0; 3425 3426 fault: 3427 return -EFAULT; 3428 } 3429 EXPORT_SYMBOL(skb_store_bits); 3430 3431 /* Checksum skb data. */ 3432 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3433 __wsum csum, const struct skb_checksum_ops *ops) 3434 { 3435 int start = skb_headlen(skb); 3436 int i, copy = start - offset; 3437 struct sk_buff *frag_iter; 3438 int pos = 0; 3439 3440 /* Checksum header. 
*/ 3441 if (copy > 0) { 3442 if (copy > len) 3443 copy = len; 3444 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3445 skb->data + offset, copy, csum); 3446 if ((len -= copy) == 0) 3447 return csum; 3448 offset += copy; 3449 pos = copy; 3450 } 3451 3452 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3453 int end; 3454 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3455 3456 WARN_ON(start > offset + len); 3457 3458 end = start + skb_frag_size(frag); 3459 if ((copy = end - offset) > 0) { 3460 u32 p_off, p_len, copied; 3461 struct page *p; 3462 __wsum csum2; 3463 u8 *vaddr; 3464 3465 if (copy > len) 3466 copy = len; 3467 3468 skb_frag_foreach_page(frag, 3469 skb_frag_off(frag) + offset - start, 3470 copy, p, p_off, p_len, copied) { 3471 vaddr = kmap_atomic(p); 3472 csum2 = INDIRECT_CALL_1(ops->update, 3473 csum_partial_ext, 3474 vaddr + p_off, p_len, 0); 3475 kunmap_atomic(vaddr); 3476 csum = INDIRECT_CALL_1(ops->combine, 3477 csum_block_add_ext, csum, 3478 csum2, pos, p_len); 3479 pos += p_len; 3480 } 3481 3482 if (!(len -= copy)) 3483 return csum; 3484 offset += copy; 3485 } 3486 start = end; 3487 } 3488 3489 skb_walk_frags(skb, frag_iter) { 3490 int end; 3491 3492 WARN_ON(start > offset + len); 3493 3494 end = start + frag_iter->len; 3495 if ((copy = end - offset) > 0) { 3496 __wsum csum2; 3497 if (copy > len) 3498 copy = len; 3499 csum2 = __skb_checksum(frag_iter, offset - start, 3500 copy, 0, ops); 3501 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3502 csum, csum2, pos, copy); 3503 if ((len -= copy) == 0) 3504 return csum; 3505 offset += copy; 3506 pos += copy; 3507 } 3508 start = end; 3509 } 3510 BUG_ON(len); 3511 3512 return csum; 3513 } 3514 EXPORT_SYMBOL(__skb_checksum); 3515 3516 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3517 int len, __wsum csum) 3518 { 3519 const struct skb_checksum_ops ops = { 3520 .update = csum_partial_ext, 3521 .combine = csum_block_add_ext, 3522 }; 3523 3524 return __skb_checksum(skb, offset, len, csum, &ops); 3525 } 3526 EXPORT_SYMBOL(skb_checksum); 3527 3528 /* Both of above in one bottle. */ 3529 3530 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3531 u8 *to, int len) 3532 { 3533 int start = skb_headlen(skb); 3534 int i, copy = start - offset; 3535 struct sk_buff *frag_iter; 3536 int pos = 0; 3537 __wsum csum = 0; 3538 3539 /* Copy header. 
*/ 3540 if (copy > 0) { 3541 if (copy > len) 3542 copy = len; 3543 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3544 copy); 3545 if ((len -= copy) == 0) 3546 return csum; 3547 offset += copy; 3548 to += copy; 3549 pos = copy; 3550 } 3551 3552 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3553 int end; 3554 3555 WARN_ON(start > offset + len); 3556 3557 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3558 if ((copy = end - offset) > 0) { 3559 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3560 u32 p_off, p_len, copied; 3561 struct page *p; 3562 __wsum csum2; 3563 u8 *vaddr; 3564 3565 if (copy > len) 3566 copy = len; 3567 3568 skb_frag_foreach_page(frag, 3569 skb_frag_off(frag) + offset - start, 3570 copy, p, p_off, p_len, copied) { 3571 vaddr = kmap_atomic(p); 3572 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3573 to + copied, 3574 p_len); 3575 kunmap_atomic(vaddr); 3576 csum = csum_block_add(csum, csum2, pos); 3577 pos += p_len; 3578 } 3579 3580 if (!(len -= copy)) 3581 return csum; 3582 offset += copy; 3583 to += copy; 3584 } 3585 start = end; 3586 } 3587 3588 skb_walk_frags(skb, frag_iter) { 3589 __wsum csum2; 3590 int end; 3591 3592 WARN_ON(start > offset + len); 3593 3594 end = start + frag_iter->len; 3595 if ((copy = end - offset) > 0) { 3596 if (copy > len) 3597 copy = len; 3598 csum2 = skb_copy_and_csum_bits(frag_iter, 3599 offset - start, 3600 to, copy); 3601 csum = csum_block_add(csum, csum2, pos); 3602 if ((len -= copy) == 0) 3603 return csum; 3604 offset += copy; 3605 to += copy; 3606 pos += copy; 3607 } 3608 start = end; 3609 } 3610 BUG_ON(len); 3611 return csum; 3612 } 3613 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3614 3615 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3616 { 3617 __sum16 sum; 3618 3619 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3620 /* See comments in __skb_checksum_complete(). */ 3621 if (likely(!sum)) { 3622 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3623 !skb->csum_complete_sw) 3624 netdev_rx_csum_fault(skb->dev, skb); 3625 } 3626 if (!skb_shared(skb)) 3627 skb->csum_valid = !sum; 3628 return sum; 3629 } 3630 EXPORT_SYMBOL(__skb_checksum_complete_head); 3631 3632 /* This function assumes skb->csum already holds pseudo header's checksum, 3633 * which has been changed from the hardware checksum, for example, by 3634 * __skb_checksum_validate_complete(). And, the original skb->csum must 3635 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3636 * 3637 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3638 * zero. The new checksum is stored back into skb->csum unless the skb is 3639 * shared. 3640 */ 3641 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3642 { 3643 __wsum csum; 3644 __sum16 sum; 3645 3646 csum = skb_checksum(skb, 0, skb->len, 0); 3647 3648 sum = csum_fold(csum_add(skb->csum, csum)); 3649 /* This check is inverted, because we already knew the hardware 3650 * checksum is invalid before calling this function. So, if the 3651 * re-computed checksum is valid instead, then we have a mismatch 3652 * between the original skb->csum and skb_checksum(). This means either 3653 * the original hardware checksum is incorrect or we screw up skb->csum 3654 * when moving skb->data around. 
3655 */ 3656 if (likely(!sum)) { 3657 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3658 !skb->csum_complete_sw) 3659 netdev_rx_csum_fault(skb->dev, skb); 3660 } 3661 3662 if (!skb_shared(skb)) { 3663 /* Save full packet checksum */ 3664 skb->csum = csum; 3665 skb->ip_summed = CHECKSUM_COMPLETE; 3666 skb->csum_complete_sw = 1; 3667 skb->csum_valid = !sum; 3668 } 3669 3670 return sum; 3671 } 3672 EXPORT_SYMBOL(__skb_checksum_complete); 3673 3674 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3675 { 3676 net_warn_ratelimited( 3677 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3678 __func__); 3679 return 0; 3680 } 3681 3682 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3683 int offset, int len) 3684 { 3685 net_warn_ratelimited( 3686 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3687 __func__); 3688 return 0; 3689 } 3690 3691 static const struct skb_checksum_ops default_crc32c_ops = { 3692 .update = warn_crc32c_csum_update, 3693 .combine = warn_crc32c_csum_combine, 3694 }; 3695 3696 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3697 &default_crc32c_ops; 3698 EXPORT_SYMBOL(crc32c_csum_stub); 3699 3700 /** 3701 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3702 * @from: source buffer 3703 * 3704 * Calculates the amount of linear headroom needed in the 'to' skb passed 3705 * into skb_zerocopy(). 3706 */ 3707 unsigned int 3708 skb_zerocopy_headlen(const struct sk_buff *from) 3709 { 3710 unsigned int hlen = 0; 3711 3712 if (!from->head_frag || 3713 skb_headlen(from) < L1_CACHE_BYTES || 3714 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3715 hlen = skb_headlen(from); 3716 if (!hlen) 3717 hlen = from->len; 3718 } 3719 3720 if (skb_has_frag_list(from)) 3721 hlen = from->len; 3722 3723 return hlen; 3724 } 3725 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3726 3727 /** 3728 * skb_zerocopy - Zero copy skb to skb 3729 * @to: destination buffer 3730 * @from: source buffer 3731 * @len: number of bytes to copy from source buffer 3732 * @hlen: size of linear headroom in destination buffer 3733 * 3734 * Copies up to `len` bytes from `from` to `to` by creating references 3735 * to the frags in the source buffer. 3736 * 3737 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3738 * headroom in the `to` buffer. 
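 *
 * A rough sketch of a caller (illustrative; the allocation size is a
 * simplification):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (!to || skb_zerocopy(to, from, len, hlen))
 *		goto err;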
3739 * 3740 * Return value: 3741 * 0: everything is OK 3742 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3743 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3744 */ 3745 int 3746 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3747 { 3748 int i, j = 0; 3749 int plen = 0; /* length of skb->head fragment */ 3750 int ret; 3751 struct page *page; 3752 unsigned int offset; 3753 3754 BUG_ON(!from->head_frag && !hlen); 3755 3756 /* dont bother with small payloads */ 3757 if (len <= skb_tailroom(to)) 3758 return skb_copy_bits(from, 0, skb_put(to, len), len); 3759 3760 if (hlen) { 3761 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3762 if (unlikely(ret)) 3763 return ret; 3764 len -= hlen; 3765 } else { 3766 plen = min_t(int, skb_headlen(from), len); 3767 if (plen) { 3768 page = virt_to_head_page(from->head); 3769 offset = from->data - (unsigned char *)page_address(page); 3770 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3771 offset, plen); 3772 get_page(page); 3773 j = 1; 3774 len -= plen; 3775 } 3776 } 3777 3778 skb_len_add(to, len + plen); 3779 3780 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3781 skb_tx_error(from); 3782 return -ENOMEM; 3783 } 3784 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3785 3786 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3787 int size; 3788 3789 if (!len) 3790 break; 3791 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3792 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3793 len); 3794 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3795 len -= size; 3796 skb_frag_ref(to, j); 3797 j++; 3798 } 3799 skb_shinfo(to)->nr_frags = j; 3800 3801 return 0; 3802 } 3803 EXPORT_SYMBOL_GPL(skb_zerocopy); 3804 3805 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3806 { 3807 __wsum csum; 3808 long csstart; 3809 3810 if (skb->ip_summed == CHECKSUM_PARTIAL) 3811 csstart = skb_checksum_start_offset(skb); 3812 else 3813 csstart = skb_headlen(skb); 3814 3815 BUG_ON(csstart > skb_headlen(skb)); 3816 3817 skb_copy_from_linear_data(skb, to, csstart); 3818 3819 csum = 0; 3820 if (csstart != skb->len) 3821 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3822 skb->len - csstart); 3823 3824 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3825 long csstuff = csstart + skb->csum_offset; 3826 3827 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3828 } 3829 } 3830 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3831 3832 /** 3833 * skb_dequeue - remove from the head of the queue 3834 * @list: list to dequeue from 3835 * 3836 * Remove the head of the list. The list lock is taken so the function 3837 * may be used safely with other locking list functions. The head item is 3838 * returned or %NULL if the list is empty. 3839 */ 3840 3841 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3842 { 3843 unsigned long flags; 3844 struct sk_buff *result; 3845 3846 spin_lock_irqsave(&list->lock, flags); 3847 result = __skb_dequeue(list); 3848 spin_unlock_irqrestore(&list->lock, flags); 3849 return result; 3850 } 3851 EXPORT_SYMBOL(skb_dequeue); 3852 3853 /** 3854 * skb_dequeue_tail - remove from the tail of the queue 3855 * @list: list to dequeue from 3856 * 3857 * Remove the tail of the list. The list lock is taken so the function 3858 * may be used safely with other locking list functions. The tail item is 3859 * returned or %NULL if the list is empty. 
3860 */ 3861 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3862 { 3863 unsigned long flags; 3864 struct sk_buff *result; 3865 3866 spin_lock_irqsave(&list->lock, flags); 3867 result = __skb_dequeue_tail(list); 3868 spin_unlock_irqrestore(&list->lock, flags); 3869 return result; 3870 } 3871 EXPORT_SYMBOL(skb_dequeue_tail); 3872 3873 /** 3874 * skb_queue_purge_reason - empty a list 3875 * @list: list to empty 3876 * @reason: drop reason 3877 * 3878 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3879 * the list and one reference dropped. This function takes the list 3880 * lock and is atomic with respect to other list locking functions. 3881 */ 3882 void skb_queue_purge_reason(struct sk_buff_head *list, 3883 enum skb_drop_reason reason) 3884 { 3885 struct sk_buff_head tmp; 3886 unsigned long flags; 3887 3888 if (skb_queue_empty_lockless(list)) 3889 return; 3890 3891 __skb_queue_head_init(&tmp); 3892 3893 spin_lock_irqsave(&list->lock, flags); 3894 skb_queue_splice_init(list, &tmp); 3895 spin_unlock_irqrestore(&list->lock, flags); 3896 3897 __skb_queue_purge_reason(&tmp, reason); 3898 } 3899 EXPORT_SYMBOL(skb_queue_purge_reason); 3900 3901 /** 3902 * skb_rbtree_purge - empty a skb rbtree 3903 * @root: root of the rbtree to empty 3904 * Return value: the sum of truesizes of all purged skbs. 3905 * 3906 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3907 * the list and one reference dropped. This function does not take 3908 * any lock. Synchronization should be handled by the caller (e.g., TCP 3909 * out-of-order queue is protected by the socket lock). 3910 */ 3911 unsigned int skb_rbtree_purge(struct rb_root *root) 3912 { 3913 struct rb_node *p = rb_first(root); 3914 unsigned int sum = 0; 3915 3916 while (p) { 3917 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3918 3919 p = rb_next(p); 3920 rb_erase(&skb->rbnode, root); 3921 sum += skb->truesize; 3922 kfree_skb(skb); 3923 } 3924 return sum; 3925 } 3926 3927 void skb_errqueue_purge(struct sk_buff_head *list) 3928 { 3929 struct sk_buff *skb, *next; 3930 struct sk_buff_head kill; 3931 unsigned long flags; 3932 3933 __skb_queue_head_init(&kill); 3934 3935 spin_lock_irqsave(&list->lock, flags); 3936 skb_queue_walk_safe(list, skb, next) { 3937 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3938 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3939 continue; 3940 __skb_unlink(skb, list); 3941 __skb_queue_tail(&kill, skb); 3942 } 3943 spin_unlock_irqrestore(&list->lock, flags); 3944 __skb_queue_purge(&kill); 3945 } 3946 EXPORT_SYMBOL(skb_errqueue_purge); 3947 3948 /** 3949 * skb_queue_head - queue a buffer at the list head 3950 * @list: list to use 3951 * @newsk: buffer to queue 3952 * 3953 * Queue a buffer at the start of the list. This function takes the 3954 * list lock and can be used safely with other locking &sk_buff functions 3955 * safely. 3956 * 3957 * A buffer cannot be placed on two lists at the same time. 3958 */ 3959 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3960 { 3961 unsigned long flags; 3962 3963 spin_lock_irqsave(&list->lock, flags); 3964 __skb_queue_head(list, newsk); 3965 spin_unlock_irqrestore(&list->lock, flags); 3966 } 3967 EXPORT_SYMBOL(skb_queue_head); 3968 3969 /** 3970 * skb_queue_tail - queue a buffer at the list tail 3971 * @list: list to use 3972 * @newsk: buffer to queue 3973 * 3974 * Queue a buffer at the tail of the list. 
This function takes the 3975 * list lock and can be used safely with other locking &sk_buff functions 3976 * safely. 3977 * 3978 * A buffer cannot be placed on two lists at the same time. 3979 */ 3980 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3981 { 3982 unsigned long flags; 3983 3984 spin_lock_irqsave(&list->lock, flags); 3985 __skb_queue_tail(list, newsk); 3986 spin_unlock_irqrestore(&list->lock, flags); 3987 } 3988 EXPORT_SYMBOL(skb_queue_tail); 3989 3990 /** 3991 * skb_unlink - remove a buffer from a list 3992 * @skb: buffer to remove 3993 * @list: list to use 3994 * 3995 * Remove a packet from a list. The list locks are taken and this 3996 * function is atomic with respect to other list locked calls 3997 * 3998 * You must know what list the SKB is on. 3999 */ 4000 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 4001 { 4002 unsigned long flags; 4003 4004 spin_lock_irqsave(&list->lock, flags); 4005 __skb_unlink(skb, list); 4006 spin_unlock_irqrestore(&list->lock, flags); 4007 } 4008 EXPORT_SYMBOL(skb_unlink); 4009 4010 /** 4011 * skb_append - append a buffer 4012 * @old: buffer to insert after 4013 * @newsk: buffer to insert 4014 * @list: list to use 4015 * 4016 * Place a packet after a given packet in a list. The list locks are taken 4017 * and this function is atomic with respect to other list locked calls. 4018 * A buffer cannot be placed on two lists at the same time. 4019 */ 4020 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 4021 { 4022 unsigned long flags; 4023 4024 spin_lock_irqsave(&list->lock, flags); 4025 __skb_queue_after(list, old, newsk); 4026 spin_unlock_irqrestore(&list->lock, flags); 4027 } 4028 EXPORT_SYMBOL(skb_append); 4029 4030 static inline void skb_split_inside_header(struct sk_buff *skb, 4031 struct sk_buff* skb1, 4032 const u32 len, const int pos) 4033 { 4034 int i; 4035 4036 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 4037 pos - len); 4038 /* And move data appendix as is. */ 4039 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 4040 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 4041 4042 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 4043 skb_shinfo(skb)->nr_frags = 0; 4044 skb1->data_len = skb->data_len; 4045 skb1->len += skb1->data_len; 4046 skb->data_len = 0; 4047 skb->len = len; 4048 skb_set_tail_pointer(skb, len); 4049 } 4050 4051 static inline void skb_split_no_header(struct sk_buff *skb, 4052 struct sk_buff* skb1, 4053 const u32 len, int pos) 4054 { 4055 int i, k = 0; 4056 const int nfrags = skb_shinfo(skb)->nr_frags; 4057 4058 skb_shinfo(skb)->nr_frags = 0; 4059 skb1->len = skb1->data_len = skb->len - len; 4060 skb->len = len; 4061 skb->data_len = len - pos; 4062 4063 for (i = 0; i < nfrags; i++) { 4064 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4065 4066 if (pos + size > len) { 4067 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 4068 4069 if (pos < len) { 4070 /* Split frag. 4071 * We have two variants in this case: 4072 * 1. Move all the frag to the second 4073 * part, if it is possible. F.e. 4074 * this approach is mandatory for TUX, 4075 * where splitting is expensive. 4076 * 2. Split is accurately. We make this. 
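 * The code below implements variant 2: the frag that straddles the
 * split point gets an extra reference, skb1's copy of it is advanced
 * past the split while the original copy is shrunk to end exactly at
 * len.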
4077 */ 4078 skb_frag_ref(skb, i); 4079 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 4080 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 4081 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 4082 skb_shinfo(skb)->nr_frags++; 4083 } 4084 k++; 4085 } else 4086 skb_shinfo(skb)->nr_frags++; 4087 pos += size; 4088 } 4089 skb_shinfo(skb1)->nr_frags = k; 4090 } 4091 4092 /** 4093 * skb_split - Split fragmented skb to two parts at length len. 4094 * @skb: the buffer to split 4095 * @skb1: the buffer to receive the second part 4096 * @len: new length for skb 4097 */ 4098 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 4099 { 4100 int pos = skb_headlen(skb); 4101 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 4102 4103 skb_zcopy_downgrade_managed(skb); 4104 4105 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 4106 skb_zerocopy_clone(skb1, skb, 0); 4107 if (len < pos) /* Split line is inside header. */ 4108 skb_split_inside_header(skb, skb1, len, pos); 4109 else /* Second chunk has no header, nothing to copy. */ 4110 skb_split_no_header(skb, skb1, len, pos); 4111 } 4112 EXPORT_SYMBOL(skb_split); 4113 4114 /* Shifting from/to a cloned skb is a no-go. 4115 * 4116 * Caller cannot keep skb_shinfo related pointers past calling here! 4117 */ 4118 static int skb_prepare_for_shift(struct sk_buff *skb) 4119 { 4120 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 4121 } 4122 4123 /** 4124 * skb_shift - Shifts paged data partially from skb to another 4125 * @tgt: buffer into which tail data gets added 4126 * @skb: buffer from which the paged data comes from 4127 * @shiftlen: shift up to this many bytes 4128 * 4129 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4130 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4131 * It's up to caller to free skb if everything was shifted. 4132 * 4133 * If @tgt runs out of frags, the whole operation is aborted. 4134 * 4135 * Skb cannot include anything else but paged data while tgt is allowed 4136 * to have non-paged data as well. 4137 * 4138 * TODO: full sized shift could be optimized but that would need 4139 * specialized skb free'er to handle frags without up-to-date nr_frags. 4140 */ 4141 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4142 { 4143 int from, to, merge, todo; 4144 skb_frag_t *fragfrom, *fragto; 4145 4146 BUG_ON(shiftlen > skb->len); 4147 4148 if (skb_headlen(skb)) 4149 return 0; 4150 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4151 return 0; 4152 4153 todo = shiftlen; 4154 from = 0; 4155 to = skb_shinfo(tgt)->nr_frags; 4156 fragfrom = &skb_shinfo(skb)->frags[from]; 4157 4158 /* Actual merge is delayed until the point when we know we can 4159 * commit all, so that we don't have to undo partial changes 4160 */ 4161 if (!to || 4162 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4163 skb_frag_off(fragfrom))) { 4164 merge = -1; 4165 } else { 4166 merge = to - 1; 4167 4168 todo -= skb_frag_size(fragfrom); 4169 if (todo < 0) { 4170 if (skb_prepare_for_shift(skb) || 4171 skb_prepare_for_shift(tgt)) 4172 return 0; 4173 4174 /* All previous frag pointers might be stale! 
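 * (skb_prepare_for_shift() may have reallocated the head via
 * skb_unclone()), so reload fragfrom/fragto below before use.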
*/ 4175 fragfrom = &skb_shinfo(skb)->frags[from]; 4176 fragto = &skb_shinfo(tgt)->frags[merge]; 4177 4178 skb_frag_size_add(fragto, shiftlen); 4179 skb_frag_size_sub(fragfrom, shiftlen); 4180 skb_frag_off_add(fragfrom, shiftlen); 4181 4182 goto onlymerged; 4183 } 4184 4185 from++; 4186 } 4187 4188 /* Skip full, not-fitting skb to avoid expensive operations */ 4189 if ((shiftlen == skb->len) && 4190 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4191 return 0; 4192 4193 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4194 return 0; 4195 4196 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4197 if (to == MAX_SKB_FRAGS) 4198 return 0; 4199 4200 fragfrom = &skb_shinfo(skb)->frags[from]; 4201 fragto = &skb_shinfo(tgt)->frags[to]; 4202 4203 if (todo >= skb_frag_size(fragfrom)) { 4204 *fragto = *fragfrom; 4205 todo -= skb_frag_size(fragfrom); 4206 from++; 4207 to++; 4208 4209 } else { 4210 __skb_frag_ref(fragfrom); 4211 skb_frag_page_copy(fragto, fragfrom); 4212 skb_frag_off_copy(fragto, fragfrom); 4213 skb_frag_size_set(fragto, todo); 4214 4215 skb_frag_off_add(fragfrom, todo); 4216 skb_frag_size_sub(fragfrom, todo); 4217 todo = 0; 4218 4219 to++; 4220 break; 4221 } 4222 } 4223 4224 /* Ready to "commit" this state change to tgt */ 4225 skb_shinfo(tgt)->nr_frags = to; 4226 4227 if (merge >= 0) { 4228 fragfrom = &skb_shinfo(skb)->frags[0]; 4229 fragto = &skb_shinfo(tgt)->frags[merge]; 4230 4231 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4232 __skb_frag_unref(fragfrom, skb->pp_recycle); 4233 } 4234 4235 /* Reposition in the original skb */ 4236 to = 0; 4237 while (from < skb_shinfo(skb)->nr_frags) 4238 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4239 skb_shinfo(skb)->nr_frags = to; 4240 4241 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4242 4243 onlymerged: 4244 /* Most likely the tgt won't ever need its checksum anymore, skb on 4245 * the other hand might need it if it needs to be resent 4246 */ 4247 tgt->ip_summed = CHECKSUM_PARTIAL; 4248 skb->ip_summed = CHECKSUM_PARTIAL; 4249 4250 skb_len_add(skb, -shiftlen); 4251 skb_len_add(tgt, shiftlen); 4252 4253 return shiftlen; 4254 } 4255 4256 /** 4257 * skb_prepare_seq_read - Prepare a sequential read of skb data 4258 * @skb: the buffer to read 4259 * @from: lower offset of data to be read 4260 * @to: upper offset of data to be read 4261 * @st: state variable 4262 * 4263 * Initializes the specified state variable. Must be called before 4264 * invoking skb_seq_read() for the first time. 4265 */ 4266 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4267 unsigned int to, struct skb_seq_state *st) 4268 { 4269 st->lower_offset = from; 4270 st->upper_offset = to; 4271 st->root_skb = st->cur_skb = skb; 4272 st->frag_idx = st->stepped_offset = 0; 4273 st->frag_data = NULL; 4274 st->frag_off = 0; 4275 } 4276 EXPORT_SYMBOL(skb_prepare_seq_read); 4277 4278 /** 4279 * skb_seq_read - Sequentially read skb data 4280 * @consumed: number of bytes consumed by the caller so far 4281 * @data: destination pointer for data to be returned 4282 * @st: state variable 4283 * 4284 * Reads a block of skb data at @consumed relative to the 4285 * lower offset specified to skb_prepare_seq_read(). Assigns 4286 * the head of the data block to @data and returns the length 4287 * of the block or 0 if the end of the skb data or the upper 4288 * offset has been reached. 4289 * 4290 * The caller is not required to consume all of the data 4291 * returned, i.e. 
@consumed is typically set to the number 4292 * of bytes already consumed and the next call to 4293 * skb_seq_read() will return the remaining part of the block. 4294 * 4295 * Note 1: The size of each block of data returned can be arbitrary, 4296 * this limitation is the cost for zerocopy sequential 4297 * reads of potentially non linear data. 4298 * 4299 * Note 2: Fragment lists within fragments are not implemented 4300 * at the moment, state->root_skb could be replaced with 4301 * a stack for this purpose. 4302 */ 4303 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4304 struct skb_seq_state *st) 4305 { 4306 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4307 skb_frag_t *frag; 4308 4309 if (unlikely(abs_offset >= st->upper_offset)) { 4310 if (st->frag_data) { 4311 kunmap_atomic(st->frag_data); 4312 st->frag_data = NULL; 4313 } 4314 return 0; 4315 } 4316 4317 next_skb: 4318 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4319 4320 if (abs_offset < block_limit && !st->frag_data) { 4321 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4322 return block_limit - abs_offset; 4323 } 4324 4325 if (st->frag_idx == 0 && !st->frag_data) 4326 st->stepped_offset += skb_headlen(st->cur_skb); 4327 4328 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4329 unsigned int pg_idx, pg_off, pg_sz; 4330 4331 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4332 4333 pg_idx = 0; 4334 pg_off = skb_frag_off(frag); 4335 pg_sz = skb_frag_size(frag); 4336 4337 if (skb_frag_must_loop(skb_frag_page(frag))) { 4338 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4339 pg_off = offset_in_page(pg_off + st->frag_off); 4340 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4341 PAGE_SIZE - pg_off); 4342 } 4343 4344 block_limit = pg_sz + st->stepped_offset; 4345 if (abs_offset < block_limit) { 4346 if (!st->frag_data) 4347 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4348 4349 *data = (u8 *)st->frag_data + pg_off + 4350 (abs_offset - st->stepped_offset); 4351 4352 return block_limit - abs_offset; 4353 } 4354 4355 if (st->frag_data) { 4356 kunmap_atomic(st->frag_data); 4357 st->frag_data = NULL; 4358 } 4359 4360 st->stepped_offset += pg_sz; 4361 st->frag_off += pg_sz; 4362 if (st->frag_off == skb_frag_size(frag)) { 4363 st->frag_off = 0; 4364 st->frag_idx++; 4365 } 4366 } 4367 4368 if (st->frag_data) { 4369 kunmap_atomic(st->frag_data); 4370 st->frag_data = NULL; 4371 } 4372 4373 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4374 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4375 st->frag_idx = 0; 4376 goto next_skb; 4377 } else if (st->cur_skb->next) { 4378 st->cur_skb = st->cur_skb->next; 4379 st->frag_idx = 0; 4380 goto next_skb; 4381 } 4382 4383 return 0; 4384 } 4385 EXPORT_SYMBOL(skb_seq_read); 4386 4387 /** 4388 * skb_abort_seq_read - Abort a sequential read of skb data 4389 * @st: state variable 4390 * 4391 * Must be called if skb_seq_read() was not called until it 4392 * returned 0. 
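 *
 * A minimal usage sketch of the sequential read API (illustrative
 * only; process_block() stands in for caller code):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (!process_block(data, len)) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}
 *
 * The skb_abort_seq_read() call above is what releases the mapping
 * when the walk stops before skb_seq_read() has returned 0.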
4393 */ 4394 void skb_abort_seq_read(struct skb_seq_state *st) 4395 { 4396 if (st->frag_data) 4397 kunmap_atomic(st->frag_data); 4398 } 4399 EXPORT_SYMBOL(skb_abort_seq_read); 4400 4401 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4402 4403 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4404 struct ts_config *conf, 4405 struct ts_state *state) 4406 { 4407 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4408 } 4409 4410 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4411 { 4412 skb_abort_seq_read(TS_SKB_CB(state)); 4413 } 4414 4415 /** 4416 * skb_find_text - Find a text pattern in skb data 4417 * @skb: the buffer to look in 4418 * @from: search offset 4419 * @to: search limit 4420 * @config: textsearch configuration 4421 * 4422 * Finds a pattern in the skb data according to the specified 4423 * textsearch configuration. Use textsearch_next() to retrieve 4424 * subsequent occurrences of the pattern. Returns the offset 4425 * to the first occurrence or UINT_MAX if no match was found. 4426 */ 4427 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4428 unsigned int to, struct ts_config *config) 4429 { 4430 unsigned int patlen = config->ops->get_pattern_len(config); 4431 struct ts_state state; 4432 unsigned int ret; 4433 4434 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4435 4436 config->get_next_block = skb_ts_get_next_block; 4437 config->finish = skb_ts_finish; 4438 4439 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4440 4441 ret = textsearch_find(config, &state); 4442 return (ret + patlen <= to - from ? ret : UINT_MAX); 4443 } 4444 EXPORT_SYMBOL(skb_find_text); 4445 4446 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4447 int offset, size_t size, size_t max_frags) 4448 { 4449 int i = skb_shinfo(skb)->nr_frags; 4450 4451 if (skb_can_coalesce(skb, i, page, offset)) { 4452 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4453 } else if (i < max_frags) { 4454 skb_zcopy_downgrade_managed(skb); 4455 get_page(page); 4456 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4457 } else { 4458 return -EMSGSIZE; 4459 } 4460 4461 return 0; 4462 } 4463 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4464 4465 /** 4466 * skb_pull_rcsum - pull skb and update receive checksum 4467 * @skb: buffer to update 4468 * @len: length of data pulled 4469 * 4470 * This function performs an skb_pull on the packet and updates 4471 * the CHECKSUM_COMPLETE checksum. It should be used on 4472 * receive path processing instead of skb_pull unless you know 4473 * that the checksum difference is zero (e.g., a valid IP header) 4474 * or you are setting ip_summed to CHECKSUM_NONE. 
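 *
 * Example (as done when stripping a VLAN header on receive, see
 * skb_vlan_untag() below):
 *
 *	skb_pull_rcsum(skb, VLAN_HLEN);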
4475 */ 4476 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4477 { 4478 unsigned char *data = skb->data; 4479 4480 BUG_ON(len > skb->len); 4481 __skb_pull(skb, len); 4482 skb_postpull_rcsum(skb, data, len); 4483 return skb->data; 4484 } 4485 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4486 4487 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4488 { 4489 skb_frag_t head_frag; 4490 struct page *page; 4491 4492 page = virt_to_head_page(frag_skb->head); 4493 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4494 (unsigned char *)page_address(page), 4495 skb_headlen(frag_skb)); 4496 return head_frag; 4497 } 4498 4499 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4500 netdev_features_t features, 4501 unsigned int offset) 4502 { 4503 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4504 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4505 unsigned int delta_truesize = 0; 4506 unsigned int delta_len = 0; 4507 struct sk_buff *tail = NULL; 4508 struct sk_buff *nskb, *tmp; 4509 int len_diff, err; 4510 4511 skb_push(skb, -skb_network_offset(skb) + offset); 4512 4513 /* Ensure the head is writeable before touching the shared info */ 4514 err = skb_unclone(skb, GFP_ATOMIC); 4515 if (err) 4516 goto err_linearize; 4517 4518 skb_shinfo(skb)->frag_list = NULL; 4519 4520 while (list_skb) { 4521 nskb = list_skb; 4522 list_skb = list_skb->next; 4523 4524 err = 0; 4525 delta_truesize += nskb->truesize; 4526 if (skb_shared(nskb)) { 4527 tmp = skb_clone(nskb, GFP_ATOMIC); 4528 if (tmp) { 4529 consume_skb(nskb); 4530 nskb = tmp; 4531 err = skb_unclone(nskb, GFP_ATOMIC); 4532 } else { 4533 err = -ENOMEM; 4534 } 4535 } 4536 4537 if (!tail) 4538 skb->next = nskb; 4539 else 4540 tail->next = nskb; 4541 4542 if (unlikely(err)) { 4543 nskb->next = list_skb; 4544 goto err_linearize; 4545 } 4546 4547 tail = nskb; 4548 4549 delta_len += nskb->len; 4550 4551 skb_push(nskb, -skb_network_offset(nskb) + offset); 4552 4553 skb_release_head_state(nskb); 4554 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4555 __copy_skb_header(nskb, skb); 4556 4557 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4558 nskb->transport_header += len_diff; 4559 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4560 nskb->data - tnl_hlen, 4561 offset + tnl_hlen); 4562 4563 if (skb_needs_linearize(nskb, features) && 4564 __skb_linearize(nskb)) 4565 goto err_linearize; 4566 } 4567 4568 skb->truesize = skb->truesize - delta_truesize; 4569 skb->data_len = skb->data_len - delta_len; 4570 skb->len = skb->len - delta_len; 4571 4572 skb_gso_reset(skb); 4573 4574 skb->prev = tail; 4575 4576 if (skb_needs_linearize(skb, features) && 4577 __skb_linearize(skb)) 4578 goto err_linearize; 4579 4580 skb_get(skb); 4581 4582 return skb; 4583 4584 err_linearize: 4585 kfree_skb_list(skb->next); 4586 skb->next = NULL; 4587 return ERR_PTR(-ENOMEM); 4588 } 4589 EXPORT_SYMBOL_GPL(skb_segment_list); 4590 4591 /** 4592 * skb_segment - Perform protocol segmentation on skb. 4593 * @head_skb: buffer to segment 4594 * @features: features for the output path (see dev->features) 4595 * 4596 * This function performs segmentation on the given skb. It returns 4597 * a pointer to the first in a list of new skbs for the segments. 4598 * In case of error it returns ERR_PTR(err). 
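 *
 * Sketch of a typical consumer of the returned list (in practice this
 * is reached via the protocol GSO callbacks and skb_gso_segment();
 * error handling abbreviated):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *
 * after which the caller walks the new list via segs->next (the last
 * element is also available as segs->prev).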
4599 */ 4600 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4601 netdev_features_t features) 4602 { 4603 struct sk_buff *segs = NULL; 4604 struct sk_buff *tail = NULL; 4605 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4606 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4607 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4608 unsigned int offset = doffset; 4609 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4610 unsigned int partial_segs = 0; 4611 unsigned int headroom; 4612 unsigned int len = head_skb->len; 4613 struct sk_buff *frag_skb; 4614 skb_frag_t *frag; 4615 __be16 proto; 4616 bool csum, sg; 4617 int err = -ENOMEM; 4618 int i = 0; 4619 int nfrags, pos; 4620 4621 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4622 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4623 struct sk_buff *check_skb; 4624 4625 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4626 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4627 /* gso_size is untrusted, and we have a frag_list with 4628 * a linear non head_frag item. 4629 * 4630 * If head_skb's headlen does not fit requested gso_size, 4631 * it means that the frag_list members do NOT terminate 4632 * on exact gso_size boundaries. Hence we cannot perform 4633 * skb_frag_t page sharing. Therefore we must fallback to 4634 * copying the frag_list skbs; we do so by disabling SG. 4635 */ 4636 features &= ~NETIF_F_SG; 4637 break; 4638 } 4639 } 4640 } 4641 4642 __skb_push(head_skb, doffset); 4643 proto = skb_network_protocol(head_skb, NULL); 4644 if (unlikely(!proto)) 4645 return ERR_PTR(-EINVAL); 4646 4647 sg = !!(features & NETIF_F_SG); 4648 csum = !!can_checksum_protocol(features, proto); 4649 4650 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4651 if (!(features & NETIF_F_GSO_PARTIAL)) { 4652 struct sk_buff *iter; 4653 unsigned int frag_len; 4654 4655 if (!list_skb || 4656 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4657 goto normal; 4658 4659 /* If we get here then all the required 4660 * GSO features except frag_list are supported. 4661 * Try to split the SKB to multiple GSO SKBs 4662 * with no frag_list. 4663 * Currently we can do that only when the buffers don't 4664 * have a linear part and all the buffers except 4665 * the last are of the same length. 4666 */ 4667 frag_len = list_skb->len; 4668 skb_walk_frags(head_skb, iter) { 4669 if (frag_len != iter->len && iter->next) 4670 goto normal; 4671 if (skb_headlen(iter) && !iter->head_frag) 4672 goto normal; 4673 4674 len -= iter->len; 4675 } 4676 4677 if (len != frag_len) 4678 goto normal; 4679 } 4680 4681 /* GSO partial only requires that we trim off any excess that 4682 * doesn't fit into an MSS sized block, so take care of that 4683 * now. 4684 * Cap len to not accidentally hit GSO_BY_FRAGS. 
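 *
 * E.g. (illustrative numbers) len = 45000 and mss = 1448 gives
 * partial_segs = 31, so the super-segment mss below becomes 44888.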
4685 */ 4686 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4687 if (partial_segs > 1) 4688 mss *= partial_segs; 4689 else 4690 partial_segs = 0; 4691 } 4692 4693 normal: 4694 headroom = skb_headroom(head_skb); 4695 pos = skb_headlen(head_skb); 4696 4697 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4698 return ERR_PTR(-ENOMEM); 4699 4700 nfrags = skb_shinfo(head_skb)->nr_frags; 4701 frag = skb_shinfo(head_skb)->frags; 4702 frag_skb = head_skb; 4703 4704 do { 4705 struct sk_buff *nskb; 4706 skb_frag_t *nskb_frag; 4707 int hsize; 4708 int size; 4709 4710 if (unlikely(mss == GSO_BY_FRAGS)) { 4711 len = list_skb->len; 4712 } else { 4713 len = head_skb->len - offset; 4714 if (len > mss) 4715 len = mss; 4716 } 4717 4718 hsize = skb_headlen(head_skb) - offset; 4719 4720 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4721 (skb_headlen(list_skb) == len || sg)) { 4722 BUG_ON(skb_headlen(list_skb) > len); 4723 4724 nskb = skb_clone(list_skb, GFP_ATOMIC); 4725 if (unlikely(!nskb)) 4726 goto err; 4727 4728 i = 0; 4729 nfrags = skb_shinfo(list_skb)->nr_frags; 4730 frag = skb_shinfo(list_skb)->frags; 4731 frag_skb = list_skb; 4732 pos += skb_headlen(list_skb); 4733 4734 while (pos < offset + len) { 4735 BUG_ON(i >= nfrags); 4736 4737 size = skb_frag_size(frag); 4738 if (pos + size > offset + len) 4739 break; 4740 4741 i++; 4742 pos += size; 4743 frag++; 4744 } 4745 4746 list_skb = list_skb->next; 4747 4748 if (unlikely(pskb_trim(nskb, len))) { 4749 kfree_skb(nskb); 4750 goto err; 4751 } 4752 4753 hsize = skb_end_offset(nskb); 4754 if (skb_cow_head(nskb, doffset + headroom)) { 4755 kfree_skb(nskb); 4756 goto err; 4757 } 4758 4759 nskb->truesize += skb_end_offset(nskb) - hsize; 4760 skb_release_head_state(nskb); 4761 __skb_push(nskb, doffset); 4762 } else { 4763 if (hsize < 0) 4764 hsize = 0; 4765 if (hsize > len || !sg) 4766 hsize = len; 4767 4768 nskb = __alloc_skb(hsize + doffset + headroom, 4769 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4770 NUMA_NO_NODE); 4771 4772 if (unlikely(!nskb)) 4773 goto err; 4774 4775 skb_reserve(nskb, headroom); 4776 __skb_put(nskb, doffset); 4777 } 4778 4779 if (segs) 4780 tail->next = nskb; 4781 else 4782 segs = nskb; 4783 tail = nskb; 4784 4785 __copy_skb_header(nskb, head_skb); 4786 4787 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4788 skb_reset_mac_len(nskb); 4789 4790 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4791 nskb->data - tnl_hlen, 4792 doffset + tnl_hlen); 4793 4794 if (nskb->len == len + doffset) 4795 goto perform_csum_check; 4796 4797 if (!sg) { 4798 if (!csum) { 4799 if (!nskb->remcsum_offload) 4800 nskb->ip_summed = CHECKSUM_NONE; 4801 SKB_GSO_CB(nskb)->csum = 4802 skb_copy_and_csum_bits(head_skb, offset, 4803 skb_put(nskb, 4804 len), 4805 len); 4806 SKB_GSO_CB(nskb)->csum_start = 4807 skb_headroom(nskb) + doffset; 4808 } else { 4809 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4810 goto err; 4811 } 4812 continue; 4813 } 4814 4815 nskb_frag = skb_shinfo(nskb)->frags; 4816 4817 skb_copy_from_linear_data_offset(head_skb, offset, 4818 skb_put(nskb, hsize), hsize); 4819 4820 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4821 SKBFL_SHARED_FRAG; 4822 4823 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4824 goto err; 4825 4826 while (pos < offset + len) { 4827 if (i >= nfrags) { 4828 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4829 skb_zerocopy_clone(nskb, list_skb, 4830 GFP_ATOMIC)) 4831 goto err; 4832 4833 i = 0; 4834 nfrags = skb_shinfo(list_skb)->nr_frags; 4835 frag = 
skb_shinfo(list_skb)->frags; 4836 frag_skb = list_skb; 4837 if (!skb_headlen(list_skb)) { 4838 BUG_ON(!nfrags); 4839 } else { 4840 BUG_ON(!list_skb->head_frag); 4841 4842 /* to make room for head_frag. */ 4843 i--; 4844 frag--; 4845 } 4846 4847 list_skb = list_skb->next; 4848 } 4849 4850 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4851 MAX_SKB_FRAGS)) { 4852 net_warn_ratelimited( 4853 "skb_segment: too many frags: %u %u\n", 4854 pos, mss); 4855 err = -EINVAL; 4856 goto err; 4857 } 4858 4859 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4860 __skb_frag_ref(nskb_frag); 4861 size = skb_frag_size(nskb_frag); 4862 4863 if (pos < offset) { 4864 skb_frag_off_add(nskb_frag, offset - pos); 4865 skb_frag_size_sub(nskb_frag, offset - pos); 4866 } 4867 4868 skb_shinfo(nskb)->nr_frags++; 4869 4870 if (pos + size <= offset + len) { 4871 i++; 4872 frag++; 4873 pos += size; 4874 } else { 4875 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4876 goto skip_fraglist; 4877 } 4878 4879 nskb_frag++; 4880 } 4881 4882 skip_fraglist: 4883 nskb->data_len = len - hsize; 4884 nskb->len += nskb->data_len; 4885 nskb->truesize += nskb->data_len; 4886 4887 perform_csum_check: 4888 if (!csum) { 4889 if (skb_has_shared_frag(nskb) && 4890 __skb_linearize(nskb)) 4891 goto err; 4892 4893 if (!nskb->remcsum_offload) 4894 nskb->ip_summed = CHECKSUM_NONE; 4895 SKB_GSO_CB(nskb)->csum = 4896 skb_checksum(nskb, doffset, 4897 nskb->len - doffset, 0); 4898 SKB_GSO_CB(nskb)->csum_start = 4899 skb_headroom(nskb) + doffset; 4900 } 4901 } while ((offset += len) < head_skb->len); 4902 4903 /* Some callers want to get the end of the list. 4904 * Put it in segs->prev to avoid walking the list. 4905 * (see validate_xmit_skb_list() for example) 4906 */ 4907 segs->prev = tail; 4908 4909 if (partial_segs) { 4910 struct sk_buff *iter; 4911 int type = skb_shinfo(head_skb)->gso_type; 4912 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4913 4914 /* Update type to add partial and then remove dodgy if set */ 4915 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4916 type &= ~SKB_GSO_DODGY; 4917 4918 /* Update GSO info and prepare to start updating headers on 4919 * our way back down the stack of protocols. 4920 */ 4921 for (iter = segs; iter; iter = iter->next) { 4922 skb_shinfo(iter)->gso_size = gso_size; 4923 skb_shinfo(iter)->gso_segs = partial_segs; 4924 skb_shinfo(iter)->gso_type = type; 4925 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4926 } 4927 4928 if (tail->len - doffset <= gso_size) 4929 skb_shinfo(tail)->gso_size = 0; 4930 else if (tail != segs) 4931 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4932 } 4933 4934 /* Following permits correct backpressure, for protocols 4935 * using skb_set_owner_w(). 4936 * Idea is to tranfert ownership from head_skb to last segment. 
4937 */ 4938 if (head_skb->destructor == sock_wfree) { 4939 swap(tail->truesize, head_skb->truesize); 4940 swap(tail->destructor, head_skb->destructor); 4941 swap(tail->sk, head_skb->sk); 4942 } 4943 return segs; 4944 4945 err: 4946 kfree_skb_list(segs); 4947 return ERR_PTR(err); 4948 } 4949 EXPORT_SYMBOL_GPL(skb_segment); 4950 4951 #ifdef CONFIG_SKB_EXTENSIONS 4952 #define SKB_EXT_ALIGN_VALUE 8 4953 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4954 4955 static const u8 skb_ext_type_len[] = { 4956 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4957 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4958 #endif 4959 #ifdef CONFIG_XFRM 4960 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4961 #endif 4962 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4963 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4964 #endif 4965 #if IS_ENABLED(CONFIG_MPTCP) 4966 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4967 #endif 4968 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4969 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 4970 #endif 4971 }; 4972 4973 static __always_inline unsigned int skb_ext_total_length(void) 4974 { 4975 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 4976 int i; 4977 4978 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 4979 l += skb_ext_type_len[i]; 4980 4981 return l; 4982 } 4983 4984 static void skb_extensions_init(void) 4985 { 4986 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4987 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4988 BUILD_BUG_ON(skb_ext_total_length() > 255); 4989 #endif 4990 4991 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4992 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4993 0, 4994 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4995 NULL); 4996 } 4997 #else 4998 static void skb_extensions_init(void) {} 4999 #endif 5000 5001 /* The SKB kmem_cache slab is critical for network performance. Never 5002 * merge/alias the slab with similar sized objects. This avoids fragmentation 5003 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 5004 */ 5005 #ifndef CONFIG_SLUB_TINY 5006 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 5007 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 5008 #define FLAG_SKB_NO_MERGE 0 5009 #endif 5010 5011 void __init skb_init(void) 5012 { 5013 net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 5014 sizeof(struct sk_buff), 5015 0, 5016 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 5017 FLAG_SKB_NO_MERGE, 5018 offsetof(struct sk_buff, cb), 5019 sizeof_field(struct sk_buff, cb), 5020 NULL); 5021 net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 5022 sizeof(struct sk_buff_fclones), 5023 0, 5024 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5025 NULL); 5026 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 5027 * struct skb_shared_info is located at the end of skb->head, 5028 * and should not be copied to/from user. 
5029 */ 5030 net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 5031 SKB_SMALL_HEAD_CACHE_SIZE, 5032 0, 5033 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 5034 0, 5035 SKB_SMALL_HEAD_HEADROOM, 5036 NULL); 5037 skb_extensions_init(); 5038 } 5039 5040 static int 5041 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 5042 unsigned int recursion_level) 5043 { 5044 int start = skb_headlen(skb); 5045 int i, copy = start - offset; 5046 struct sk_buff *frag_iter; 5047 int elt = 0; 5048 5049 if (unlikely(recursion_level >= 24)) 5050 return -EMSGSIZE; 5051 5052 if (copy > 0) { 5053 if (copy > len) 5054 copy = len; 5055 sg_set_buf(sg, skb->data + offset, copy); 5056 elt++; 5057 if ((len -= copy) == 0) 5058 return elt; 5059 offset += copy; 5060 } 5061 5062 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5063 int end; 5064 5065 WARN_ON(start > offset + len); 5066 5067 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 5068 if ((copy = end - offset) > 0) { 5069 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5070 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5071 return -EMSGSIZE; 5072 5073 if (copy > len) 5074 copy = len; 5075 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 5076 skb_frag_off(frag) + offset - start); 5077 elt++; 5078 if (!(len -= copy)) 5079 return elt; 5080 offset += copy; 5081 } 5082 start = end; 5083 } 5084 5085 skb_walk_frags(skb, frag_iter) { 5086 int end, ret; 5087 5088 WARN_ON(start > offset + len); 5089 5090 end = start + frag_iter->len; 5091 if ((copy = end - offset) > 0) { 5092 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5093 return -EMSGSIZE; 5094 5095 if (copy > len) 5096 copy = len; 5097 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 5098 copy, recursion_level + 1); 5099 if (unlikely(ret < 0)) 5100 return ret; 5101 elt += ret; 5102 if ((len -= copy) == 0) 5103 return elt; 5104 offset += copy; 5105 } 5106 start = end; 5107 } 5108 BUG_ON(len); 5109 return elt; 5110 } 5111 5112 /** 5113 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 5114 * @skb: Socket buffer containing the buffers to be mapped 5115 * @sg: The scatter-gather list to map into 5116 * @offset: The offset into the buffer's contents to start mapping 5117 * @len: Length of buffer space to be mapped 5118 * 5119 * Fill the specified scatter-gather list with mappings/pointers into a 5120 * region of the buffer space attached to a socket buffer. Returns either 5121 * the number of scatterlist items used, or -EMSGSIZE if the contents 5122 * could not fit. 5123 */ 5124 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5125 { 5126 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5127 5128 if (nsg <= 0) 5129 return nsg; 5130 5131 sg_mark_end(&sg[nsg - 1]); 5132 5133 return nsg; 5134 } 5135 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5136 5137 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 5138 * sglist without mark the sg which contain last skb data as the end. 5139 * So the caller can mannipulate sg list as will when padding new data after 5140 * the first call without calling sg_unmark_end to expend sg list. 5141 * 5142 * Scenario to use skb_to_sgvec_nomark: 5143 * 1. sg_init_table 5144 * 2. skb_to_sgvec_nomark(payload1) 5145 * 3. skb_to_sgvec_nomark(payload2) 5146 * 5147 * This is equivalent to: 5148 * 1. sg_init_table 5149 * 2. skb_to_sgvec(payload1) 5150 * 3. sg_unmark_end 5151 * 4. 
skb_to_sgvec(payload2) 5152 * 5153 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 5154 * is more preferable. 5155 */ 5156 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5157 int offset, int len) 5158 { 5159 return __skb_to_sgvec(skb, sg, offset, len, 0); 5160 } 5161 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5162 5163 5164 5165 /** 5166 * skb_cow_data - Check that a socket buffer's data buffers are writable 5167 * @skb: The socket buffer to check. 5168 * @tailbits: Amount of trailing space to be added 5169 * @trailer: Returned pointer to the skb where the @tailbits space begins 5170 * 5171 * Make sure that the data buffers attached to a socket buffer are 5172 * writable. If they are not, private copies are made of the data buffers 5173 * and the socket buffer is set to use these instead. 5174 * 5175 * If @tailbits is given, make sure that there is space to write @tailbits 5176 * bytes of data beyond current end of socket buffer. @trailer will be 5177 * set to point to the skb in which this space begins. 5178 * 5179 * The number of scatterlist elements required to completely map the 5180 * COW'd and extended socket buffer will be returned. 5181 */ 5182 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5183 { 5184 int copyflag; 5185 int elt; 5186 struct sk_buff *skb1, **skb_p; 5187 5188 /* If skb is cloned or its head is paged, reallocate 5189 * head pulling out all the pages (pages are considered not writable 5190 * at the moment even if they are anonymous). 5191 */ 5192 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5193 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5194 return -ENOMEM; 5195 5196 /* Easy case. Most of packets will go this way. */ 5197 if (!skb_has_frag_list(skb)) { 5198 /* A little of trouble, not enough of space for trailer. 5199 * This should not happen, when stack is tuned to generate 5200 * good frames. OK, on miss we reallocate and reserve even more 5201 * space, 128 bytes is fair. */ 5202 5203 if (skb_tailroom(skb) < tailbits && 5204 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5205 return -ENOMEM; 5206 5207 /* Voila! */ 5208 *trailer = skb; 5209 return 1; 5210 } 5211 5212 /* Misery. We are in troubles, going to mincer fragments... */ 5213 5214 elt = 1; 5215 skb_p = &skb_shinfo(skb)->frag_list; 5216 copyflag = 0; 5217 5218 while ((skb1 = *skb_p) != NULL) { 5219 int ntail = 0; 5220 5221 /* The fragment is partially pulled by someone, 5222 * this can happen on input. Copy it and everything 5223 * after it. */ 5224 5225 if (skb_shared(skb1)) 5226 copyflag = 1; 5227 5228 /* If the skb is the last, worry about trailer. */ 5229 5230 if (skb1->next == NULL && tailbits) { 5231 if (skb_shinfo(skb1)->nr_frags || 5232 skb_has_frag_list(skb1) || 5233 skb_tailroom(skb1) < tailbits) 5234 ntail = tailbits + 128; 5235 } 5236 5237 if (copyflag || 5238 skb_cloned(skb1) || 5239 ntail || 5240 skb_shinfo(skb1)->nr_frags || 5241 skb_has_frag_list(skb1)) { 5242 struct sk_buff *skb2; 5243 5244 /* Fuck, we are miserable poor guys... */ 5245 if (ntail == 0) 5246 skb2 = skb_copy(skb1, GFP_ATOMIC); 5247 else 5248 skb2 = skb_copy_expand(skb1, 5249 skb_headroom(skb1), 5250 ntail, 5251 GFP_ATOMIC); 5252 if (unlikely(skb2 == NULL)) 5253 return -ENOMEM; 5254 5255 if (skb1->sk) 5256 skb_set_owner_w(skb2, skb1->sk); 5257 5258 /* Looking around. Are we still alive? 
5259 * OK, link new skb, drop old one */ 5260 5261 skb2->next = skb1->next; 5262 *skb_p = skb2; 5263 kfree_skb(skb1); 5264 skb1 = skb2; 5265 } 5266 elt++; 5267 *trailer = skb1; 5268 skb_p = &skb1->next; 5269 } 5270 5271 return elt; 5272 } 5273 EXPORT_SYMBOL_GPL(skb_cow_data); 5274 5275 static void sock_rmem_free(struct sk_buff *skb) 5276 { 5277 struct sock *sk = skb->sk; 5278 5279 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5280 } 5281 5282 static void skb_set_err_queue(struct sk_buff *skb) 5283 { 5284 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5285 * So, it is safe to (mis)use it to mark skbs on the error queue. 5286 */ 5287 skb->pkt_type = PACKET_OUTGOING; 5288 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5289 } 5290 5291 /* 5292 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5293 */ 5294 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5295 { 5296 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5297 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5298 return -ENOMEM; 5299 5300 skb_orphan(skb); 5301 skb->sk = sk; 5302 skb->destructor = sock_rmem_free; 5303 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5304 skb_set_err_queue(skb); 5305 5306 /* before exiting rcu section, make sure dst is refcounted */ 5307 skb_dst_force(skb); 5308 5309 skb_queue_tail(&sk->sk_error_queue, skb); 5310 if (!sock_flag(sk, SOCK_DEAD)) 5311 sk_error_report(sk); 5312 return 0; 5313 } 5314 EXPORT_SYMBOL(sock_queue_err_skb); 5315 5316 static bool is_icmp_err_skb(const struct sk_buff *skb) 5317 { 5318 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5319 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5320 } 5321 5322 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5323 { 5324 struct sk_buff_head *q = &sk->sk_error_queue; 5325 struct sk_buff *skb, *skb_next = NULL; 5326 bool icmp_next = false; 5327 unsigned long flags; 5328 5329 if (skb_queue_empty_lockless(q)) 5330 return NULL; 5331 5332 spin_lock_irqsave(&q->lock, flags); 5333 skb = __skb_dequeue(q); 5334 if (skb && (skb_next = skb_peek(q))) { 5335 icmp_next = is_icmp_err_skb(skb_next); 5336 if (icmp_next) 5337 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5338 } 5339 spin_unlock_irqrestore(&q->lock, flags); 5340 5341 if (is_icmp_err_skb(skb) && !icmp_next) 5342 sk->sk_err = 0; 5343 5344 if (skb_next) 5345 sk_error_report(sk); 5346 5347 return skb; 5348 } 5349 EXPORT_SYMBOL(sock_dequeue_err_skb); 5350 5351 /** 5352 * skb_clone_sk - create clone of skb, and take reference to socket 5353 * @skb: the skb to clone 5354 * 5355 * This function creates a clone of a buffer that holds a reference on 5356 * sk_refcnt. Buffers created via this function are meant to be 5357 * returned using sock_queue_err_skb, or free via kfree_skb. 5358 * 5359 * When passing buffers allocated with this function to sock_queue_err_skb 5360 * it is necessary to wrap the call with sock_hold/sock_put in order to 5361 * prevent the socket from being released prior to being enqueued on 5362 * the sk_error_queue. 
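 *
 * Sketch of the pattern described above (illustrative only, error
 * handling abbreviated):
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone) {
 *		struct sock *sk = clone->sk;
 *
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}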
5363 */ 5364 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5365 { 5366 struct sock *sk = skb->sk; 5367 struct sk_buff *clone; 5368 5369 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5370 return NULL; 5371 5372 clone = skb_clone(skb, GFP_ATOMIC); 5373 if (!clone) { 5374 sock_put(sk); 5375 return NULL; 5376 } 5377 5378 clone->sk = sk; 5379 clone->destructor = sock_efree; 5380 5381 return clone; 5382 } 5383 EXPORT_SYMBOL(skb_clone_sk); 5384 5385 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5386 struct sock *sk, 5387 int tstype, 5388 bool opt_stats) 5389 { 5390 struct sock_exterr_skb *serr; 5391 int err; 5392 5393 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5394 5395 serr = SKB_EXT_ERR(skb); 5396 memset(serr, 0, sizeof(*serr)); 5397 serr->ee.ee_errno = ENOMSG; 5398 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5399 serr->ee.ee_info = tstype; 5400 serr->opt_stats = opt_stats; 5401 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5402 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5403 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5404 if (sk_is_tcp(sk)) 5405 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5406 } 5407 5408 err = sock_queue_err_skb(sk, skb); 5409 5410 if (err) 5411 kfree_skb(skb); 5412 } 5413 5414 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5415 { 5416 bool ret; 5417 5418 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) 5419 return true; 5420 5421 read_lock_bh(&sk->sk_callback_lock); 5422 ret = sk->sk_socket && sk->sk_socket->file && 5423 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5424 read_unlock_bh(&sk->sk_callback_lock); 5425 return ret; 5426 } 5427 5428 void skb_complete_tx_timestamp(struct sk_buff *skb, 5429 struct skb_shared_hwtstamps *hwtstamps) 5430 { 5431 struct sock *sk = skb->sk; 5432 5433 if (!skb_may_tx_timestamp(sk, false)) 5434 goto err; 5435 5436 /* Take a reference to prevent skb_orphan() from freeing the socket, 5437 * but only if the socket refcount is not zero. 
5438 */ 5439 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5440 *skb_hwtstamps(skb) = *hwtstamps; 5441 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5442 sock_put(sk); 5443 return; 5444 } 5445 5446 err: 5447 kfree_skb(skb); 5448 } 5449 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5450 5451 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5452 const struct sk_buff *ack_skb, 5453 struct skb_shared_hwtstamps *hwtstamps, 5454 struct sock *sk, int tstype) 5455 { 5456 struct sk_buff *skb; 5457 bool tsonly, opt_stats = false; 5458 u32 tsflags; 5459 5460 if (!sk) 5461 return; 5462 5463 tsflags = READ_ONCE(sk->sk_tsflags); 5464 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5465 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5466 return; 5467 5468 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5469 if (!skb_may_tx_timestamp(sk, tsonly)) 5470 return; 5471 5472 if (tsonly) { 5473 #ifdef CONFIG_INET 5474 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5475 sk_is_tcp(sk)) { 5476 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5477 ack_skb); 5478 opt_stats = true; 5479 } else 5480 #endif 5481 skb = alloc_skb(0, GFP_ATOMIC); 5482 } else { 5483 skb = skb_clone(orig_skb, GFP_ATOMIC); 5484 5485 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5486 kfree_skb(skb); 5487 return; 5488 } 5489 } 5490 if (!skb) 5491 return; 5492 5493 if (tsonly) { 5494 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5495 SKBTX_ANY_TSTAMP; 5496 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5497 } 5498 5499 if (hwtstamps) 5500 *skb_hwtstamps(skb) = *hwtstamps; 5501 else 5502 __net_timestamp(skb); 5503 5504 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5505 } 5506 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5507 5508 void skb_tstamp_tx(struct sk_buff *orig_skb, 5509 struct skb_shared_hwtstamps *hwtstamps) 5510 { 5511 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5512 SCM_TSTAMP_SND); 5513 } 5514 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5515 5516 #ifdef CONFIG_WIRELESS 5517 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5518 { 5519 struct sock *sk = skb->sk; 5520 struct sock_exterr_skb *serr; 5521 int err = 1; 5522 5523 skb->wifi_acked_valid = 1; 5524 skb->wifi_acked = acked; 5525 5526 serr = SKB_EXT_ERR(skb); 5527 memset(serr, 0, sizeof(*serr)); 5528 serr->ee.ee_errno = ENOMSG; 5529 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5530 5531 /* Take a reference to prevent skb_orphan() from freeing the socket, 5532 * but only if the socket refcount is not zero. 5533 */ 5534 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5535 err = sock_queue_err_skb(sk, skb); 5536 sock_put(sk); 5537 } 5538 if (err) 5539 kfree_skb(skb); 5540 } 5541 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5542 #endif /* CONFIG_WIRELESS */ 5543 5544 /** 5545 * skb_partial_csum_set - set up and verify partial csum values for packet 5546 * @skb: the skb to set 5547 * @start: the number of bytes after skb->data to start checksumming. 5548 * @off: the offset from start to place the checksum. 5549 * 5550 * For untrusted partially-checksummed packets, we need to make sure the values 5551 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5552 * 5553 * This function checks and sets those values and skb->ip_summed: if this 5554 * returns false you should drop the packet. 
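 *
 * Typical use (sketch; @start and @off would come from e.g. an
 * untrusted device-supplied checksum hint):
 *
 *	if (!skb_partial_csum_set(skb, start, off))
 *		goto drop;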
5555 */ 5556 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5557 { 5558 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5559 u32 csum_start = skb_headroom(skb) + (u32)start; 5560 5561 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5562 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5563 start, off, skb_headroom(skb), skb_headlen(skb)); 5564 return false; 5565 } 5566 skb->ip_summed = CHECKSUM_PARTIAL; 5567 skb->csum_start = csum_start; 5568 skb->csum_offset = off; 5569 skb->transport_header = csum_start; 5570 return true; 5571 } 5572 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5573 5574 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5575 unsigned int max) 5576 { 5577 if (skb_headlen(skb) >= len) 5578 return 0; 5579 5580 /* If we need to pullup then pullup to the max, so we 5581 * won't need to do it again. 5582 */ 5583 if (max > skb->len) 5584 max = skb->len; 5585 5586 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5587 return -ENOMEM; 5588 5589 if (skb_headlen(skb) < len) 5590 return -EPROTO; 5591 5592 return 0; 5593 } 5594 5595 #define MAX_TCP_HDR_LEN (15 * 4) 5596 5597 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5598 typeof(IPPROTO_IP) proto, 5599 unsigned int off) 5600 { 5601 int err; 5602 5603 switch (proto) { 5604 case IPPROTO_TCP: 5605 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5606 off + MAX_TCP_HDR_LEN); 5607 if (!err && !skb_partial_csum_set(skb, off, 5608 offsetof(struct tcphdr, 5609 check))) 5610 err = -EPROTO; 5611 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5612 5613 case IPPROTO_UDP: 5614 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5615 off + sizeof(struct udphdr)); 5616 if (!err && !skb_partial_csum_set(skb, off, 5617 offsetof(struct udphdr, 5618 check))) 5619 err = -EPROTO; 5620 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5621 } 5622 5623 return ERR_PTR(-EPROTO); 5624 } 5625 5626 /* This value should be large enough to cover a tagged ethernet header plus 5627 * maximally sized IP and TCP or UDP headers. 5628 */ 5629 #define MAX_IP_HDR_LEN 128 5630 5631 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5632 { 5633 unsigned int off; 5634 bool fragment; 5635 __sum16 *csum; 5636 int err; 5637 5638 fragment = false; 5639 5640 err = skb_maybe_pull_tail(skb, 5641 sizeof(struct iphdr), 5642 MAX_IP_HDR_LEN); 5643 if (err < 0) 5644 goto out; 5645 5646 if (ip_is_fragment(ip_hdr(skb))) 5647 fragment = true; 5648 5649 off = ip_hdrlen(skb); 5650 5651 err = -EPROTO; 5652 5653 if (fragment) 5654 goto out; 5655 5656 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5657 if (IS_ERR(csum)) 5658 return PTR_ERR(csum); 5659 5660 if (recalculate) 5661 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5662 ip_hdr(skb)->daddr, 5663 skb->len - off, 5664 ip_hdr(skb)->protocol, 0); 5665 err = 0; 5666 5667 out: 5668 return err; 5669 } 5670 5671 /* This value should be large enough to cover a tagged ethernet header plus 5672 * an IPv6 header, all options, and a maximal TCP or UDP header. 
5673 */ 5674 #define MAX_IPV6_HDR_LEN 256 5675 5676 #define OPT_HDR(type, skb, off) \ 5677 (type *)(skb_network_header(skb) + (off)) 5678 5679 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5680 { 5681 int err; 5682 u8 nexthdr; 5683 unsigned int off; 5684 unsigned int len; 5685 bool fragment; 5686 bool done; 5687 __sum16 *csum; 5688 5689 fragment = false; 5690 done = false; 5691 5692 off = sizeof(struct ipv6hdr); 5693 5694 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5695 if (err < 0) 5696 goto out; 5697 5698 nexthdr = ipv6_hdr(skb)->nexthdr; 5699 5700 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5701 while (off <= len && !done) { 5702 switch (nexthdr) { 5703 case IPPROTO_DSTOPTS: 5704 case IPPROTO_HOPOPTS: 5705 case IPPROTO_ROUTING: { 5706 struct ipv6_opt_hdr *hp; 5707 5708 err = skb_maybe_pull_tail(skb, 5709 off + 5710 sizeof(struct ipv6_opt_hdr), 5711 MAX_IPV6_HDR_LEN); 5712 if (err < 0) 5713 goto out; 5714 5715 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5716 nexthdr = hp->nexthdr; 5717 off += ipv6_optlen(hp); 5718 break; 5719 } 5720 case IPPROTO_AH: { 5721 struct ip_auth_hdr *hp; 5722 5723 err = skb_maybe_pull_tail(skb, 5724 off + 5725 sizeof(struct ip_auth_hdr), 5726 MAX_IPV6_HDR_LEN); 5727 if (err < 0) 5728 goto out; 5729 5730 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5731 nexthdr = hp->nexthdr; 5732 off += ipv6_authlen(hp); 5733 break; 5734 } 5735 case IPPROTO_FRAGMENT: { 5736 struct frag_hdr *hp; 5737 5738 err = skb_maybe_pull_tail(skb, 5739 off + 5740 sizeof(struct frag_hdr), 5741 MAX_IPV6_HDR_LEN); 5742 if (err < 0) 5743 goto out; 5744 5745 hp = OPT_HDR(struct frag_hdr, skb, off); 5746 5747 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5748 fragment = true; 5749 5750 nexthdr = hp->nexthdr; 5751 off += sizeof(struct frag_hdr); 5752 break; 5753 } 5754 default: 5755 done = true; 5756 break; 5757 } 5758 } 5759 5760 err = -EPROTO; 5761 5762 if (!done || fragment) 5763 goto out; 5764 5765 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5766 if (IS_ERR(csum)) 5767 return PTR_ERR(csum); 5768 5769 if (recalculate) 5770 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5771 &ipv6_hdr(skb)->daddr, 5772 skb->len - off, nexthdr, 0); 5773 err = 0; 5774 5775 out: 5776 return err; 5777 } 5778 5779 /** 5780 * skb_checksum_setup - set up partial checksum offset 5781 * @skb: the skb to set up 5782 * @recalculate: if true the pseudo-header checksum will be recalculated 5783 */ 5784 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5785 { 5786 int err; 5787 5788 switch (skb->protocol) { 5789 case htons(ETH_P_IP): 5790 err = skb_checksum_setup_ipv4(skb, recalculate); 5791 break; 5792 5793 case htons(ETH_P_IPV6): 5794 err = skb_checksum_setup_ipv6(skb, recalculate); 5795 break; 5796 5797 default: 5798 err = -EPROTO; 5799 break; 5800 } 5801 5802 return err; 5803 } 5804 EXPORT_SYMBOL(skb_checksum_setup); 5805 5806 /** 5807 * skb_checksum_maybe_trim - maybe trims the given skb 5808 * @skb: the skb to check 5809 * @transport_len: the data length beyond the network header 5810 * 5811 * Checks whether the given skb has data beyond the given transport length. 5812 * If so, returns a cloned skb trimmed to this transport length. 5813 * Otherwise returns the provided skb. Returns NULL in error cases 5814 * (e.g. transport_len exceeds skb length or out-of-memory). 5815 * 5816 * Caller needs to set the skb transport header and free any returned skb if it 5817 * differs from the provided skb. 
5818 */ 5819 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5820 unsigned int transport_len) 5821 { 5822 struct sk_buff *skb_chk; 5823 unsigned int len = skb_transport_offset(skb) + transport_len; 5824 int ret; 5825 5826 if (skb->len < len) 5827 return NULL; 5828 else if (skb->len == len) 5829 return skb; 5830 5831 skb_chk = skb_clone(skb, GFP_ATOMIC); 5832 if (!skb_chk) 5833 return NULL; 5834 5835 ret = pskb_trim_rcsum(skb_chk, len); 5836 if (ret) { 5837 kfree_skb(skb_chk); 5838 return NULL; 5839 } 5840 5841 return skb_chk; 5842 } 5843 5844 /** 5845 * skb_checksum_trimmed - validate checksum of an skb 5846 * @skb: the skb to check 5847 * @transport_len: the data length beyond the network header 5848 * @skb_chkf: checksum function to use 5849 * 5850 * Applies the given checksum function skb_chkf to the provided skb. 5851 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5852 * 5853 * If the skb has data beyond the given transport length, then a 5854 * trimmed & cloned skb is checked and returned. 5855 * 5856 * Caller needs to set the skb transport header and free any returned skb if it 5857 * differs from the provided skb. 5858 */ 5859 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5860 unsigned int transport_len, 5861 __sum16(*skb_chkf)(struct sk_buff *skb)) 5862 { 5863 struct sk_buff *skb_chk; 5864 unsigned int offset = skb_transport_offset(skb); 5865 __sum16 ret; 5866 5867 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5868 if (!skb_chk) 5869 goto err; 5870 5871 if (!pskb_may_pull(skb_chk, offset)) 5872 goto err; 5873 5874 skb_pull_rcsum(skb_chk, offset); 5875 ret = skb_chkf(skb_chk); 5876 skb_push_rcsum(skb_chk, offset); 5877 5878 if (ret) 5879 goto err; 5880 5881 return skb_chk; 5882 5883 err: 5884 if (skb_chk && skb_chk != skb) 5885 kfree_skb(skb_chk); 5886 5887 return NULL; 5888 5889 } 5890 EXPORT_SYMBOL(skb_checksum_trimmed); 5891 5892 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5893 { 5894 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5895 skb->dev->name); 5896 } 5897 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5898 5899 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5900 { 5901 if (head_stolen) { 5902 skb_release_head_state(skb); 5903 kmem_cache_free(net_hotdata.skbuff_cache, skb); 5904 } else { 5905 __kfree_skb(skb); 5906 } 5907 } 5908 EXPORT_SYMBOL(kfree_skb_partial); 5909 5910 /** 5911 * skb_try_coalesce - try to merge skb to prior one 5912 * @to: prior buffer 5913 * @from: buffer to add 5914 * @fragstolen: pointer to boolean 5915 * @delta_truesize: how much more was allocated than was requested 5916 */ 5917 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5918 bool *fragstolen, int *delta_truesize) 5919 { 5920 struct skb_shared_info *to_shinfo, *from_shinfo; 5921 int i, delta, len = from->len; 5922 5923 *fragstolen = false; 5924 5925 if (skb_cloned(to)) 5926 return false; 5927 5928 /* In general, avoid mixing page_pool and non-page_pool allocated 5929 * pages within the same SKB. In theory we could take full 5930 * references if @from is cloned and !@to->pp_recycle but its 5931 * tricky (due to potential race with the clone disappearing) and 5932 * rare, so not worth dealing with. 
5933 */ 5934 if (to->pp_recycle != from->pp_recycle) 5935 return false; 5936 5937 if (len <= skb_tailroom(to)) { 5938 if (len) 5939 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5940 *delta_truesize = 0; 5941 return true; 5942 } 5943 5944 to_shinfo = skb_shinfo(to); 5945 from_shinfo = skb_shinfo(from); 5946 if (to_shinfo->frag_list || from_shinfo->frag_list) 5947 return false; 5948 if (skb_zcopy(to) || skb_zcopy(from)) 5949 return false; 5950 5951 if (skb_headlen(from) != 0) { 5952 struct page *page; 5953 unsigned int offset; 5954 5955 if (to_shinfo->nr_frags + 5956 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5957 return false; 5958 5959 if (skb_head_is_locked(from)) 5960 return false; 5961 5962 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5963 5964 page = virt_to_head_page(from->head); 5965 offset = from->data - (unsigned char *)page_address(page); 5966 5967 skb_fill_page_desc(to, to_shinfo->nr_frags, 5968 page, offset, skb_headlen(from)); 5969 *fragstolen = true; 5970 } else { 5971 if (to_shinfo->nr_frags + 5972 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5973 return false; 5974 5975 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5976 } 5977 5978 WARN_ON_ONCE(delta < len); 5979 5980 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5981 from_shinfo->frags, 5982 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5983 to_shinfo->nr_frags += from_shinfo->nr_frags; 5984 5985 if (!skb_cloned(from)) 5986 from_shinfo->nr_frags = 0; 5987 5988 /* if the skb is not cloned this does nothing 5989 * since we set nr_frags to 0. 5990 */ 5991 if (skb_pp_frag_ref(from)) { 5992 for (i = 0; i < from_shinfo->nr_frags; i++) 5993 __skb_frag_ref(&from_shinfo->frags[i]); 5994 } 5995 5996 to->truesize += delta; 5997 to->len += len; 5998 to->data_len += len; 5999 6000 *delta_truesize = delta; 6001 return true; 6002 } 6003 EXPORT_SYMBOL(skb_try_coalesce); 6004 6005 /** 6006 * skb_scrub_packet - scrub an skb 6007 * 6008 * @skb: buffer to clean 6009 * @xnet: packet is crossing netns 6010 * 6011 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 6012 * into/from a tunnel. Some information have to be cleared during these 6013 * operations. 6014 * skb_scrub_packet can also be used to clean a skb before injecting it in 6015 * another namespace (@xnet == true). We have to clear all information in the 6016 * skb that could impact namespace isolation. 
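 *
 * For example, forwarding into another device may cross a netns
 * boundary; the dev_forward_skb() path does (sketch):
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));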
6017 */ 6018 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 6019 { 6020 skb->pkt_type = PACKET_HOST; 6021 skb->skb_iif = 0; 6022 skb->ignore_df = 0; 6023 skb_dst_drop(skb); 6024 skb_ext_reset(skb); 6025 nf_reset_ct(skb); 6026 nf_reset_trace(skb); 6027 6028 #ifdef CONFIG_NET_SWITCHDEV 6029 skb->offload_fwd_mark = 0; 6030 skb->offload_l3_fwd_mark = 0; 6031 #endif 6032 6033 if (!xnet) 6034 return; 6035 6036 ipvs_reset(skb); 6037 skb->mark = 0; 6038 skb_clear_tstamp(skb); 6039 } 6040 EXPORT_SYMBOL_GPL(skb_scrub_packet); 6041 6042 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 6043 { 6044 int mac_len, meta_len; 6045 void *meta; 6046 6047 if (skb_cow(skb, skb_headroom(skb)) < 0) { 6048 kfree_skb(skb); 6049 return NULL; 6050 } 6051 6052 mac_len = skb->data - skb_mac_header(skb); 6053 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 6054 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 6055 mac_len - VLAN_HLEN - ETH_TLEN); 6056 } 6057 6058 meta_len = skb_metadata_len(skb); 6059 if (meta_len) { 6060 meta = skb_metadata_end(skb) - meta_len; 6061 memmove(meta + VLAN_HLEN, meta, meta_len); 6062 } 6063 6064 skb->mac_header += VLAN_HLEN; 6065 return skb; 6066 } 6067 6068 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6069 { 6070 struct vlan_hdr *vhdr; 6071 u16 vlan_tci; 6072 6073 if (unlikely(skb_vlan_tag_present(skb))) { 6074 /* vlan_tci is already set-up so leave this for another time */ 6075 return skb; 6076 } 6077 6078 skb = skb_share_check(skb, GFP_ATOMIC); 6079 if (unlikely(!skb)) 6080 goto err_free; 6081 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6082 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6083 goto err_free; 6084 6085 vhdr = (struct vlan_hdr *)skb->data; 6086 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6087 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6088 6089 skb_pull_rcsum(skb, VLAN_HLEN); 6090 vlan_set_encap_proto(skb, vhdr); 6091 6092 skb = skb_reorder_vlan_header(skb); 6093 if (unlikely(!skb)) 6094 goto err_free; 6095 6096 skb_reset_network_header(skb); 6097 if (!skb_transport_header_was_set(skb)) 6098 skb_reset_transport_header(skb); 6099 skb_reset_mac_len(skb); 6100 6101 return skb; 6102 6103 err_free: 6104 kfree_skb(skb); 6105 return NULL; 6106 } 6107 EXPORT_SYMBOL(skb_vlan_untag); 6108 6109 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6110 { 6111 if (!pskb_may_pull(skb, write_len)) 6112 return -ENOMEM; 6113 6114 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6115 return 0; 6116 6117 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6118 } 6119 EXPORT_SYMBOL(skb_ensure_writable); 6120 6121 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6122 { 6123 int needed_headroom = dev->needed_headroom; 6124 int needed_tailroom = dev->needed_tailroom; 6125 6126 /* For tail taggers, we need to pad short frames ourselves, to ensure 6127 * that the tail tag does not fail at its role of being at the end of 6128 * the packet, once the conduit interface pads the frame. Account for 6129 * that pad length here, and pad later. 6130 */ 6131 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6132 needed_tailroom += ETH_ZLEN - skb->len; 6133 /* skb_headroom() returns unsigned int... 
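 * so without the max_t(int, ...) clamp below, an skb that already has
 * enough room would make the subtraction wrap to a huge value; clamp
 * negative shortfalls to zero instead.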
*/ 6134 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); 6135 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); 6136 6137 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) 6138 /* No reallocation needed, yay! */ 6139 return 0; 6140 6141 return pskb_expand_head(skb, needed_headroom, needed_tailroom, 6142 GFP_ATOMIC); 6143 } 6144 EXPORT_SYMBOL(skb_ensure_writable_head_tail); 6145 6146 /* remove VLAN header from packet and update csum accordingly. 6147 * expects a non skb_vlan_tag_present skb with a vlan tag payload 6148 */ 6149 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 6150 { 6151 int offset = skb->data - skb_mac_header(skb); 6152 int err; 6153 6154 if (WARN_ONCE(offset, 6155 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 6156 offset)) { 6157 return -EINVAL; 6158 } 6159 6160 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 6161 if (unlikely(err)) 6162 return err; 6163 6164 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6165 6166 vlan_remove_tag(skb, vlan_tci); 6167 6168 skb->mac_header += VLAN_HLEN; 6169 6170 if (skb_network_offset(skb) < ETH_HLEN) 6171 skb_set_network_header(skb, ETH_HLEN); 6172 6173 skb_reset_mac_len(skb); 6174 6175 return err; 6176 } 6177 EXPORT_SYMBOL(__skb_vlan_pop); 6178 6179 /* Pop a vlan tag either from hwaccel or from payload. 6180 * Expects skb->data at mac header. 6181 */ 6182 int skb_vlan_pop(struct sk_buff *skb) 6183 { 6184 u16 vlan_tci; 6185 __be16 vlan_proto; 6186 int err; 6187 6188 if (likely(skb_vlan_tag_present(skb))) { 6189 __vlan_hwaccel_clear_tag(skb); 6190 } else { 6191 if (unlikely(!eth_type_vlan(skb->protocol))) 6192 return 0; 6193 6194 err = __skb_vlan_pop(skb, &vlan_tci); 6195 if (err) 6196 return err; 6197 } 6198 /* move next vlan tag to hw accel tag */ 6199 if (likely(!eth_type_vlan(skb->protocol))) 6200 return 0; 6201 6202 vlan_proto = skb->protocol; 6203 err = __skb_vlan_pop(skb, &vlan_tci); 6204 if (unlikely(err)) 6205 return err; 6206 6207 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6208 return 0; 6209 } 6210 EXPORT_SYMBOL(skb_vlan_pop); 6211 6212 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 6213 * Expects skb->data at mac header. 6214 */ 6215 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 6216 { 6217 if (skb_vlan_tag_present(skb)) { 6218 int offset = skb->data - skb_mac_header(skb); 6219 int err; 6220 6221 if (WARN_ONCE(offset, 6222 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 6223 offset)) { 6224 return -EINVAL; 6225 } 6226 6227 err = __vlan_insert_tag(skb, skb->vlan_proto, 6228 skb_vlan_tag_get(skb)); 6229 if (err) 6230 return err; 6231 6232 skb->protocol = skb->vlan_proto; 6233 skb->mac_len += VLAN_HLEN; 6234 6235 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6236 } 6237 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6238 return 0; 6239 } 6240 EXPORT_SYMBOL(skb_vlan_push); 6241 6242 /** 6243 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 6244 * 6245 * @skb: Socket buffer to modify 6246 * 6247 * Drop the Ethernet header of @skb. 6248 * 6249 * Expects that skb->data points to the mac header and that no VLAN tags are 6250 * present. 6251 * 6252 * Returns 0 on success, -errno otherwise. 
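 *
 * A minimal usage sketch (hypothetical caller), stripping the link-layer
 * header before handing the packet to an L3-only device:
 *
 *	if (skb_eth_pop(skb))
 *		goto drop;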
6253 */ 6254 int skb_eth_pop(struct sk_buff *skb) 6255 { 6256 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6257 skb_network_offset(skb) < ETH_HLEN) 6258 return -EPROTO; 6259 6260 skb_pull_rcsum(skb, ETH_HLEN); 6261 skb_reset_mac_header(skb); 6262 skb_reset_mac_len(skb); 6263 6264 return 0; 6265 } 6266 EXPORT_SYMBOL(skb_eth_pop); 6267 6268 /** 6269 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6270 * 6271 * @skb: Socket buffer to modify 6272 * @dst: Destination MAC address of the new header 6273 * @src: Source MAC address of the new header 6274 * 6275 * Prepend @skb with a new Ethernet header. 6276 * 6277 * Expects that skb->data points to the mac header, which must be empty. 6278 * 6279 * Returns 0 on success, -errno otherwise. 6280 */ 6281 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6282 const unsigned char *src) 6283 { 6284 struct ethhdr *eth; 6285 int err; 6286 6287 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6288 return -EPROTO; 6289 6290 err = skb_cow_head(skb, sizeof(*eth)); 6291 if (err < 0) 6292 return err; 6293 6294 skb_push(skb, sizeof(*eth)); 6295 skb_reset_mac_header(skb); 6296 skb_reset_mac_len(skb); 6297 6298 eth = eth_hdr(skb); 6299 ether_addr_copy(eth->h_dest, dst); 6300 ether_addr_copy(eth->h_source, src); 6301 eth->h_proto = skb->protocol; 6302 6303 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6304 6305 return 0; 6306 } 6307 EXPORT_SYMBOL(skb_eth_push); 6308 6309 /* Update the ethertype of hdr and the skb csum value if required. */ 6310 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6311 __be16 ethertype) 6312 { 6313 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6314 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6315 6316 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6317 } 6318 6319 hdr->h_proto = ethertype; 6320 } 6321 6322 /** 6323 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6324 * the packet 6325 * 6326 * @skb: buffer 6327 * @mpls_lse: MPLS label stack entry to push 6328 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6329 * @mac_len: length of the MAC header 6330 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6331 * ethernet 6332 * 6333 * Expects skb->data at mac header. 6334 * 6335 * Returns 0 on success, -errno otherwise. 6336 */ 6337 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6338 int mac_len, bool ethernet) 6339 { 6340 struct mpls_shim_hdr *lse; 6341 int err; 6342 6343 if (unlikely(!eth_p_mpls(mpls_proto))) 6344 return -EINVAL; 6345 6346 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
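 * An skb that already carries tunnel encapsulation state is therefore
 * rejected here.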
*/ 6347 if (skb->encapsulation) 6348 return -EINVAL; 6349 6350 err = skb_cow_head(skb, MPLS_HLEN); 6351 if (unlikely(err)) 6352 return err; 6353 6354 if (!skb->inner_protocol) { 6355 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6356 skb_set_inner_protocol(skb, skb->protocol); 6357 } 6358 6359 skb_push(skb, MPLS_HLEN); 6360 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6361 mac_len); 6362 skb_reset_mac_header(skb); 6363 skb_set_network_header(skb, mac_len); 6364 skb_reset_mac_len(skb); 6365 6366 lse = mpls_hdr(skb); 6367 lse->label_stack_entry = mpls_lse; 6368 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6369 6370 if (ethernet && mac_len >= ETH_HLEN) 6371 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6372 skb->protocol = mpls_proto; 6373 6374 return 0; 6375 } 6376 EXPORT_SYMBOL_GPL(skb_mpls_push); 6377 6378 /** 6379 * skb_mpls_pop() - pop the outermost MPLS header 6380 * 6381 * @skb: buffer 6382 * @next_proto: ethertype of header after popped MPLS header 6383 * @mac_len: length of the MAC header 6384 * @ethernet: flag to indicate if the packet is ethernet 6385 * 6386 * Expects skb->data at mac header. 6387 * 6388 * Returns 0 on success, -errno otherwise. 6389 */ 6390 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6391 bool ethernet) 6392 { 6393 int err; 6394 6395 if (unlikely(!eth_p_mpls(skb->protocol))) 6396 return 0; 6397 6398 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6399 if (unlikely(err)) 6400 return err; 6401 6402 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6403 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6404 mac_len); 6405 6406 __skb_pull(skb, MPLS_HLEN); 6407 skb_reset_mac_header(skb); 6408 skb_set_network_header(skb, mac_len); 6409 6410 if (ethernet && mac_len >= ETH_HLEN) { 6411 struct ethhdr *hdr; 6412 6413 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6414 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6415 skb_mod_eth_type(skb, hdr, next_proto); 6416 } 6417 skb->protocol = next_proto; 6418 6419 return 0; 6420 } 6421 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6422 6423 /** 6424 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6425 * 6426 * @skb: buffer 6427 * @mpls_lse: new MPLS label stack entry to update to 6428 * 6429 * Expects skb->data at mac header. 6430 * 6431 * Returns 0 on success, -errno otherwise. 6432 */ 6433 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6434 { 6435 int err; 6436 6437 if (unlikely(!eth_p_mpls(skb->protocol))) 6438 return -EINVAL; 6439 6440 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6441 if (unlikely(err)) 6442 return err; 6443 6444 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6445 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6446 6447 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6448 } 6449 6450 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6451 6452 return 0; 6453 } 6454 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6455 6456 /** 6457 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6458 * 6459 * @skb: buffer 6460 * 6461 * Expects skb->data at mac header. 6462 * 6463 * Returns 0 on success, -errno otherwise. 
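 *
 * A minimal usage sketch (hypothetical datapath action), dropping the packet
 * when the TTL expires or the header cannot be read:
 *
 *	if (skb_mpls_dec_ttl(skb))
 *		goto drop;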
6464 */ 6465 int skb_mpls_dec_ttl(struct sk_buff *skb) 6466 { 6467 u32 lse; 6468 u8 ttl; 6469 6470 if (unlikely(!eth_p_mpls(skb->protocol))) 6471 return -EINVAL; 6472 6473 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6474 return -ENOMEM; 6475 6476 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6477 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6478 if (!--ttl) 6479 return -EINVAL; 6480 6481 lse &= ~MPLS_LS_TTL_MASK; 6482 lse |= ttl << MPLS_LS_TTL_SHIFT; 6483 6484 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6485 } 6486 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6487 6488 /** 6489 * alloc_skb_with_frags - allocate skb with page frags 6490 * 6491 * @header_len: size of linear part 6492 * @data_len: needed length in frags 6493 * @order: max page order desired. 6494 * @errcode: pointer to error code if any 6495 * @gfp_mask: allocation mask 6496 * 6497 * This can be used to allocate a paged skb, given a maximal order for frags. 6498 */ 6499 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6500 unsigned long data_len, 6501 int order, 6502 int *errcode, 6503 gfp_t gfp_mask) 6504 { 6505 unsigned long chunk; 6506 struct sk_buff *skb; 6507 struct page *page; 6508 int nr_frags = 0; 6509 6510 *errcode = -EMSGSIZE; 6511 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6512 return NULL; 6513 6514 *errcode = -ENOBUFS; 6515 skb = alloc_skb(header_len, gfp_mask); 6516 if (!skb) 6517 return NULL; 6518 6519 while (data_len) { 6520 if (nr_frags == MAX_SKB_FRAGS - 1) 6521 goto failure; 6522 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6523 order--; 6524 6525 if (order) { 6526 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6527 __GFP_COMP | 6528 __GFP_NOWARN, 6529 order); 6530 if (!page) { 6531 order--; 6532 continue; 6533 } 6534 } else { 6535 page = alloc_page(gfp_mask); 6536 if (!page) 6537 goto failure; 6538 } 6539 chunk = min_t(unsigned long, data_len, 6540 PAGE_SIZE << order); 6541 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6542 nr_frags++; 6543 skb->truesize += (PAGE_SIZE << order); 6544 data_len -= chunk; 6545 } 6546 return skb; 6547 6548 failure: 6549 kfree_skb(skb); 6550 return NULL; 6551 } 6552 EXPORT_SYMBOL(alloc_skb_with_frags); 6553 6554 /* carve out the first off bytes from skb when off < headlen */ 6555 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6556 const int headlen, gfp_t gfp_mask) 6557 { 6558 int i; 6559 unsigned int size = skb_end_offset(skb); 6560 int new_hlen = headlen - off; 6561 u8 *data; 6562 6563 if (skb_pfmemalloc(skb)) 6564 gfp_mask |= __GFP_MEMALLOC; 6565 6566 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6567 if (!data) 6568 return -ENOMEM; 6569 size = SKB_WITH_OVERHEAD(size); 6570 6571 /* Copy real data, and all frags */ 6572 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6573 skb->len -= off; 6574 6575 memcpy((struct skb_shared_info *)(data + size), 6576 skb_shinfo(skb), 6577 offsetof(struct skb_shared_info, 6578 frags[skb_shinfo(skb)->nr_frags])); 6579 if (skb_cloned(skb)) { 6580 /* drop the old head gracefully */ 6581 if (skb_orphan_frags(skb, gfp_mask)) { 6582 skb_kfree_head(data, size); 6583 return -ENOMEM; 6584 } 6585 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6586 skb_frag_ref(skb, i); 6587 if (skb_has_frag_list(skb)) 6588 skb_clone_fraglist(skb); 6589 skb_release_data(skb, SKB_CONSUMED, false); 6590 } else { 6591 /* we can reuse the existing refcount - all we did was 6592 * relocate values 6593 */ 6594 skb_free_head(skb, false);
6595 } 6596 6597 skb->head = data; 6598 skb->data = data; 6599 skb->head_frag = 0; 6600 skb_set_end_offset(skb, size); 6601 skb_set_tail_pointer(skb, skb_headlen(skb)); 6602 skb_headers_offset_update(skb, 0); 6603 skb->cloned = 0; 6604 skb->hdr_len = 0; 6605 skb->nohdr = 0; 6606 atomic_set(&skb_shinfo(skb)->dataref, 1); 6607 6608 return 0; 6609 } 6610 6611 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6612 6613 /* carve out the first eat bytes from skb's frag_list. May recurse into 6614 * pskb_carve() 6615 */ 6616 static int pskb_carve_frag_list(struct sk_buff *skb, 6617 struct skb_shared_info *shinfo, int eat, 6618 gfp_t gfp_mask) 6619 { 6620 struct sk_buff *list = shinfo->frag_list; 6621 struct sk_buff *clone = NULL; 6622 struct sk_buff *insp = NULL; 6623 6624 do { 6625 if (!list) { 6626 pr_err("Not enough bytes to eat. Want %d\n", eat); 6627 return -EFAULT; 6628 } 6629 if (list->len <= eat) { 6630 /* Eaten as whole. */ 6631 eat -= list->len; 6632 list = list->next; 6633 insp = list; 6634 } else { 6635 /* Eaten partially. */ 6636 if (skb_shared(list)) { 6637 clone = skb_clone(list, gfp_mask); 6638 if (!clone) 6639 return -ENOMEM; 6640 insp = list->next; 6641 list = clone; 6642 } else { 6643 /* This may be pulled without problems. */ 6644 insp = list; 6645 } 6646 if (pskb_carve(list, eat, gfp_mask) < 0) { 6647 kfree_skb(clone); 6648 return -ENOMEM; 6649 } 6650 break; 6651 } 6652 } while (eat); 6653 6654 /* Free pulled out fragments. */ 6655 while ((list = shinfo->frag_list) != insp) { 6656 shinfo->frag_list = list->next; 6657 consume_skb(list); 6658 } 6659 /* And insert new clone at head. */ 6660 if (clone) { 6661 clone->next = list; 6662 shinfo->frag_list = clone; 6663 } 6664 return 0; 6665 } 6666 6667 /* carve off the first off bytes from skb. The split line (off) is in the 6668 * non-linear part of skb 6669 */ 6670 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6671 int pos, gfp_t gfp_mask) 6672 { 6673 int i, k = 0; 6674 unsigned int size = skb_end_offset(skb); 6675 u8 *data; 6676 const int nfrags = skb_shinfo(skb)->nr_frags; 6677 struct skb_shared_info *shinfo; 6678 6679 if (skb_pfmemalloc(skb)) 6680 gfp_mask |= __GFP_MEMALLOC; 6681 6682 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6683 if (!data) 6684 return -ENOMEM; 6685 size = SKB_WITH_OVERHEAD(size); 6686 6687 memcpy((struct skb_shared_info *)(data + size), 6688 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6689 if (skb_orphan_frags(skb, gfp_mask)) { 6690 skb_kfree_head(data, size); 6691 return -ENOMEM; 6692 } 6693 shinfo = (struct skb_shared_info *)(data + size); 6694 for (i = 0; i < nfrags; i++) { 6695 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6696 6697 if (pos + fsize > off) { 6698 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6699 6700 if (pos < off) { 6701 /* Split frag. 6702 * We have two variants in this case: 6703 * 1. Move all the frag to the second 6704 * part, if it is possible. F.e. 6705 * this approach is mandatory for TUX, 6706 * where splitting is expensive. 6707 * 2. Split the frag accurately at the boundary, which is what we do here.
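 *
 * Only the first retained frag can straddle the split point (k is still
 * zero when pos < off), so frags[0] below is the frag whose offset and
 * size have to be adjusted.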
6708 */ 6709 skb_frag_off_add(&shinfo->frags[0], off - pos); 6710 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6711 } 6712 skb_frag_ref(skb, i); 6713 k++; 6714 } 6715 pos += fsize; 6716 } 6717 shinfo->nr_frags = k; 6718 if (skb_has_frag_list(skb)) 6719 skb_clone_fraglist(skb); 6720 6721 /* split line is in frag list */ 6722 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6723 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6724 if (skb_has_frag_list(skb)) 6725 kfree_skb_list(skb_shinfo(skb)->frag_list); 6726 skb_kfree_head(data, size); 6727 return -ENOMEM; 6728 } 6729 skb_release_data(skb, SKB_CONSUMED, false); 6730 6731 skb->head = data; 6732 skb->head_frag = 0; 6733 skb->data = data; 6734 skb_set_end_offset(skb, size); 6735 skb_reset_tail_pointer(skb); 6736 skb_headers_offset_update(skb, 0); 6737 skb->cloned = 0; 6738 skb->hdr_len = 0; 6739 skb->nohdr = 0; 6740 skb->len -= off; 6741 skb->data_len = skb->len; 6742 atomic_set(&skb_shinfo(skb)->dataref, 1); 6743 return 0; 6744 } 6745 6746 /* remove len bytes from the beginning of the skb */ 6747 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6748 { 6749 int headlen = skb_headlen(skb); 6750 6751 if (len < headlen) 6752 return pskb_carve_inside_header(skb, len, headlen, gfp); 6753 else 6754 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6755 } 6756 6757 /* Extract to_copy bytes starting at off from skb, and return this in 6758 * a new skb 6759 */ 6760 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6761 int to_copy, gfp_t gfp) 6762 { 6763 struct sk_buff *clone = skb_clone(skb, gfp); 6764 6765 if (!clone) 6766 return NULL; 6767 6768 if (pskb_carve(clone, off, gfp) < 0 || 6769 pskb_trim(clone, to_copy)) { 6770 kfree_skb(clone); 6771 return NULL; 6772 } 6773 return clone; 6774 } 6775 EXPORT_SYMBOL(pskb_extract); 6776 6777 /** 6778 * skb_condense - try to get rid of fragments/frag_list if possible 6779 * @skb: buffer 6780 * 6781 * Can be used to save memory before skb is added to a busy queue. 6782 * If packet has bytes in frags and enough tail room in skb->head, 6783 * pull all of them, so that we can free the frags right now and adjust 6784 * truesize. 6785 * Notes: 6786 * We do not reallocate skb->head thus cannot fail. 6787 * Caller must re-evaluate skb->truesize if needed. 6788 */ 6789 void skb_condense(struct sk_buff *skb) 6790 { 6791 if (skb->data_len) { 6792 if (skb->data_len > skb->end - skb->tail || 6793 skb_cloned(skb)) 6794 return; 6795 6796 /* Nice, we can free page frag(s) right now */ 6797 __pskb_pull_tail(skb, skb->data_len); 6798 } 6799 /* At this point, skb->truesize might be overestimated, 6800 * because skb had a fragment, and fragments do not tell 6801 * their truesize. 6802 * When we pulled its content into skb->head, the fragment 6803 * was freed, but __pskb_pull_tail() could not possibly 6804 * adjust skb->truesize, not knowing the frag truesize. 6805 */ 6806 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6807 } 6808 EXPORT_SYMBOL(skb_condense); 6809 6810 #ifdef CONFIG_SKB_EXTENSIONS 6811 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6812 { 6813 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6814 } 6815 6816 /** 6817 * __skb_ext_alloc - allocate a new skb extensions storage 6818 * 6819 * @flags: See kmalloc(). 6820 * 6821 * Returns the newly allocated pointer. The pointer can later be attached to a 6822 * skb via __skb_ext_set(). 6823 * Note: the caller must handle the skb_ext as opaque data.
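 *
 * A minimal usage sketch (hypothetical caller, assumes CONFIG_XFRM so that
 * SKB_EXT_SEC_PATH is available), preallocating the storage and attaching
 * it later:
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_ATOMIC);
 *
 *	if (ext)
 *		__skb_ext_set(skb, SKB_EXT_SEC_PATH, ext);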
6824 */ 6825 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6826 { 6827 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6828 6829 if (new) { 6830 memset(new->offset, 0, sizeof(new->offset)); 6831 refcount_set(&new->refcnt, 1); 6832 } 6833 6834 return new; 6835 } 6836 6837 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6838 unsigned int old_active) 6839 { 6840 struct skb_ext *new; 6841 6842 if (refcount_read(&old->refcnt) == 1) 6843 return old; 6844 6845 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6846 if (!new) 6847 return NULL; 6848 6849 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6850 refcount_set(&new->refcnt, 1); 6851 6852 #ifdef CONFIG_XFRM 6853 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6854 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6855 unsigned int i; 6856 6857 for (i = 0; i < sp->len; i++) 6858 xfrm_state_hold(sp->xvec[i]); 6859 } 6860 #endif 6861 #ifdef CONFIG_MCTP_FLOWS 6862 if (old_active & (1 << SKB_EXT_MCTP)) { 6863 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6864 6865 if (flow->key) 6866 refcount_inc(&flow->key->refs); 6867 } 6868 #endif 6869 __skb_ext_put(old); 6870 return new; 6871 } 6872 6873 /** 6874 * __skb_ext_set - attach the specified extension storage to this skb 6875 * @skb: buffer 6876 * @id: extension id 6877 * @ext: extension storage previously allocated via __skb_ext_alloc() 6878 * 6879 * Existing extensions, if any, are cleared. 6880 * 6881 * Returns the pointer to the extension. 6882 */ 6883 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6884 struct skb_ext *ext) 6885 { 6886 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6887 6888 skb_ext_put(skb); 6889 newlen = newoff + skb_ext_type_len[id]; 6890 ext->chunks = newlen; 6891 ext->offset[id] = newoff; 6892 skb->extensions = ext; 6893 skb->active_extensions = 1 << id; 6894 return skb_ext_get_ptr(ext, id); 6895 } 6896 6897 /** 6898 * skb_ext_add - allocate space for given extension, COW if needed 6899 * @skb: buffer 6900 * @id: extension to allocate space for 6901 * 6902 * Allocates enough space for the given extension. 6903 * If the extension is already present, a pointer to that extension 6904 * is returned. 6905 * 6906 * If the skb was cloned, COW applies and the returned memory can be 6907 * modified without changing the extension space of cloned buffers. 6908 * 6909 * Returns pointer to the extension or NULL on allocation failure.
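 *
 * A minimal usage sketch (hypothetical caller, assumes CONFIG_NET_TC_SKB_EXT):
 *
 *	struct tc_skb_ext *tc_ext = skb_ext_add(skb, TC_SKB_EXT);
 *
 *	if (!tc_ext)
 *		return -ENOMEM;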
6910 */ 6911 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6912 { 6913 struct skb_ext *new, *old = NULL; 6914 unsigned int newlen, newoff; 6915 6916 if (skb->active_extensions) { 6917 old = skb->extensions; 6918 6919 new = skb_ext_maybe_cow(old, skb->active_extensions); 6920 if (!new) 6921 return NULL; 6922 6923 if (__skb_ext_exist(new, id)) 6924 goto set_active; 6925 6926 newoff = new->chunks; 6927 } else { 6928 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6929 6930 new = __skb_ext_alloc(GFP_ATOMIC); 6931 if (!new) 6932 return NULL; 6933 } 6934 6935 newlen = newoff + skb_ext_type_len[id]; 6936 new->chunks = newlen; 6937 new->offset[id] = newoff; 6938 set_active: 6939 skb->slow_gro = 1; 6940 skb->extensions = new; 6941 skb->active_extensions |= 1 << id; 6942 return skb_ext_get_ptr(new, id); 6943 } 6944 EXPORT_SYMBOL(skb_ext_add); 6945 6946 #ifdef CONFIG_XFRM 6947 static void skb_ext_put_sp(struct sec_path *sp) 6948 { 6949 unsigned int i; 6950 6951 for (i = 0; i < sp->len; i++) 6952 xfrm_state_put(sp->xvec[i]); 6953 } 6954 #endif 6955 6956 #ifdef CONFIG_MCTP_FLOWS 6957 static void skb_ext_put_mctp(struct mctp_flow *flow) 6958 { 6959 if (flow->key) 6960 mctp_key_unref(flow->key); 6961 } 6962 #endif 6963 6964 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6965 { 6966 struct skb_ext *ext = skb->extensions; 6967 6968 skb->active_extensions &= ~(1 << id); 6969 if (skb->active_extensions == 0) { 6970 skb->extensions = NULL; 6971 __skb_ext_put(ext); 6972 #ifdef CONFIG_XFRM 6973 } else if (id == SKB_EXT_SEC_PATH && 6974 refcount_read(&ext->refcnt) == 1) { 6975 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6976 6977 skb_ext_put_sp(sp); 6978 sp->len = 0; 6979 #endif 6980 } 6981 } 6982 EXPORT_SYMBOL(__skb_ext_del); 6983 6984 void __skb_ext_put(struct skb_ext *ext) 6985 { 6986 /* If this is last clone, nothing can increment 6987 * it after check passes. Avoids one atomic op. 6988 */ 6989 if (refcount_read(&ext->refcnt) == 1) 6990 goto free_now; 6991 6992 if (!refcount_dec_and_test(&ext->refcnt)) 6993 return; 6994 free_now: 6995 #ifdef CONFIG_XFRM 6996 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6997 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6998 #endif 6999 #ifdef CONFIG_MCTP_FLOWS 7000 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 7001 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 7002 #endif 7003 7004 kmem_cache_free(skbuff_ext_cache, ext); 7005 } 7006 EXPORT_SYMBOL(__skb_ext_put); 7007 #endif /* CONFIG_SKB_EXTENSIONS */ 7008 7009 /** 7010 * skb_attempt_defer_free - queue skb for remote freeing 7011 * @skb: buffer 7012 * 7013 * Put @skb in a per-cpu list, using the cpu which 7014 * allocated the skb/pages to reduce false sharing 7015 * and memory zone spinlock contention. 7016 */ 7017 void skb_attempt_defer_free(struct sk_buff *skb) 7018 { 7019 int cpu = skb->alloc_cpu; 7020 struct softnet_data *sd; 7021 unsigned int defer_max; 7022 bool kick; 7023 7024 if (WARN_ON_ONCE(cpu >= nr_cpu_ids) || 7025 !cpu_online(cpu) || 7026 cpu == raw_smp_processor_id()) { 7027 nodefer: __kfree_skb(skb); 7028 return; 7029 } 7030 7031 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 7032 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 7033 7034 sd = &per_cpu(softnet_data, cpu); 7035 defer_max = READ_ONCE(sysctl_skb_defer_max); 7036 if (READ_ONCE(sd->defer_count) >= defer_max) 7037 goto nodefer; 7038 7039 spin_lock_bh(&sd->defer_lock); 7040 /* Send an IPI every time queue reaches half capacity. 
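 * Kicking at the halfway mark gives the remote CPU time to flush its
 * defer_list before defer_count reaches defer_max, where we fall back to
 * freeing the skb locally.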
*/ 7041 kick = sd->defer_count == (defer_max >> 1); 7042 /* Paired with the READ_ONCE() few lines above */ 7043 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 7044 7045 skb->next = sd->defer_list; 7046 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 7047 WRITE_ONCE(sd->defer_list, skb); 7048 spin_unlock_bh(&sd->defer_lock); 7049 7050 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 7051 * if we are unlucky enough (this seems very unlikely). 7052 */ 7053 if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) 7054 smp_call_function_single_async(cpu, &sd->defer_csd); 7055 } 7056 7057 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 7058 size_t offset, size_t len) 7059 { 7060 const char *kaddr; 7061 __wsum csum; 7062 7063 kaddr = kmap_local_page(page); 7064 csum = csum_partial(kaddr + offset, len, 0); 7065 kunmap_local(kaddr); 7066 skb->csum = csum_block_add(skb->csum, csum, skb->len); 7067 } 7068 7069 /** 7070 * skb_splice_from_iter - Splice (or copy) pages to skbuff 7071 * @skb: The buffer to add pages to 7072 * @iter: Iterator representing the pages to be added 7073 * @maxsize: Maximum amount of pages to be added 7074 * @gfp: Allocation flags 7075 * 7076 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 7077 * extracts pages from an iterator and adds them to the socket buffer if 7078 * possible, copying them to fragments if not possible (such as if they're slab 7079 * pages). 7080 * 7081 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 7082 * insufficient space in the buffer to transfer anything. 7083 */ 7084 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 7085 ssize_t maxsize, gfp_t gfp) 7086 { 7087 size_t frag_limit = READ_ONCE(sysctl_max_skb_frags); 7088 struct page *pages[8], **ppages = pages; 7089 ssize_t spliced = 0, ret = 0; 7090 unsigned int i; 7091 7092 while (iter->count > 0) { 7093 ssize_t space, nr, len; 7094 size_t off; 7095 7096 ret = -EMSGSIZE; 7097 space = frag_limit - skb_shinfo(skb)->nr_frags; 7098 if (space < 0) 7099 break; 7100 7101 /* We might be able to coalesce without increasing nr_frags */ 7102 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 7103 7104 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 7105 if (len <= 0) { 7106 ret = len ?: -EIO; 7107 break; 7108 } 7109 7110 i = 0; 7111 do { 7112 struct page *page = pages[i++]; 7113 size_t part = min_t(size_t, PAGE_SIZE - off, len); 7114 7115 ret = -EIO; 7116 if (WARN_ON_ONCE(!sendpage_ok(page))) 7117 goto out; 7118 7119 ret = skb_append_pagefrags(skb, page, off, part, 7120 frag_limit); 7121 if (ret < 0) { 7122 iov_iter_revert(iter, len); 7123 goto out; 7124 } 7125 7126 if (skb->ip_summed == CHECKSUM_NONE) 7127 skb_splice_csum_page(skb, page, off, part); 7128 7129 off = 0; 7130 spliced += part; 7131 maxsize -= part; 7132 len -= part; 7133 } while (len > 0); 7134 7135 if (maxsize <= 0) 7136 break; 7137 } 7138 7139 out: 7140 skb_len_add(skb, spliced); 7141 return spliced ?: ret; 7142 } 7143 EXPORT_SYMBOL(skb_splice_from_iter); 7144 7145 static __always_inline 7146 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7147 size_t len, void *to, void *priv2) 7148 { 7149 __wsum *csum = priv2; 7150 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7151 7152 *csum = csum_block_add(*csum, next, progress); 7153 return 0; 7154 } 7155 7156 static __always_inline 7157 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7158 size_t len, void 
*to, void *priv2) 7159 { 7160 __wsum next, *csum = priv2; 7161 7162 next = csum_and_copy_from_user(iter_from, to + progress, len); 7163 *csum = csum_block_add(*csum, next, progress); 7164 return next ? 0 : len; 7165 } 7166 7167 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7168 __wsum *csum, struct iov_iter *i) 7169 { 7170 size_t copied; 7171 7172 if (WARN_ON_ONCE(!i->data_source)) 7173 return false; 7174 copied = iterate_and_advance2(i, bytes, addr, csum, 7175 copy_from_user_iter_csum, 7176 memcpy_from_iter_csum); 7177 if (likely(copied == bytes)) 7178 return true; 7179 iov_iter_revert(i, copied); 7180 return false; 7181 } 7182 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7183
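/* A minimal usage sketch (hypothetical caller) for
 * csum_and_copy_from_iter_full(): copy len user bytes into freshly reserved
 * tail room while folding them into a running checksum. skb, len and from
 * are assumed to exist in the caller.
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(skb_put(skb, len), len, &csum, from))
 *		return -EFAULT;
 */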