1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Routines having to do with the 'struct sk_buff' memory handlers. 4 * 5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 6 * Florian La Roche <rzsfl@rz.uni-sb.de> 7 * 8 * Fixes: 9 * Alan Cox : Fixed the worst of the load 10 * balancer bugs. 11 * Dave Platt : Interrupt stacking fix. 12 * Richard Kooijman : Timestamp fixes. 13 * Alan Cox : Changed buffer format. 14 * Alan Cox : destructor hook for AF_UNIX etc. 15 * Linus Torvalds : Better skb_clone. 16 * Alan Cox : Added skb_copy. 17 * Alan Cox : Added all the changed routines Linus 18 * only put in the headers 19 * Ray VanTassle : Fixed --skb->lock in free 20 * Alan Cox : skb_copy copy arp field 21 * Andi Kleen : slabified it. 22 * Robert Olsson : Removed skb_head_pool 23 * 24 * NOTE: 25 * The __skb_ routines should be called with interrupts 26 * disabled, or you better be *real* sure that the operation is atomic 27 * with respect to whatever list is being frobbed (e.g. via lock_sock() 28 * or via disabling bottom half handlers, etc). 29 */ 30 31 /* 32 * The functions in this file will not compile correctly with gcc 2.4.x 33 */ 34 35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 36 37 #include <linux/module.h> 38 #include <linux/types.h> 39 #include <linux/kernel.h> 40 #include <linux/mm.h> 41 #include <linux/interrupt.h> 42 #include <linux/in.h> 43 #include <linux/inet.h> 44 #include <linux/slab.h> 45 #include <linux/tcp.h> 46 #include <linux/udp.h> 47 #include <linux/sctp.h> 48 #include <linux/netdevice.h> 49 #ifdef CONFIG_NET_CLS_ACT 50 #include <net/pkt_sched.h> 51 #endif 52 #include <linux/string.h> 53 #include <linux/skbuff.h> 54 #include <linux/splice.h> 55 #include <linux/cache.h> 56 #include <linux/rtnetlink.h> 57 #include <linux/init.h> 58 #include <linux/scatterlist.h> 59 #include <linux/errqueue.h> 60 #include <linux/prefetch.h> 61 #include <linux/bitfield.h> 62 #include <linux/if_vlan.h> 63 #include <linux/mpls.h> 64 #include <linux/kcov.h> 65 #include <linux/iov_iter.h> 66 67 #include <net/protocol.h> 68 #include <net/dst.h> 69 #include <net/sock.h> 70 #include <net/checksum.h> 71 #include <net/gso.h> 72 #include <net/ip6_checksum.h> 73 #include <net/xfrm.h> 74 #include <net/mpls.h> 75 #include <net/mptcp.h> 76 #include <net/mctp.h> 77 #include <net/page_pool/helpers.h> 78 #include <net/dropreason.h> 79 80 #include <linux/uaccess.h> 81 #include <trace/events/skb.h> 82 #include <linux/highmem.h> 83 #include <linux/capability.h> 84 #include <linux/user_namespace.h> 85 #include <linux/indirect_call_wrapper.h> 86 #include <linux/textsearch.h> 87 88 #include "dev.h" 89 #include "sock_destructor.h" 90 91 struct kmem_cache *skbuff_cache __ro_after_init; 92 static struct kmem_cache *skbuff_fclone_cache __ro_after_init; 93 #ifdef CONFIG_SKB_EXTENSIONS 94 static struct kmem_cache *skbuff_ext_cache __ro_after_init; 95 #endif 96 97 98 static struct kmem_cache *skb_small_head_cache __ro_after_init; 99 100 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) 101 102 /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two. 103 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique 104 * size, and we can differentiate heads from skb_small_head_cache 105 * vs system slabs by looking at their size (skb_end_offset()). 106 */ 107 #define SKB_SMALL_HEAD_CACHE_SIZE \ 108 (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? 
\ 109 (SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \ 110 SKB_SMALL_HEAD_SIZE) 111 112 #define SKB_SMALL_HEAD_HEADROOM \ 113 SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) 114 115 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; 116 EXPORT_SYMBOL(sysctl_max_skb_frags); 117 118 /* kcm_write_msgs() relies on casting paged frags to bio_vec to use 119 * iov_iter_bvec(). These static asserts ensure the cast is valid as long as the 120 * netmem is a page. 121 */ 122 static_assert(offsetof(struct bio_vec, bv_page) == 123 offsetof(skb_frag_t, netmem)); 124 static_assert(sizeof_field(struct bio_vec, bv_page) == 125 sizeof_field(skb_frag_t, netmem)); 126 127 static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len)); 128 static_assert(sizeof_field(struct bio_vec, bv_len) == 129 sizeof_field(skb_frag_t, len)); 130 131 static_assert(offsetof(struct bio_vec, bv_offset) == 132 offsetof(skb_frag_t, offset)); 133 static_assert(sizeof_field(struct bio_vec, bv_offset) == 134 sizeof_field(skb_frag_t, offset)); 135 136 #undef FN 137 #define FN(reason) [SKB_DROP_REASON_##reason] = #reason, 138 static const char * const drop_reasons[] = { 139 [SKB_CONSUMED] = "CONSUMED", 140 DEFINE_DROP_REASON(FN, FN) 141 }; 142 143 static const struct drop_reason_list drop_reasons_core = { 144 .reasons = drop_reasons, 145 .n_reasons = ARRAY_SIZE(drop_reasons), 146 }; 147 148 const struct drop_reason_list __rcu * 149 drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = { 150 [SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core), 151 }; 152 EXPORT_SYMBOL(drop_reasons_by_subsys); 153 154 /** 155 * drop_reasons_register_subsys - register another drop reason subsystem 156 * @subsys: the subsystem to register, must not be the core 157 * @list: the list of drop reasons within the subsystem, must point to 158 * a statically initialized list 159 */ 160 void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys, 161 const struct drop_reason_list *list) 162 { 163 if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE || 164 subsys >= ARRAY_SIZE(drop_reasons_by_subsys), 165 "invalid subsystem %d\n", subsys)) 166 return; 167 168 /* must point to statically allocated memory, so INIT is OK */ 169 RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list); 170 } 171 EXPORT_SYMBOL_GPL(drop_reasons_register_subsys); 172 173 /** 174 * drop_reasons_unregister_subsys - unregister a drop reason subsystem 175 * @subsys: the subsystem to remove, must not be the core 176 * 177 * Note: This will synchronize_rcu() to ensure no users when it returns. 178 */ 179 void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys) 180 { 181 if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE || 182 subsys >= ARRAY_SIZE(drop_reasons_by_subsys), 183 "invalid subsystem %d\n", subsys)) 184 return; 185 186 RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL); 187 188 synchronize_rcu(); 189 } 190 EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys); 191 192 /** 193 * skb_panic - private function for out-of-line support 194 * @skb: buffer 195 * @sz: size 196 * @addr: address 197 * @msg: skb_over_panic or skb_under_panic 198 * 199 * Out-of-line support for skb_put() and skb_push(). 200 * Called via the wrapper skb_over_panic() or skb_under_panic(). 201 * Keep out of line to prevent kernel bloat. 202 * __builtin_return_address is not used because it is not always reliable.
203 */ 204 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, 205 const char msg[]) 206 { 207 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n", 208 msg, addr, skb->len, sz, skb->head, skb->data, 209 (unsigned long)skb->tail, (unsigned long)skb->end, 210 skb->dev ? skb->dev->name : "<NULL>"); 211 BUG(); 212 } 213 214 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) 215 { 216 skb_panic(skb, sz, addr, __func__); 217 } 218 219 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) 220 { 221 skb_panic(skb, sz, addr, __func__); 222 } 223 224 #define NAPI_SKB_CACHE_SIZE 64 225 #define NAPI_SKB_CACHE_BULK 16 226 #define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2) 227 228 #if PAGE_SIZE == SZ_4K 229 230 #define NAPI_HAS_SMALL_PAGE_FRAG 1 231 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc) 232 233 /* specialized page frag allocator using a single order 0 page 234 * and slicing it into 1K sized fragment. Constrained to systems 235 * with a very limited amount of 1K fragments fitting a single 236 * page - to avoid excessive truesize underestimation 237 */ 238 239 struct page_frag_1k { 240 void *va; 241 u16 offset; 242 bool pfmemalloc; 243 }; 244 245 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp) 246 { 247 struct page *page; 248 int offset; 249 250 offset = nc->offset - SZ_1K; 251 if (likely(offset >= 0)) 252 goto use_frag; 253 254 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 255 if (!page) 256 return NULL; 257 258 nc->va = page_address(page); 259 nc->pfmemalloc = page_is_pfmemalloc(page); 260 offset = PAGE_SIZE - SZ_1K; 261 page_ref_add(page, offset / SZ_1K); 262 263 use_frag: 264 nc->offset = offset; 265 return nc->va + offset; 266 } 267 #else 268 269 /* the small page is actually unused in this build; add dummy helpers 270 * to please the compiler and avoid later preprocessor's conditionals 271 */ 272 #define NAPI_HAS_SMALL_PAGE_FRAG 0 273 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false 274 275 struct page_frag_1k { 276 }; 277 278 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask) 279 { 280 return NULL; 281 } 282 283 #endif 284 285 struct napi_alloc_cache { 286 struct page_frag_cache page; 287 struct page_frag_1k page_small; 288 unsigned int skb_count; 289 void *skb_cache[NAPI_SKB_CACHE_SIZE]; 290 }; 291 292 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); 293 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); 294 295 /* Double check that napi_get_frags() allocates skbs with 296 * skb->head being backed by slab, not a page fragment. 297 * This is to make sure bug fixed in 3226b158e67c 298 * ("net: avoid 32 x truesize under-estimation for tiny skbs") 299 * does not accidentally come back. 
300 */ 301 void napi_get_frags_check(struct napi_struct *napi) 302 { 303 struct sk_buff *skb; 304 305 local_bh_disable(); 306 skb = napi_get_frags(napi); 307 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); 308 napi_free_frags(napi); 309 local_bh_enable(); 310 } 311 312 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 313 { 314 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 315 316 fragsz = SKB_DATA_ALIGN(fragsz); 317 318 return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 319 align_mask); 320 } 321 EXPORT_SYMBOL(__napi_alloc_frag_align); 322 323 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 324 { 325 void *data; 326 327 fragsz = SKB_DATA_ALIGN(fragsz); 328 if (in_hardirq() || irqs_disabled()) { 329 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); 330 331 data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, 332 align_mask); 333 } else { 334 struct napi_alloc_cache *nc; 335 336 local_bh_disable(); 337 nc = this_cpu_ptr(&napi_alloc_cache); 338 data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 339 align_mask); 340 local_bh_enable(); 341 } 342 return data; 343 } 344 EXPORT_SYMBOL(__netdev_alloc_frag_align); 345 346 static struct sk_buff *napi_skb_cache_get(void) 347 { 348 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 349 struct sk_buff *skb; 350 351 if (unlikely(!nc->skb_count)) { 352 nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache, 353 GFP_ATOMIC, 354 NAPI_SKB_CACHE_BULK, 355 nc->skb_cache); 356 if (unlikely(!nc->skb_count)) 357 return NULL; 358 } 359 360 skb = nc->skb_cache[--nc->skb_count]; 361 kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache)); 362 363 return skb; 364 } 365 366 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, 367 unsigned int size) 368 { 369 struct skb_shared_info *shinfo; 370 371 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 372 373 /* Assumes caller memset cleared SKB */ 374 skb->truesize = SKB_TRUESIZE(size); 375 refcount_set(&skb->users, 1); 376 skb->head = data; 377 skb->data = data; 378 skb_reset_tail_pointer(skb); 379 skb_set_end_offset(skb, size); 380 skb->mac_header = (typeof(skb->mac_header))~0U; 381 skb->transport_header = (typeof(skb->transport_header))~0U; 382 skb->alloc_cpu = raw_smp_processor_id(); 383 /* make sure we initialize shinfo sequentially */ 384 shinfo = skb_shinfo(skb); 385 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 386 atomic_set(&shinfo->dataref, 1); 387 388 skb_set_kcov_handle(skb, kcov_common_handle()); 389 } 390 391 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, 392 unsigned int *size) 393 { 394 void *resized; 395 396 /* Must find the allocation size (and grow it to match). */ 397 *size = ksize(data); 398 /* krealloc() will immediately return "data" when 399 * "ksize(data)" is requested: it is the existing upper 400 * bounds. As a result, GFP_ATOMIC will be ignored. Note 401 * that this "new" pointer needs to be passed back to the 402 * caller for use so the __alloc_size hinting will be 403 * tracked correctly. 404 */ 405 resized = krealloc(data, *size, GFP_ATOMIC); 406 WARN_ON_ONCE(resized != data); 407 return resized; 408 } 409 410 /* build_skb() variant which can operate on slab buffers. 411 * Note that this should be used sparingly as slab buffers 412 * cannot be combined efficiently by GRO! 
413 */ 414 struct sk_buff *slab_build_skb(void *data) 415 { 416 struct sk_buff *skb; 417 unsigned int size; 418 419 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC); 420 if (unlikely(!skb)) 421 return NULL; 422 423 memset(skb, 0, offsetof(struct sk_buff, tail)); 424 data = __slab_build_skb(skb, data, &size); 425 __finalize_skb_around(skb, data, size); 426 427 return skb; 428 } 429 EXPORT_SYMBOL(slab_build_skb); 430 431 /* Caller must provide SKB that is memset cleared */ 432 static void __build_skb_around(struct sk_buff *skb, void *data, 433 unsigned int frag_size) 434 { 435 unsigned int size = frag_size; 436 437 /* frag_size == 0 is considered deprecated now. Callers 438 * using slab buffer should use slab_build_skb() instead. 439 */ 440 if (WARN_ONCE(size == 0, "Use slab_build_skb() instead")) 441 data = __slab_build_skb(skb, data, &size); 442 443 __finalize_skb_around(skb, data, size); 444 } 445 446 /** 447 * __build_skb - build a network buffer 448 * @data: data buffer provided by caller 449 * @frag_size: size of data (must not be 0) 450 * 451 * Allocate a new &sk_buff. Caller provides space holding head and 452 * skb_shared_info. @data must have been allocated from the page 453 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc() 454 * allocation is deprecated, and callers should use slab_build_skb() 455 * instead.) 456 * Returns the new skb buffer. 457 * On a failure the return is %NULL, and @data is not freed. 458 * Notes: 459 * Before IO, driver allocates only the data buffer where the NIC puts the incoming frame 460 * Driver should add room at head (NET_SKB_PAD) and 461 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) 462 * After IO, driver calls build_skb() to allocate the sk_buff and populate it 463 * before giving the packet to the stack. 464 * RX rings only contain data buffers, not full skbs. 465 */ 466 struct sk_buff *__build_skb(void *data, unsigned int frag_size) 467 { 468 struct sk_buff *skb; 469 470 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC); 471 if (unlikely(!skb)) 472 return NULL; 473 474 memset(skb, 0, offsetof(struct sk_buff, tail)); 475 __build_skb_around(skb, data, frag_size); 476 477 return skb; 478 } 479 480 /* build_skb() is a wrapper over __build_skb() that specifically 481 * takes care of skb->head and skb->pfmemalloc 482 */ 483 struct sk_buff *build_skb(void *data, unsigned int frag_size) 484 { 485 struct sk_buff *skb = __build_skb(data, frag_size); 486 487 if (likely(skb && frag_size)) { 488 skb->head_frag = 1; 489 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 490 } 491 return skb; 492 } 493 EXPORT_SYMBOL(build_skb); 494 495 /** 496 * build_skb_around - build a network buffer around provided skb 497 * @skb: sk_buff provided by caller, must be memset cleared 498 * @data: data buffer provided by caller 499 * @frag_size: size of data 500 */ 501 struct sk_buff *build_skb_around(struct sk_buff *skb, 502 void *data, unsigned int frag_size) 503 { 504 if (unlikely(!skb)) 505 return NULL; 506 507 __build_skb_around(skb, data, frag_size); 508 509 if (frag_size) { 510 skb->head_frag = 1; 511 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 512 } 513 return skb; 514 } 515 EXPORT_SYMBOL(build_skb_around); 516 517 /** 518 * __napi_build_skb - build a network buffer 519 * @data: data buffer provided by caller 520 * @frag_size: size of data 521 * 522 * Version of __build_skb() that uses NAPI percpu caches to obtain 523 * skbuff_head instead of inplace allocation. 524 * 525 * Returns a new &sk_buff on success, %NULL on allocation failure.
526 */ 527 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size) 528 { 529 struct sk_buff *skb; 530 531 skb = napi_skb_cache_get(); 532 if (unlikely(!skb)) 533 return NULL; 534 535 memset(skb, 0, offsetof(struct sk_buff, tail)); 536 __build_skb_around(skb, data, frag_size); 537 538 return skb; 539 } 540 541 /** 542 * napi_build_skb - build a network buffer 543 * @data: data buffer provided by caller 544 * @frag_size: size of data 545 * 546 * Version of __napi_build_skb() that takes care of skb->head_frag 547 * and skb->pfmemalloc when the data is a page or page fragment. 548 * 549 * Returns a new &sk_buff on success, %NULL on allocation failure. 550 */ 551 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) 552 { 553 struct sk_buff *skb = __napi_build_skb(data, frag_size); 554 555 if (likely(skb) && frag_size) { 556 skb->head_frag = 1; 557 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 558 } 559 560 return skb; 561 } 562 EXPORT_SYMBOL(napi_build_skb); 563 564 /* 565 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 566 * the caller if emergency pfmemalloc reserves are being used. If it is and 567 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 568 * may be used. Otherwise, the packet data may be discarded until enough 569 * memory is free 570 */ 571 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, 572 bool *pfmemalloc) 573 { 574 bool ret_pfmemalloc = false; 575 size_t obj_size; 576 void *obj; 577 578 obj_size = SKB_HEAD_ALIGN(*size); 579 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && 580 !(flags & KMALLOC_NOT_NORMAL_BITS)) { 581 obj = kmem_cache_alloc_node(skb_small_head_cache, 582 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 583 node); 584 *size = SKB_SMALL_HEAD_CACHE_SIZE; 585 if (obj || !(gfp_pfmemalloc_allowed(flags))) 586 goto out; 587 /* Try again but now we are using pfmemalloc reserves */ 588 ret_pfmemalloc = true; 589 obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node); 590 goto out; 591 } 592 593 obj_size = kmalloc_size_roundup(obj_size); 594 /* The following cast might truncate high-order bits of obj_size, this 595 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway. 596 */ 597 *size = (unsigned int)obj_size; 598 599 /* 600 * Try a regular allocation, when that fails and we're not entitled 601 * to the reserves, fail. 602 */ 603 obj = kmalloc_node_track_caller(obj_size, 604 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 605 node); 606 if (obj || !(gfp_pfmemalloc_allowed(flags))) 607 goto out; 608 609 /* Try again but now we are using pfmemalloc reserves */ 610 ret_pfmemalloc = true; 611 obj = kmalloc_node_track_caller(obj_size, flags, node); 612 613 out: 614 if (pfmemalloc) 615 *pfmemalloc = ret_pfmemalloc; 616 617 return obj; 618 } 619 620 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 621 * 'private' fields and also do memory statistics to find all the 622 * [BEEP] leaks. 623 * 624 */ 625 626 /** 627 * __alloc_skb - allocate a network buffer 628 * @size: size to allocate 629 * @gfp_mask: allocation mask 630 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 631 * instead of head cache and allocate a cloned (child) skb. 632 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 633 * allocations in case the data is required for writeback 634 * @node: numa node to allocate memory on 635 * 636 * Allocate a new &sk_buff. The returned buffer has no headroom and a 637 * tail room of at least size bytes. 
The object has a reference count 638 * of one. The return is the buffer. On a failure the return is %NULL. 639 * 640 * Buffers may only be allocated from interrupts using a @gfp_mask of 641 * %GFP_ATOMIC. 642 */ 643 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 644 int flags, int node) 645 { 646 struct kmem_cache *cache; 647 struct sk_buff *skb; 648 bool pfmemalloc; 649 u8 *data; 650 651 cache = (flags & SKB_ALLOC_FCLONE) 652 ? skbuff_fclone_cache : skbuff_cache; 653 654 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 655 gfp_mask |= __GFP_MEMALLOC; 656 657 /* Get the HEAD */ 658 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI && 659 likely(node == NUMA_NO_NODE || node == numa_mem_id())) 660 skb = napi_skb_cache_get(); 661 else 662 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); 663 if (unlikely(!skb)) 664 return NULL; 665 prefetchw(skb); 666 667 /* We do our best to align skb_shared_info on a separate cache 668 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 669 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 670 * Both skb->head and skb_shared_info are cache line aligned. 671 */ 672 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc); 673 if (unlikely(!data)) 674 goto nodata; 675 /* kmalloc_size_roundup() might give us more room than requested. 676 * Put skb_shared_info exactly at the end of allocated zone, 677 * to allow max possible filling before reallocation. 678 */ 679 prefetchw(data + SKB_WITH_OVERHEAD(size)); 680 681 /* 682 * Only clear those fields we need to clear, not those that we will 683 * actually initialise below. Hence, don't put any more fields after 684 * the tail pointer in struct sk_buff! 685 */ 686 memset(skb, 0, offsetof(struct sk_buff, tail)); 687 __build_skb_around(skb, data, size); 688 skb->pfmemalloc = pfmemalloc; 689 690 if (flags & SKB_ALLOC_FCLONE) { 691 struct sk_buff_fclones *fclones; 692 693 fclones = container_of(skb, struct sk_buff_fclones, skb1); 694 695 skb->fclone = SKB_FCLONE_ORIG; 696 refcount_set(&fclones->fclone_ref, 1); 697 } 698 699 return skb; 700 701 nodata: 702 kmem_cache_free(cache, skb); 703 return NULL; 704 } 705 EXPORT_SYMBOL(__alloc_skb); 706 707 /** 708 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 709 * @dev: network device to receive on 710 * @len: length to allocate 711 * @gfp_mask: get_free_pages mask, passed to alloc_skb 712 * 713 * Allocate a new &sk_buff and assign it a usage count of one. The 714 * buffer has NET_SKB_PAD headroom built in. Users should allocate 715 * the headroom they think they need without accounting for the 716 * built in space. The built in space is used for optimisations. 717 * 718 * %NULL is returned if there is no free memory. 719 */ 720 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 721 gfp_t gfp_mask) 722 { 723 struct page_frag_cache *nc; 724 struct sk_buff *skb; 725 bool pfmemalloc; 726 void *data; 727 728 len += NET_SKB_PAD; 729 730 /* If requested length is either too small or too big, 731 * we use kmalloc() for skb->head allocation. 
732 */ 733 if (len <= SKB_WITH_OVERHEAD(1024) || 734 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 735 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 736 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 737 if (!skb) 738 goto skb_fail; 739 goto skb_success; 740 } 741 742 len = SKB_HEAD_ALIGN(len); 743 744 if (sk_memalloc_socks()) 745 gfp_mask |= __GFP_MEMALLOC; 746 747 if (in_hardirq() || irqs_disabled()) { 748 nc = this_cpu_ptr(&netdev_alloc_cache); 749 data = page_frag_alloc(nc, len, gfp_mask); 750 pfmemalloc = nc->pfmemalloc; 751 } else { 752 local_bh_disable(); 753 nc = this_cpu_ptr(&napi_alloc_cache.page); 754 data = page_frag_alloc(nc, len, gfp_mask); 755 pfmemalloc = nc->pfmemalloc; 756 local_bh_enable(); 757 } 758 759 if (unlikely(!data)) 760 return NULL; 761 762 skb = __build_skb(data, len); 763 if (unlikely(!skb)) { 764 skb_free_frag(data); 765 return NULL; 766 } 767 768 if (pfmemalloc) 769 skb->pfmemalloc = 1; 770 skb->head_frag = 1; 771 772 skb_success: 773 skb_reserve(skb, NET_SKB_PAD); 774 skb->dev = dev; 775 776 skb_fail: 777 return skb; 778 } 779 EXPORT_SYMBOL(__netdev_alloc_skb); 780 781 /** 782 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 783 * @napi: napi instance this buffer was allocated for 784 * @len: length to allocate 785 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages 786 * 787 * Allocate a new sk_buff for use in NAPI receive. This buffer will 788 * attempt to allocate the head from a special reserved region used 789 * only for NAPI Rx allocation. By doing this we can save several 790 * CPU cycles by avoiding having to disable and re-enable IRQs. 791 * 792 * %NULL is returned if there is no free memory. 793 */ 794 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, 795 gfp_t gfp_mask) 796 { 797 struct napi_alloc_cache *nc; 798 struct sk_buff *skb; 799 bool pfmemalloc; 800 void *data; 801 802 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 803 len += NET_SKB_PAD + NET_IP_ALIGN; 804 805 /* If requested length is either too small or too big, 806 * we use kmalloc() for skb->head allocation. 807 * When the small frag allocator is available, prefer it over kmalloc 808 * for small fragments 809 */ 810 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || 811 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 812 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 813 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, 814 NUMA_NO_NODE); 815 if (!skb) 816 goto skb_fail; 817 goto skb_success; 818 } 819 820 nc = this_cpu_ptr(&napi_alloc_cache); 821 822 if (sk_memalloc_socks()) 823 gfp_mask |= __GFP_MEMALLOC; 824 825 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) { 826 /* we are artificially inflating the allocation size, but 827 * that is not as bad as it may look like, as: 828 * - 'len' less than GRO_MAX_HEAD makes little sense 829 * - On most systems, larger 'len' values lead to fragment 830 * size above 512 bytes 831 * - kmalloc would use the kmalloc-1k slab for such values 832 * - Builds with smaller GRO_MAX_HEAD will very likely do 833 * little networking, as that implies no WiFi and no 834 * tunnels support, and 32 bits arches. 
835 */ 836 len = SZ_1K; 837 838 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); 839 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); 840 } else { 841 len = SKB_HEAD_ALIGN(len); 842 843 data = page_frag_alloc(&nc->page, len, gfp_mask); 844 pfmemalloc = nc->page.pfmemalloc; 845 } 846 847 if (unlikely(!data)) 848 return NULL; 849 850 skb = __napi_build_skb(data, len); 851 if (unlikely(!skb)) { 852 skb_free_frag(data); 853 return NULL; 854 } 855 856 if (pfmemalloc) 857 skb->pfmemalloc = 1; 858 skb->head_frag = 1; 859 860 skb_success: 861 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 862 skb->dev = napi->dev; 863 864 skb_fail: 865 return skb; 866 } 867 EXPORT_SYMBOL(__napi_alloc_skb); 868 869 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, 870 int off, int size, unsigned int truesize) 871 { 872 DEBUG_NET_WARN_ON_ONCE(size > truesize); 873 874 skb_fill_netmem_desc(skb, i, netmem, off, size); 875 skb->len += size; 876 skb->data_len += size; 877 skb->truesize += truesize; 878 } 879 EXPORT_SYMBOL(skb_add_rx_frag_netmem); 880 881 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 882 unsigned int truesize) 883 { 884 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 885 886 DEBUG_NET_WARN_ON_ONCE(size > truesize); 887 888 skb_frag_size_add(frag, size); 889 skb->len += size; 890 skb->data_len += size; 891 skb->truesize += truesize; 892 } 893 EXPORT_SYMBOL(skb_coalesce_rx_frag); 894 895 static void skb_drop_list(struct sk_buff **listp) 896 { 897 kfree_skb_list(*listp); 898 *listp = NULL; 899 } 900 901 static inline void skb_drop_fraglist(struct sk_buff *skb) 902 { 903 skb_drop_list(&skb_shinfo(skb)->frag_list); 904 } 905 906 static void skb_clone_fraglist(struct sk_buff *skb) 907 { 908 struct sk_buff *list; 909 910 skb_walk_frags(skb, list) 911 skb_get(list); 912 } 913 914 static bool is_pp_page(struct page *page) 915 { 916 return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; 917 } 918 919 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, 920 unsigned int headroom) 921 { 922 #if IS_ENABLED(CONFIG_PAGE_POOL) 923 u32 size, truesize, len, max_head_size, off; 924 struct sk_buff *skb = *pskb, *nskb; 925 int err, i, head_off; 926 void *data; 927 928 /* XDP does not support fraglist so we need to linearize 929 * the skb. 
930 */ 931 if (skb_has_frag_list(skb)) 932 return -EOPNOTSUPP; 933 934 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); 935 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) 936 return -ENOMEM; 937 938 size = min_t(u32, skb->len, max_head_size); 939 truesize = SKB_HEAD_ALIGN(size) + headroom; 940 data = page_pool_dev_alloc_va(pool, &truesize); 941 if (!data) 942 return -ENOMEM; 943 944 nskb = napi_build_skb(data, truesize); 945 if (!nskb) { 946 page_pool_free_va(pool, data, true); 947 return -ENOMEM; 948 } 949 950 skb_reserve(nskb, headroom); 951 skb_copy_header(nskb, skb); 952 skb_mark_for_recycle(nskb); 953 954 err = skb_copy_bits(skb, 0, nskb->data, size); 955 if (err) { 956 consume_skb(nskb); 957 return err; 958 } 959 skb_put(nskb, size); 960 961 head_off = skb_headroom(nskb) - skb_headroom(skb); 962 skb_headers_offset_update(nskb, head_off); 963 964 off = size; 965 len = skb->len - off; 966 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { 967 struct page *page; 968 u32 page_off; 969 970 size = min_t(u32, len, PAGE_SIZE); 971 truesize = size; 972 973 page = page_pool_dev_alloc(pool, &page_off, &truesize); 974 if (!page) { 975 consume_skb(nskb); 976 return -ENOMEM; 977 } 978 979 skb_add_rx_frag(nskb, i, page, page_off, size, truesize); 980 err = skb_copy_bits(skb, off, page_address(page) + page_off, 981 size); 982 if (err) { 983 consume_skb(nskb); 984 return err; 985 } 986 987 len -= size; 988 off += size; 989 } 990 991 consume_skb(skb); 992 *pskb = nskb; 993 994 return 0; 995 #else 996 return -EOPNOTSUPP; 997 #endif 998 } 999 EXPORT_SYMBOL(skb_pp_cow_data); 1000 1001 int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, 1002 struct bpf_prog *prog) 1003 { 1004 if (!prog->aux->xdp_has_frags) 1005 return -EINVAL; 1006 1007 return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM); 1008 } 1009 EXPORT_SYMBOL(skb_cow_data_for_xdp); 1010 1011 #if IS_ENABLED(CONFIG_PAGE_POOL) 1012 bool napi_pp_put_page(struct page *page, bool napi_safe) 1013 { 1014 bool allow_direct = false; 1015 struct page_pool *pp; 1016 1017 page = compound_head(page); 1018 1019 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 1020 * in order to preserve any existing bits, such as bit 0 for the 1021 * head page of compound page and bit 1 for pfmemalloc page, so 1022 * mask those bits for freeing side when doing below checking, 1023 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 1024 * to avoid recycling the pfmemalloc page. 1025 */ 1026 if (unlikely(!is_pp_page(page))) 1027 return false; 1028 1029 pp = page->pp; 1030 1031 /* Allow direct recycle if we have reasons to believe that we are 1032 * in the same context as the consumer would run, so there's 1033 * no possible race. 1034 * __page_pool_put_page() makes sure we're not in hardirq context 1035 * and interrupts are enabled prior to accessing the cache. 1036 */ 1037 if (napi_safe || in_softirq()) { 1038 const struct napi_struct *napi = READ_ONCE(pp->p.napi); 1039 unsigned int cpuid = smp_processor_id(); 1040 1041 allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid; 1042 allow_direct |= READ_ONCE(pp->cpuid) == cpuid; 1043 } 1044 1045 /* Driver set this to memory recycling info. Reset it on recycle. 1046 * This will *not* work for NIC using a split-page memory model. 1047 * The page will be returned to the pool here regardless of the 1048 * 'flipped' fragment being in use or not. 
1049 */ 1050 page_pool_put_full_page(pp, page, allow_direct); 1051 1052 return true; 1053 } 1054 EXPORT_SYMBOL(napi_pp_put_page); 1055 #endif 1056 1057 static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe) 1058 { 1059 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) 1060 return false; 1061 return napi_pp_put_page(virt_to_page(data), napi_safe); 1062 } 1063 1064 /** 1065 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb 1066 * @skb: page pool aware skb 1067 * 1068 * Increase the fragment reference count (pp_ref_count) of a skb. This is 1069 * intended to gain fragment references only for page pool aware skbs, 1070 * i.e. when skb->pp_recycle is true, and not for fragments in a 1071 * non-pp-recycling skb. It has a fallback to increase references on normal 1072 * pages, as page pool aware skbs may also have normal page fragments. 1073 */ 1074 static int skb_pp_frag_ref(struct sk_buff *skb) 1075 { 1076 struct skb_shared_info *shinfo; 1077 struct page *head_page; 1078 int i; 1079 1080 if (!skb->pp_recycle) 1081 return -EINVAL; 1082 1083 shinfo = skb_shinfo(skb); 1084 1085 for (i = 0; i < shinfo->nr_frags; i++) { 1086 head_page = compound_head(skb_frag_page(&shinfo->frags[i])); 1087 if (likely(is_pp_page(head_page))) 1088 page_pool_ref_page(head_page); 1089 else 1090 page_ref_inc(head_page); 1091 } 1092 return 0; 1093 } 1094 1095 static void skb_kfree_head(void *head, unsigned int end_offset) 1096 { 1097 if (end_offset == SKB_SMALL_HEAD_HEADROOM) 1098 kmem_cache_free(skb_small_head_cache, head); 1099 else 1100 kfree(head); 1101 } 1102 1103 static void skb_free_head(struct sk_buff *skb, bool napi_safe) 1104 { 1105 unsigned char *head = skb->head; 1106 1107 if (skb->head_frag) { 1108 if (skb_pp_recycle(skb, head, napi_safe)) 1109 return; 1110 skb_free_frag(head); 1111 } else { 1112 skb_kfree_head(head, skb_end_offset(skb)); 1113 } 1114 } 1115 1116 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, 1117 bool napi_safe) 1118 { 1119 struct skb_shared_info *shinfo = skb_shinfo(skb); 1120 int i; 1121 1122 if (skb->cloned && 1123 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 1124 &shinfo->dataref)) 1125 goto exit; 1126 1127 if (skb_zcopy(skb)) { 1128 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS; 1129 1130 skb_zcopy_clear(skb, true); 1131 if (skip_unref) 1132 goto free_head; 1133 } 1134 1135 for (i = 0; i < shinfo->nr_frags; i++) 1136 napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe); 1137 1138 free_head: 1139 if (shinfo->frag_list) 1140 kfree_skb_list_reason(shinfo->frag_list, reason); 1141 1142 skb_free_head(skb, napi_safe); 1143 exit: 1144 /* When we clone an SKB we copy the recycling bit. The pp_recycle 1145 * bit is only set on the head though, so in order to avoid races 1146 * while trying to recycle fragments on __skb_frag_unref() we need 1147 * to make one SKB responsible for triggering the recycle path. 1148 * So disable the recycling bit if an SKB is cloned and we have 1149 * additional references to the fragmented part of the SKB. 1150 * Eventually the last SKB will have the recycling bit set and its 1151 * dataref set to 0, which will trigger the recycling. 1152 */ 1153 skb->pp_recycle = 0; 1154 } 1155 1156 /* 1157 * Free the sk_buff memory without cleaning the state.
1158 */ 1159 static void kfree_skbmem(struct sk_buff *skb) 1160 { 1161 struct sk_buff_fclones *fclones; 1162 1163 switch (skb->fclone) { 1164 case SKB_FCLONE_UNAVAILABLE: 1165 kmem_cache_free(skbuff_cache, skb); 1166 return; 1167 1168 case SKB_FCLONE_ORIG: 1169 fclones = container_of(skb, struct sk_buff_fclones, skb1); 1170 1171 /* We usually free the clone (TX completion) before original skb 1172 * This test would have no chance to be true for the clone, 1173 * while here, branch prediction will be good. 1174 */ 1175 if (refcount_read(&fclones->fclone_ref) == 1) 1176 goto fastpath; 1177 break; 1178 1179 default: /* SKB_FCLONE_CLONE */ 1180 fclones = container_of(skb, struct sk_buff_fclones, skb2); 1181 break; 1182 } 1183 if (!refcount_dec_and_test(&fclones->fclone_ref)) 1184 return; 1185 fastpath: 1186 kmem_cache_free(skbuff_fclone_cache, fclones); 1187 } 1188 1189 void skb_release_head_state(struct sk_buff *skb) 1190 { 1191 skb_dst_drop(skb); 1192 if (skb->destructor) { 1193 DEBUG_NET_WARN_ON_ONCE(in_hardirq()); 1194 skb->destructor(skb); 1195 } 1196 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 1197 nf_conntrack_put(skb_nfct(skb)); 1198 #endif 1199 skb_ext_put(skb); 1200 } 1201 1202 /* Free everything but the sk_buff shell. */ 1203 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason, 1204 bool napi_safe) 1205 { 1206 skb_release_head_state(skb); 1207 if (likely(skb->head)) 1208 skb_release_data(skb, reason, napi_safe); 1209 } 1210 1211 /** 1212 * __kfree_skb - private function 1213 * @skb: buffer 1214 * 1215 * Free an sk_buff. Release anything attached to the buffer. 1216 * Clean the state. This is an internal helper function. Users should 1217 * always call kfree_skb 1218 */ 1219 1220 void __kfree_skb(struct sk_buff *skb) 1221 { 1222 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false); 1223 kfree_skbmem(skb); 1224 } 1225 EXPORT_SYMBOL(__kfree_skb); 1226 1227 static __always_inline 1228 bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1229 { 1230 if (unlikely(!skb_unref(skb))) 1231 return false; 1232 1233 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET || 1234 u32_get_bits(reason, 1235 SKB_DROP_REASON_SUBSYS_MASK) >= 1236 SKB_DROP_REASON_SUBSYS_NUM); 1237 1238 if (reason == SKB_CONSUMED) 1239 trace_consume_skb(skb, __builtin_return_address(0)); 1240 else 1241 trace_kfree_skb(skb, __builtin_return_address(0), reason); 1242 return true; 1243 } 1244 1245 /** 1246 * kfree_skb_reason - free an sk_buff with special reason 1247 * @skb: buffer to free 1248 * @reason: reason why this skb is dropped 1249 * 1250 * Drop a reference to the buffer and free it if the usage count has 1251 * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' 1252 * tracepoint. 
1253 */ 1254 void __fix_address 1255 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1256 { 1257 if (__kfree_skb_reason(skb, reason)) 1258 __kfree_skb(skb); 1259 } 1260 EXPORT_SYMBOL(kfree_skb_reason); 1261 1262 #define KFREE_SKB_BULK_SIZE 16 1263 1264 struct skb_free_array { 1265 unsigned int skb_count; 1266 void *skb_array[KFREE_SKB_BULK_SIZE]; 1267 }; 1268 1269 static void kfree_skb_add_bulk(struct sk_buff *skb, 1270 struct skb_free_array *sa, 1271 enum skb_drop_reason reason) 1272 { 1273 /* if SKB is a clone, don't handle this case */ 1274 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { 1275 __kfree_skb(skb); 1276 return; 1277 } 1278 1279 skb_release_all(skb, reason, false); 1280 sa->skb_array[sa->skb_count++] = skb; 1281 1282 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { 1283 kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE, 1284 sa->skb_array); 1285 sa->skb_count = 0; 1286 } 1287 } 1288 1289 void __fix_address 1290 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) 1291 { 1292 struct skb_free_array sa; 1293 1294 sa.skb_count = 0; 1295 1296 while (segs) { 1297 struct sk_buff *next = segs->next; 1298 1299 if (__kfree_skb_reason(segs, reason)) { 1300 skb_poison_list(segs); 1301 kfree_skb_add_bulk(segs, &sa, reason); 1302 } 1303 1304 segs = next; 1305 } 1306 1307 if (sa.skb_count) 1308 kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array); 1309 } 1310 EXPORT_SYMBOL(kfree_skb_list_reason); 1311 1312 /* Dump skb information and contents. 1313 * 1314 * Must only be called from net_ratelimit()-ed paths. 1315 * 1316 * Dumps whole packets if full_pkt, only headers otherwise. 1317 */ 1318 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 1319 { 1320 struct skb_shared_info *sh = skb_shinfo(skb); 1321 struct net_device *dev = skb->dev; 1322 struct sock *sk = skb->sk; 1323 struct sk_buff *list_skb; 1324 bool has_mac, has_trans; 1325 int headroom, tailroom; 1326 int i, len, seg_len; 1327 1328 if (full_pkt) 1329 len = skb->len; 1330 else 1331 len = min_t(int, skb->len, MAX_HEADER + 128); 1332 1333 headroom = skb_headroom(skb); 1334 tailroom = skb_tailroom(skb); 1335 1336 has_mac = skb_mac_header_was_set(skb); 1337 has_trans = skb_transport_header_was_set(skb); 1338 1339 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 1340 "mac=(%d,%d) net=(%d,%d) trans=%d\n" 1341 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 1342 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 1343 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", 1344 level, skb->len, headroom, skb_headlen(skb), tailroom, 1345 has_mac ? skb->mac_header : -1, 1346 has_mac ? skb_mac_header_len(skb) : -1, 1347 skb->network_header, 1348 has_trans ? skb_network_header_len(skb) : -1, 1349 has_trans ? 
skb->transport_header : -1, 1350 sh->tx_flags, sh->nr_frags, 1351 sh->gso_size, sh->gso_type, sh->gso_segs, 1352 skb->csum, skb->ip_summed, skb->csum_complete_sw, 1353 skb->csum_valid, skb->csum_level, 1354 skb->hash, skb->sw_hash, skb->l4_hash, 1355 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); 1356 1357 if (dev) 1358 printk("%sdev name=%s feat=%pNF\n", 1359 level, dev->name, &dev->features); 1360 if (sk) 1361 printk("%ssk family=%hu type=%u proto=%u\n", 1362 level, sk->sk_family, sk->sk_type, sk->sk_protocol); 1363 1364 if (full_pkt && headroom) 1365 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, 1366 16, 1, skb->head, headroom, false); 1367 1368 seg_len = min_t(int, skb_headlen(skb), len); 1369 if (seg_len) 1370 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 1371 16, 1, skb->data, seg_len, false); 1372 len -= seg_len; 1373 1374 if (full_pkt && tailroom) 1375 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, 1376 16, 1, skb_tail_pointer(skb), tailroom, false); 1377 1378 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { 1379 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1380 u32 p_off, p_len, copied; 1381 struct page *p; 1382 u8 *vaddr; 1383 1384 skb_frag_foreach_page(frag, skb_frag_off(frag), 1385 skb_frag_size(frag), p, p_off, p_len, 1386 copied) { 1387 seg_len = min_t(int, p_len, len); 1388 vaddr = kmap_atomic(p); 1389 print_hex_dump(level, "skb frag: ", 1390 DUMP_PREFIX_OFFSET, 1391 16, 1, vaddr + p_off, seg_len, false); 1392 kunmap_atomic(vaddr); 1393 len -= seg_len; 1394 if (!len) 1395 break; 1396 } 1397 } 1398 1399 if (full_pkt && skb_has_frag_list(skb)) { 1400 printk("skb fraglist:\n"); 1401 skb_walk_frags(skb, list_skb) 1402 skb_dump(level, list_skb, true); 1403 } 1404 } 1405 EXPORT_SYMBOL(skb_dump); 1406 1407 /** 1408 * skb_tx_error - report an sk_buff xmit error 1409 * @skb: buffer that triggered an error 1410 * 1411 * Report xmit error if a device callback is tracking this skb. 1412 * skb must be freed afterwards. 
1413 */ 1414 void skb_tx_error(struct sk_buff *skb) 1415 { 1416 if (skb) { 1417 skb_zcopy_downgrade_managed(skb); 1418 skb_zcopy_clear(skb, true); 1419 } 1420 } 1421 EXPORT_SYMBOL(skb_tx_error); 1422 1423 #ifdef CONFIG_TRACEPOINTS 1424 /** 1425 * consume_skb - free an skbuff 1426 * @skb: buffer to free 1427 * 1428 * Drop a ref to the buffer and free it if the usage count has hit zero. 1429 * Functions identically to kfree_skb, but kfree_skb assumes that the frame 1430 * is being dropped after a failure and notes that 1431 */ 1432 void consume_skb(struct sk_buff *skb) 1433 { 1434 if (!skb_unref(skb)) 1435 return; 1436 1437 trace_consume_skb(skb, __builtin_return_address(0)); 1438 __kfree_skb(skb); 1439 } 1440 EXPORT_SYMBOL(consume_skb); 1441 #endif 1442 1443 /** 1444 * __consume_stateless_skb - free an skbuff, assuming it is stateless 1445 * @skb: buffer to free 1446 * 1447 * Like consume_skb(), but this variant assumes that this is the last 1448 * skb reference and all the head states have already been dropped 1449 */ 1450 void __consume_stateless_skb(struct sk_buff *skb) 1451 { 1452 trace_consume_skb(skb, __builtin_return_address(0)); 1453 skb_release_data(skb, SKB_CONSUMED, false); 1454 kfree_skbmem(skb); 1455 } 1456 1457 static void napi_skb_cache_put(struct sk_buff *skb) 1458 { 1459 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 1460 u32 i; 1461 1462 if (!kasan_mempool_poison_object(skb)) 1463 return; 1464 1465 nc->skb_cache[nc->skb_count++] = skb; 1466 1467 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { 1468 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) 1469 kasan_mempool_unpoison_object(nc->skb_cache[i], 1470 kmem_cache_size(skbuff_cache)); 1471 1472 kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF, 1473 nc->skb_cache + NAPI_SKB_CACHE_HALF); 1474 nc->skb_count = NAPI_SKB_CACHE_HALF; 1475 } 1476 } 1477 1478 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) 1479 { 1480 skb_release_all(skb, reason, true); 1481 napi_skb_cache_put(skb); 1482 } 1483 1484 void napi_skb_free_stolen_head(struct sk_buff *skb) 1485 { 1486 if (unlikely(skb->slow_gro)) { 1487 nf_reset_ct(skb); 1488 skb_dst_drop(skb); 1489 skb_ext_put(skb); 1490 skb_orphan(skb); 1491 skb->slow_gro = 0; 1492 } 1493 napi_skb_cache_put(skb); 1494 } 1495 1496 void napi_consume_skb(struct sk_buff *skb, int budget) 1497 { 1498 /* A zero budget indicates a non-NAPI context called us, like netpoll */ 1499 if (unlikely(!budget)) { 1500 dev_consume_skb_any(skb); 1501 return; 1502 } 1503 1504 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 1505 1506 if (!skb_unref(skb)) 1507 return; 1508 1509 /* if reaching here SKB is ready to free */ 1510 trace_consume_skb(skb, __builtin_return_address(0)); 1511 1512 /* if SKB is a clone, don't handle this case */ 1513 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 1514 __kfree_skb(skb); 1515 return; 1516 } 1517 1518 skb_release_all(skb, SKB_CONSUMED, !!budget); 1519 napi_skb_cache_put(skb); 1520 } 1521 EXPORT_SYMBOL(napi_consume_skb); 1522 1523 /* Make sure a field is contained by headers group */ 1524 #define CHECK_SKB_FIELD(field) \ 1525 BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ 1526 offsetof(struct sk_buff, headers.field)); \ 1527 1528 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 1529 { 1530 new->tstamp = old->tstamp; 1531 /* We do not copy old->sk */ 1532 new->dev = old->dev; 1533 memcpy(new->cb, old->cb, sizeof(old->cb)); 1534 skb_dst_copy(new, old); 1535 __skb_ext_copy(new, old); 1536 __nf_copy(new, old,
false); 1537 1538 /* Note : this field could be in the headers group. 1539 * It is not yet because we do not want to have a 16 bit hole 1540 */ 1541 new->queue_mapping = old->queue_mapping; 1542 1543 memcpy(&new->headers, &old->headers, sizeof(new->headers)); 1544 CHECK_SKB_FIELD(protocol); 1545 CHECK_SKB_FIELD(csum); 1546 CHECK_SKB_FIELD(hash); 1547 CHECK_SKB_FIELD(priority); 1548 CHECK_SKB_FIELD(skb_iif); 1549 CHECK_SKB_FIELD(vlan_proto); 1550 CHECK_SKB_FIELD(vlan_tci); 1551 CHECK_SKB_FIELD(transport_header); 1552 CHECK_SKB_FIELD(network_header); 1553 CHECK_SKB_FIELD(mac_header); 1554 CHECK_SKB_FIELD(inner_protocol); 1555 CHECK_SKB_FIELD(inner_transport_header); 1556 CHECK_SKB_FIELD(inner_network_header); 1557 CHECK_SKB_FIELD(inner_mac_header); 1558 CHECK_SKB_FIELD(mark); 1559 #ifdef CONFIG_NETWORK_SECMARK 1560 CHECK_SKB_FIELD(secmark); 1561 #endif 1562 #ifdef CONFIG_NET_RX_BUSY_POLL 1563 CHECK_SKB_FIELD(napi_id); 1564 #endif 1565 CHECK_SKB_FIELD(alloc_cpu); 1566 #ifdef CONFIG_XPS 1567 CHECK_SKB_FIELD(sender_cpu); 1568 #endif 1569 #ifdef CONFIG_NET_SCHED 1570 CHECK_SKB_FIELD(tc_index); 1571 #endif 1572 1573 } 1574 1575 /* 1576 * You should not add any new code to this function. Add it to 1577 * __copy_skb_header above instead. 1578 */ 1579 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 1580 { 1581 #define C(x) n->x = skb->x 1582 1583 n->next = n->prev = NULL; 1584 n->sk = NULL; 1585 __copy_skb_header(n, skb); 1586 1587 C(len); 1588 C(data_len); 1589 C(mac_len); 1590 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 1591 n->cloned = 1; 1592 n->nohdr = 0; 1593 n->peeked = 0; 1594 C(pfmemalloc); 1595 C(pp_recycle); 1596 n->destructor = NULL; 1597 C(tail); 1598 C(end); 1599 C(head); 1600 C(head_frag); 1601 C(data); 1602 C(truesize); 1603 refcount_set(&n->users, 1); 1604 1605 atomic_inc(&(skb_shinfo(skb)->dataref)); 1606 skb->cloned = 1; 1607 1608 return n; 1609 #undef C 1610 } 1611 1612 /** 1613 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg 1614 * @first: first sk_buff of the msg 1615 */ 1616 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) 1617 { 1618 struct sk_buff *n; 1619 1620 n = alloc_skb(0, GFP_ATOMIC); 1621 if (!n) 1622 return NULL; 1623 1624 n->len = first->len; 1625 n->data_len = first->len; 1626 n->truesize = first->truesize; 1627 1628 skb_shinfo(n)->frag_list = first; 1629 1630 __copy_skb_header(n, first); 1631 n->destructor = NULL; 1632 1633 return n; 1634 } 1635 EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1636 1637 /** 1638 * skb_morph - morph one skb into another 1639 * @dst: the skb to receive the contents 1640 * @src: the skb to supply the contents 1641 * 1642 * This is identical to skb_clone except that the target skb is 1643 * supplied by the user. 1644 * 1645 * The target skb is returned upon exit. 1646 */ 1647 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1648 { 1649 skb_release_all(dst, SKB_CONSUMED, false); 1650 return __skb_clone(dst, src); 1651 } 1652 EXPORT_SYMBOL_GPL(skb_morph); 1653 1654 int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1655 { 1656 unsigned long max_pg, num_pg, new_pg, old_pg, rlim; 1657 struct user_struct *user; 1658 1659 if (capable(CAP_IPC_LOCK) || !size) 1660 return 0; 1661 1662 rlim = rlimit(RLIMIT_MEMLOCK); 1663 if (rlim == RLIM_INFINITY) 1664 return 0; 1665 1666 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1667 max_pg = rlim >> PAGE_SHIFT; 1668 user = mmp->user ? 
: current_user(); 1669 1670 old_pg = atomic_long_read(&user->locked_vm); 1671 do { 1672 new_pg = old_pg + num_pg; 1673 if (new_pg > max_pg) 1674 return -ENOBUFS; 1675 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); 1676 1677 if (!mmp->user) { 1678 mmp->user = get_uid(user); 1679 mmp->num_pg = num_pg; 1680 } else { 1681 mmp->num_pg += num_pg; 1682 } 1683 1684 return 0; 1685 } 1686 EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1687 1688 void mm_unaccount_pinned_pages(struct mmpin *mmp) 1689 { 1690 if (mmp->user) { 1691 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1692 free_uid(mmp->user); 1693 } 1694 } 1695 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1696 1697 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 1698 { 1699 struct ubuf_info_msgzc *uarg; 1700 struct sk_buff *skb; 1701 1702 WARN_ON_ONCE(!in_task()); 1703 1704 skb = sock_omalloc(sk, 0, GFP_KERNEL); 1705 if (!skb) 1706 return NULL; 1707 1708 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 1709 uarg = (void *)skb->cb; 1710 uarg->mmp.user = NULL; 1711 1712 if (mm_account_pinned_pages(&uarg->mmp, size)) { 1713 kfree_skb(skb); 1714 return NULL; 1715 } 1716 1717 uarg->ubuf.callback = msg_zerocopy_callback; 1718 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 1719 uarg->len = 1; 1720 uarg->bytelen = size; 1721 uarg->zerocopy = 1; 1722 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; 1723 refcount_set(&uarg->ubuf.refcnt, 1); 1724 sock_hold(sk); 1725 1726 return &uarg->ubuf; 1727 } 1728 1729 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) 1730 { 1731 return container_of((void *)uarg, struct sk_buff, cb); 1732 } 1733 1734 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1735 struct ubuf_info *uarg) 1736 { 1737 if (uarg) { 1738 struct ubuf_info_msgzc *uarg_zc; 1739 const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 1740 u32 bytelen, next; 1741 1742 /* there might be non MSG_ZEROCOPY users */ 1743 if (uarg->callback != msg_zerocopy_callback) 1744 return NULL; 1745 1746 /* realloc only when socket is locked (TCP, UDP cork), 1747 * so uarg->len and sk_zckey access is serialized 1748 */ 1749 if (!sock_owned_by_user(sk)) { 1750 WARN_ON_ONCE(1); 1751 return NULL; 1752 } 1753 1754 uarg_zc = uarg_to_msgzc(uarg); 1755 bytelen = uarg_zc->bytelen + size; 1756 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { 1757 /* TCP can create new skb to attach new uarg */ 1758 if (sk->sk_type == SOCK_STREAM) 1759 goto new_alloc; 1760 return NULL; 1761 } 1762 1763 next = (u32)atomic_read(&sk->sk_zckey); 1764 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { 1765 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) 1766 return NULL; 1767 uarg_zc->len++; 1768 uarg_zc->bytelen = bytelen; 1769 atomic_set(&sk->sk_zckey, ++next); 1770 1771 /* no extra ref when appending to datagram (MSG_MORE) */ 1772 if (sk->sk_type == SOCK_STREAM) 1773 net_zcopy_get(uarg); 1774 1775 return uarg; 1776 } 1777 } 1778 1779 new_alloc: 1780 return msg_zerocopy_alloc(sk, size); 1781 } 1782 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 1783 1784 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 1785 { 1786 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 1787 u32 old_lo, old_hi; 1788 u64 sum_len; 1789 1790 old_lo = serr->ee.ee_info; 1791 old_hi = serr->ee.ee_data; 1792 sum_len = old_hi - old_lo + 1ULL + len; 1793 1794 if (sum_len >= (1ULL << 32)) 1795 return false; 1796 1797 if (lo != old_hi + 1) 1798 return false; 1799 1800 serr->ee.ee_data 
+= len; 1801 return true; 1802 } 1803 1804 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) 1805 { 1806 struct sk_buff *tail, *skb = skb_from_uarg(uarg); 1807 struct sock_exterr_skb *serr; 1808 struct sock *sk = skb->sk; 1809 struct sk_buff_head *q; 1810 unsigned long flags; 1811 bool is_zerocopy; 1812 u32 lo, hi; 1813 u16 len; 1814 1815 mm_unaccount_pinned_pages(&uarg->mmp); 1816 1817 /* if !len, there was only 1 call, and it was aborted 1818 * so do not queue a completion notification 1819 */ 1820 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 1821 goto release; 1822 1823 len = uarg->len; 1824 lo = uarg->id; 1825 hi = uarg->id + len - 1; 1826 is_zerocopy = uarg->zerocopy; 1827 1828 serr = SKB_EXT_ERR(skb); 1829 memset(serr, 0, sizeof(*serr)); 1830 serr->ee.ee_errno = 0; 1831 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 1832 serr->ee.ee_data = hi; 1833 serr->ee.ee_info = lo; 1834 if (!is_zerocopy) 1835 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 1836 1837 q = &sk->sk_error_queue; 1838 spin_lock_irqsave(&q->lock, flags); 1839 tail = skb_peek_tail(q); 1840 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 1841 !skb_zerocopy_notify_extend(tail, lo, len)) { 1842 __skb_queue_tail(q, skb); 1843 skb = NULL; 1844 } 1845 spin_unlock_irqrestore(&q->lock, flags); 1846 1847 sk_error_report(sk); 1848 1849 release: 1850 consume_skb(skb); 1851 sock_put(sk); 1852 } 1853 1854 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 1855 bool success) 1856 { 1857 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); 1858 1859 uarg_zc->zerocopy = uarg_zc->zerocopy & success; 1860 1861 if (refcount_dec_and_test(&uarg->refcnt)) 1862 __msg_zerocopy_callback(uarg_zc); 1863 } 1864 EXPORT_SYMBOL_GPL(msg_zerocopy_callback); 1865 1866 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1867 { 1868 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; 1869 1870 atomic_dec(&sk->sk_zckey); 1871 uarg_to_msgzc(uarg)->len--; 1872 1873 if (have_uref) 1874 msg_zerocopy_callback(NULL, uarg, true); 1875 } 1876 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 1877 1878 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1879 struct msghdr *msg, int len, 1880 struct ubuf_info *uarg) 1881 { 1882 struct ubuf_info *orig_uarg = skb_zcopy(skb); 1883 int err, orig_len = skb->len; 1884 1885 /* An skb can only point to one uarg. This edge case happens when 1886 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. 1887 */ 1888 if (orig_uarg && uarg != orig_uarg) 1889 return -EEXIST; 1890 1891 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); 1892 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1893 struct sock *save_sk = skb->sk; 1894 1895 /* Streams do not free skb on error. Reset to prev state. 
*/ 1896 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); 1897 skb->sk = sk; 1898 ___pskb_trim(skb, orig_len); 1899 skb->sk = save_sk; 1900 return err; 1901 } 1902 1903 skb_zcopy_set(skb, uarg, NULL); 1904 return skb->len - orig_len; 1905 } 1906 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 1907 1908 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) 1909 { 1910 int i; 1911 1912 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; 1913 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1914 skb_frag_ref(skb, i); 1915 } 1916 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); 1917 1918 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 1919 gfp_t gfp_mask) 1920 { 1921 if (skb_zcopy(orig)) { 1922 if (skb_zcopy(nskb)) { 1923 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 1924 if (!gfp_mask) { 1925 WARN_ON_ONCE(1); 1926 return -ENOMEM; 1927 } 1928 if (skb_uarg(nskb) == skb_uarg(orig)) 1929 return 0; 1930 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 1931 return -EIO; 1932 } 1933 skb_zcopy_set(nskb, skb_uarg(orig), NULL); 1934 } 1935 return 0; 1936 } 1937 1938 /** 1939 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 1940 * @skb: the skb to modify 1941 * @gfp_mask: allocation priority 1942 * 1943 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 1944 * It will copy all frags into kernel and drop the reference 1945 * to userspace pages. 1946 * 1947 * If this function is called from an interrupt gfp_mask() must be 1948 * %GFP_ATOMIC. 1949 * 1950 * Returns 0 on success or a negative error code on failure 1951 * to allocate kernel memory to copy to. 1952 */ 1953 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1954 { 1955 int num_frags = skb_shinfo(skb)->nr_frags; 1956 struct page *page, *head = NULL; 1957 int i, order, psize, new_frags; 1958 u32 d_off; 1959 1960 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1961 return -EINVAL; 1962 1963 if (!num_frags) 1964 goto release; 1965 1966 /* We might have to allocate high order pages, so compute what minimum 1967 * page order is needed. 
1968 */ 1969 order = 0; 1970 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1971 order++; 1972 psize = (PAGE_SIZE << order); 1973 1974 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1975 for (i = 0; i < new_frags; i++) { 1976 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1977 if (!page) { 1978 while (head) { 1979 struct page *next = (struct page *)page_private(head); 1980 put_page(head); 1981 head = next; 1982 } 1983 return -ENOMEM; 1984 } 1985 set_page_private(page, (unsigned long)head); 1986 head = page; 1987 } 1988 1989 page = head; 1990 d_off = 0; 1991 for (i = 0; i < num_frags; i++) { 1992 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1993 u32 p_off, p_len, copied; 1994 struct page *p; 1995 u8 *vaddr; 1996 1997 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1998 p, p_off, p_len, copied) { 1999 u32 copy, done = 0; 2000 vaddr = kmap_atomic(p); 2001 2002 while (done < p_len) { 2003 if (d_off == psize) { 2004 d_off = 0; 2005 page = (struct page *)page_private(page); 2006 } 2007 copy = min_t(u32, psize - d_off, p_len - done); 2008 memcpy(page_address(page) + d_off, 2009 vaddr + p_off + done, copy); 2010 done += copy; 2011 d_off += copy; 2012 } 2013 kunmap_atomic(vaddr); 2014 } 2015 } 2016 2017 /* skb frags release userspace buffers */ 2018 for (i = 0; i < num_frags; i++) 2019 skb_frag_unref(skb, i); 2020 2021 /* skb frags point to kernel buffers */ 2022 for (i = 0; i < new_frags - 1; i++) { 2023 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 2024 head = (struct page *)page_private(head); 2025 } 2026 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 2027 d_off); 2028 skb_shinfo(skb)->nr_frags = new_frags; 2029 2030 release: 2031 skb_zcopy_clear(skb, false); 2032 return 0; 2033 } 2034 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 2035 2036 /** 2037 * skb_clone - duplicate an sk_buff 2038 * @skb: buffer to clone 2039 * @gfp_mask: allocation priority 2040 * 2041 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 2042 * copies share the same packet data but not structure. The new 2043 * buffer has a reference count of 1. If the allocation fails the 2044 * function returns %NULL otherwise the new buffer is returned. 2045 * 2046 * If this function is called from an interrupt gfp_mask() must be 2047 * %GFP_ATOMIC. 
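 *
 * A minimal usage sketch (illustrative only; the surrounding context and
 * error handling are assumptions of the example, not part of this file):
 *
 *	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!copy)
 *		return -ENOMEM;
 *
 * The clone shares its packet data with @skb, so neither buffer should
 * modify the payload without first making it private (for instance via
 * skb_copy() or pskb_expand_head()).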
2048 */ 2049 2050 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2051 { 2052 struct sk_buff_fclones *fclones = container_of(skb, 2053 struct sk_buff_fclones, 2054 skb1); 2055 struct sk_buff *n; 2056 2057 if (skb_orphan_frags(skb, gfp_mask)) 2058 return NULL; 2059 2060 if (skb->fclone == SKB_FCLONE_ORIG && 2061 refcount_read(&fclones->fclone_ref) == 1) { 2062 n = &fclones->skb2; 2063 refcount_set(&fclones->fclone_ref, 2); 2064 n->fclone = SKB_FCLONE_CLONE; 2065 } else { 2066 if (skb_pfmemalloc(skb)) 2067 gfp_mask |= __GFP_MEMALLOC; 2068 2069 n = kmem_cache_alloc(skbuff_cache, gfp_mask); 2070 if (!n) 2071 return NULL; 2072 2073 n->fclone = SKB_FCLONE_UNAVAILABLE; 2074 } 2075 2076 return __skb_clone(n, skb); 2077 } 2078 EXPORT_SYMBOL(skb_clone); 2079 2080 void skb_headers_offset_update(struct sk_buff *skb, int off) 2081 { 2082 /* Only adjust this if it actually is csum_start rather than csum */ 2083 if (skb->ip_summed == CHECKSUM_PARTIAL) 2084 skb->csum_start += off; 2085 /* {transport,network,mac}_header and tail are relative to skb->head */ 2086 skb->transport_header += off; 2087 skb->network_header += off; 2088 if (skb_mac_header_was_set(skb)) 2089 skb->mac_header += off; 2090 skb->inner_transport_header += off; 2091 skb->inner_network_header += off; 2092 skb->inner_mac_header += off; 2093 } 2094 EXPORT_SYMBOL(skb_headers_offset_update); 2095 2096 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2097 { 2098 __copy_skb_header(new, old); 2099 2100 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2101 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2102 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2103 } 2104 EXPORT_SYMBOL(skb_copy_header); 2105 2106 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2107 { 2108 if (skb_pfmemalloc(skb)) 2109 return SKB_ALLOC_RX; 2110 return 0; 2111 } 2112 2113 /** 2114 * skb_copy - create private copy of an sk_buff 2115 * @skb: buffer to copy 2116 * @gfp_mask: allocation priority 2117 * 2118 * Make a copy of both an &sk_buff and its data. This is used when the 2119 * caller wishes to modify the data and needs a private copy of the 2120 * data to alter. Returns %NULL on failure or the pointer to the buffer 2121 * on success. The returned buffer has a reference count of 1. 2122 * 2123 * As by-product this function converts non-linear &sk_buff to linear 2124 * one, so that &sk_buff becomes completely private and caller is allowed 2125 * to modify all the data of returned buffer. This means that this 2126 * function is not recommended for use in circumstances when only 2127 * header is going to be modified. Use pskb_copy() instead. 2128 */ 2129 2130 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2131 { 2132 int headerlen = skb_headroom(skb); 2133 unsigned int size = skb_end_offset(skb) + skb->data_len; 2134 struct sk_buff *n = __alloc_skb(size, gfp_mask, 2135 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2136 2137 if (!n) 2138 return NULL; 2139 2140 /* Set the data pointer */ 2141 skb_reserve(n, headerlen); 2142 /* Set the tail pointer and length */ 2143 skb_put(n, skb->len); 2144 2145 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2146 2147 skb_copy_header(n, skb); 2148 return n; 2149 } 2150 EXPORT_SYMBOL(skb_copy); 2151 2152 /** 2153 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2154 * @skb: buffer to copy 2155 * @headroom: headroom of new skb 2156 * @gfp_mask: allocation priority 2157 * @fclone: if true allocate the copy of the skb from the fclone 2158 * cache instead of the head cache; it is recommended to set this 2159 * to true for the cases where the copy will likely be cloned 2160 * 2161 * Make a copy of both an &sk_buff and part of its data, located 2162 * in header. Fragmented data remain shared. This is used when 2163 * the caller wishes to modify only header of &sk_buff and needs 2164 * private copy of the header to alter. Returns %NULL on failure 2165 * or the pointer to the buffer on success. 2166 * The returned buffer has a reference count of 1. 2167 */ 2168 2169 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2170 gfp_t gfp_mask, bool fclone) 2171 { 2172 unsigned int size = skb_headlen(skb) + headroom; 2173 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2174 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2175 2176 if (!n) 2177 goto out; 2178 2179 /* Set the data pointer */ 2180 skb_reserve(n, headroom); 2181 /* Set the tail pointer and length */ 2182 skb_put(n, skb_headlen(skb)); 2183 /* Copy the bytes */ 2184 skb_copy_from_linear_data(skb, n->data, n->len); 2185 2186 n->truesize += skb->data_len; 2187 n->data_len = skb->data_len; 2188 n->len = skb->len; 2189 2190 if (skb_shinfo(skb)->nr_frags) { 2191 int i; 2192 2193 if (skb_orphan_frags(skb, gfp_mask) || 2194 skb_zerocopy_clone(n, skb, gfp_mask)) { 2195 kfree_skb(n); 2196 n = NULL; 2197 goto out; 2198 } 2199 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2200 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2201 skb_frag_ref(skb, i); 2202 } 2203 skb_shinfo(n)->nr_frags = i; 2204 } 2205 2206 if (skb_has_frag_list(skb)) { 2207 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2208 skb_clone_fraglist(n); 2209 } 2210 2211 skb_copy_header(n, skb); 2212 out: 2213 return n; 2214 } 2215 EXPORT_SYMBOL(__pskb_copy_fclone); 2216 2217 /** 2218 * pskb_expand_head - reallocate header of &sk_buff 2219 * @skb: buffer to reallocate 2220 * @nhead: room to add at head 2221 * @ntail: room to add at tail 2222 * @gfp_mask: allocation priority 2223 * 2224 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2225 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2226 * reference count of 1. Returns zero in the case of success or error, 2227 * if expansion failed. In the last case, &sk_buff is not changed. 2228 * 2229 * All the pointers pointing into skb header may change and must be 2230 * reloaded after call to this function. 2231 */ 2232 2233 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2234 gfp_t gfp_mask) 2235 { 2236 unsigned int osize = skb_end_offset(skb); 2237 unsigned int size = osize + nhead + ntail; 2238 long off; 2239 u8 *data; 2240 int i; 2241 2242 BUG_ON(nhead < 0); 2243 2244 BUG_ON(skb_shared(skb)); 2245 2246 skb_zcopy_downgrade_managed(skb); 2247 2248 if (skb_pfmemalloc(skb)) 2249 gfp_mask |= __GFP_MEMALLOC; 2250 2251 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2252 if (!data) 2253 goto nodata; 2254 size = SKB_WITH_OVERHEAD(size); 2255 2256 /* Copy only real data... and, alas, header. This should be 2257 * optimized for the cases when header is void. 
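 * The first memcpy() below carries the headroom plus the linear data
 * ([skb->head, skb->tail)) into the new buffer at offset @nhead; the
 * second one copies struct skb_shared_info, but only up to the last
 * frag descriptor that is actually in use.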
2258 */ 2259 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2260 2261 memcpy((struct skb_shared_info *)(data + size), 2262 skb_shinfo(skb), 2263 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2264 2265 /* 2266 * if shinfo is shared we must drop the old head gracefully, but if it 2267 * is not we can just drop the old head and let the existing refcount 2268 * be since all we did is relocate the values 2269 */ 2270 if (skb_cloned(skb)) { 2271 if (skb_orphan_frags(skb, gfp_mask)) 2272 goto nofrags; 2273 if (skb_zcopy(skb)) 2274 refcount_inc(&skb_uarg(skb)->refcnt); 2275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2276 skb_frag_ref(skb, i); 2277 2278 if (skb_has_frag_list(skb)) 2279 skb_clone_fraglist(skb); 2280 2281 skb_release_data(skb, SKB_CONSUMED, false); 2282 } else { 2283 skb_free_head(skb, false); 2284 } 2285 off = (data + nhead) - skb->head; 2286 2287 skb->head = data; 2288 skb->head_frag = 0; 2289 skb->data += off; 2290 2291 skb_set_end_offset(skb, size); 2292 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2293 off = nhead; 2294 #endif 2295 skb->tail += off; 2296 skb_headers_offset_update(skb, nhead); 2297 skb->cloned = 0; 2298 skb->hdr_len = 0; 2299 skb->nohdr = 0; 2300 atomic_set(&skb_shinfo(skb)->dataref, 1); 2301 2302 skb_metadata_clear(skb); 2303 2304 /* It is not generally safe to change skb->truesize. 2305 * For the moment, we really care of rx path, or 2306 * when skb is orphaned (not attached to a socket). 2307 */ 2308 if (!skb->sk || skb->destructor == sock_edemux) 2309 skb->truesize += size - osize; 2310 2311 return 0; 2312 2313 nofrags: 2314 skb_kfree_head(data, size); 2315 nodata: 2316 return -ENOMEM; 2317 } 2318 EXPORT_SYMBOL(pskb_expand_head); 2319 2320 /* Make private copy of skb with writable head and some headroom */ 2321 2322 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2323 { 2324 struct sk_buff *skb2; 2325 int delta = headroom - skb_headroom(skb); 2326 2327 if (delta <= 0) 2328 skb2 = pskb_copy(skb, GFP_ATOMIC); 2329 else { 2330 skb2 = skb_clone(skb, GFP_ATOMIC); 2331 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2332 GFP_ATOMIC)) { 2333 kfree_skb(skb2); 2334 skb2 = NULL; 2335 } 2336 } 2337 return skb2; 2338 } 2339 EXPORT_SYMBOL(skb_realloc_headroom); 2340 2341 /* Note: We plan to rework this in linux-6.4 */ 2342 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2343 { 2344 unsigned int saved_end_offset, saved_truesize; 2345 struct skb_shared_info *shinfo; 2346 int res; 2347 2348 saved_end_offset = skb_end_offset(skb); 2349 saved_truesize = skb->truesize; 2350 2351 res = pskb_expand_head(skb, 0, 0, pri); 2352 if (res) 2353 return res; 2354 2355 skb->truesize = saved_truesize; 2356 2357 if (likely(skb_end_offset(skb) == saved_end_offset)) 2358 return 0; 2359 2360 /* We can not change skb->end if the original or new value 2361 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2362 */ 2363 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2364 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2365 /* We think this path should not be taken. 2366 * Add a temporary trace to warn us just in case. 2367 */ 2368 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2369 saved_end_offset, skb_end_offset(skb)); 2370 WARN_ON_ONCE(1); 2371 return 0; 2372 } 2373 2374 shinfo = skb_shinfo(skb); 2375 2376 /* We are about to change back skb->end, 2377 * we need to move skb_shinfo() to its new location. 
2378 */ 2379 memmove(skb->head + saved_end_offset, 2380 shinfo, 2381 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2382 2383 skb_set_end_offset(skb, saved_end_offset); 2384 2385 return 0; 2386 } 2387 2388 /** 2389 * skb_expand_head - reallocate header of &sk_buff 2390 * @skb: buffer to reallocate 2391 * @headroom: needed headroom 2392 * 2393 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2394 * if possible; copies skb->sk to new skb as needed 2395 * and frees original skb in case of failures. 2396 * 2397 * It expect increased headroom and generates warning otherwise. 2398 */ 2399 2400 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2401 { 2402 int delta = headroom - skb_headroom(skb); 2403 int osize = skb_end_offset(skb); 2404 struct sock *sk = skb->sk; 2405 2406 if (WARN_ONCE(delta <= 0, 2407 "%s is expecting an increase in the headroom", __func__)) 2408 return skb; 2409 2410 delta = SKB_DATA_ALIGN(delta); 2411 /* pskb_expand_head() might crash, if skb is shared. */ 2412 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2413 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2414 2415 if (unlikely(!nskb)) 2416 goto fail; 2417 2418 if (sk) 2419 skb_set_owner_w(nskb, sk); 2420 consume_skb(skb); 2421 skb = nskb; 2422 } 2423 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2424 goto fail; 2425 2426 if (sk && is_skb_wmem(skb)) { 2427 delta = skb_end_offset(skb) - osize; 2428 refcount_add(delta, &sk->sk_wmem_alloc); 2429 skb->truesize += delta; 2430 } 2431 return skb; 2432 2433 fail: 2434 kfree_skb(skb); 2435 return NULL; 2436 } 2437 EXPORT_SYMBOL(skb_expand_head); 2438 2439 /** 2440 * skb_copy_expand - copy and expand sk_buff 2441 * @skb: buffer to copy 2442 * @newheadroom: new free bytes at head 2443 * @newtailroom: new free bytes at tail 2444 * @gfp_mask: allocation priority 2445 * 2446 * Make a copy of both an &sk_buff and its data and while doing so 2447 * allocate additional space. 2448 * 2449 * This is used when the caller wishes to modify the data and needs a 2450 * private copy of the data to alter as well as more space for new fields. 2451 * Returns %NULL on failure or the pointer to the buffer 2452 * on success. The returned buffer has a reference count of 1. 2453 * 2454 * You must pass %GFP_ATOMIC as the allocation priority if this function 2455 * is called from an interrupt. 2456 */ 2457 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2458 int newheadroom, int newtailroom, 2459 gfp_t gfp_mask) 2460 { 2461 /* 2462 * Allocate the copy buffer 2463 */ 2464 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 2465 gfp_mask, skb_alloc_rx_flag(skb), 2466 NUMA_NO_NODE); 2467 int oldheadroom = skb_headroom(skb); 2468 int head_copy_len, head_copy_off; 2469 2470 if (!n) 2471 return NULL; 2472 2473 skb_reserve(n, newheadroom); 2474 2475 /* Set the tail pointer and length */ 2476 skb_put(n, skb->len); 2477 2478 head_copy_len = oldheadroom; 2479 head_copy_off = 0; 2480 if (newheadroom <= head_copy_len) 2481 head_copy_len = newheadroom; 2482 else 2483 head_copy_off = newheadroom - head_copy_len; 2484 2485 /* Copy the linear header and data. 
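 * skb_copy_bits() is given a negative offset so that it copies
 * head_copy_len bytes of the old headroom together with all of the
 * packet data; head_copy_off positions that headroom correctly inside
 * the new (possibly larger) headroom.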
*/ 2486 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2487 skb->len + head_copy_len)); 2488 2489 skb_copy_header(n, skb); 2490 2491 skb_headers_offset_update(n, newheadroom - oldheadroom); 2492 2493 return n; 2494 } 2495 EXPORT_SYMBOL(skb_copy_expand); 2496 2497 /** 2498 * __skb_pad - zero pad the tail of an skb 2499 * @skb: buffer to pad 2500 * @pad: space to pad 2501 * @free_on_error: free buffer on error 2502 * 2503 * Ensure that a buffer is followed by a padding area that is zero 2504 * filled. Used by network drivers which may DMA or transfer data 2505 * beyond the buffer end onto the wire. 2506 * 2507 * May return error in out of memory cases. The skb is freed on error 2508 * if @free_on_error is true. 2509 */ 2510 2511 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2512 { 2513 int err; 2514 int ntail; 2515 2516 /* If the skbuff is non linear tailroom is always zero.. */ 2517 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2518 memset(skb->data+skb->len, 0, pad); 2519 return 0; 2520 } 2521 2522 ntail = skb->data_len + pad - (skb->end - skb->tail); 2523 if (likely(skb_cloned(skb) || ntail > 0)) { 2524 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2525 if (unlikely(err)) 2526 goto free_skb; 2527 } 2528 2529 /* FIXME: The use of this function with non-linear skb's really needs 2530 * to be audited. 2531 */ 2532 err = skb_linearize(skb); 2533 if (unlikely(err)) 2534 goto free_skb; 2535 2536 memset(skb->data + skb->len, 0, pad); 2537 return 0; 2538 2539 free_skb: 2540 if (free_on_error) 2541 kfree_skb(skb); 2542 return err; 2543 } 2544 EXPORT_SYMBOL(__skb_pad); 2545 2546 /** 2547 * pskb_put - add data to the tail of a potentially fragmented buffer 2548 * @skb: start of the buffer to use 2549 * @tail: tail fragment of the buffer to use 2550 * @len: amount of data to add 2551 * 2552 * This function extends the used data area of the potentially 2553 * fragmented buffer. @tail must be the last fragment of @skb -- or 2554 * @skb itself. If this would exceed the total buffer size the kernel 2555 * will panic. A pointer to the first byte of the extra data is 2556 * returned. 2557 */ 2558 2559 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2560 { 2561 if (tail != skb) { 2562 skb->data_len += len; 2563 skb->len += len; 2564 } 2565 return skb_put(tail, len); 2566 } 2567 EXPORT_SYMBOL_GPL(pskb_put); 2568 2569 /** 2570 * skb_put - add data to a buffer 2571 * @skb: buffer to use 2572 * @len: amount of data to add 2573 * 2574 * This function extends the used data area of the buffer. If this would 2575 * exceed the total buffer size the kernel will panic. A pointer to the 2576 * first byte of the extra data is returned. 2577 */ 2578 void *skb_put(struct sk_buff *skb, unsigned int len) 2579 { 2580 void *tmp = skb_tail_pointer(skb); 2581 SKB_LINEAR_ASSERT(skb); 2582 skb->tail += len; 2583 skb->len += len; 2584 if (unlikely(skb->tail > skb->end)) 2585 skb_over_panic(skb, len, __builtin_return_address(0)); 2586 return tmp; 2587 } 2588 EXPORT_SYMBOL(skb_put); 2589 2590 /** 2591 * skb_push - add data to the start of a buffer 2592 * @skb: buffer to use 2593 * @len: amount of data to add 2594 * 2595 * This function extends the used data area of the buffer at the buffer 2596 * start. If this would exceed the total buffer headroom the kernel will 2597 * panic. A pointer to the first byte of the extra data is returned. 
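 *
 * A typical (illustrative) pattern when prepending a header, assuming the
 * caller has already guaranteed enough headroom (e.g. with skb_cow_head());
 * dst_mac below is a caller-provided array used only for the example:
 *
 *	struct ethhdr *eth = skb_push(skb, ETH_HLEN);
 *
 *	eth->h_proto = htons(ETH_P_IP);
 *	memcpy(eth->h_dest, dst_mac, ETH_ALEN);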
2598 */ 2599 void *skb_push(struct sk_buff *skb, unsigned int len) 2600 { 2601 skb->data -= len; 2602 skb->len += len; 2603 if (unlikely(skb->data < skb->head)) 2604 skb_under_panic(skb, len, __builtin_return_address(0)); 2605 return skb->data; 2606 } 2607 EXPORT_SYMBOL(skb_push); 2608 2609 /** 2610 * skb_pull - remove data from the start of a buffer 2611 * @skb: buffer to use 2612 * @len: amount of data to remove 2613 * 2614 * This function removes data from the start of a buffer, returning 2615 * the memory to the headroom. A pointer to the next data in the buffer 2616 * is returned. Once the data has been pulled future pushes will overwrite 2617 * the old data. 2618 */ 2619 void *skb_pull(struct sk_buff *skb, unsigned int len) 2620 { 2621 return skb_pull_inline(skb, len); 2622 } 2623 EXPORT_SYMBOL(skb_pull); 2624 2625 /** 2626 * skb_pull_data - remove data from the start of a buffer returning its 2627 * original position. 2628 * @skb: buffer to use 2629 * @len: amount of data to remove 2630 * 2631 * This function removes data from the start of a buffer, returning 2632 * the memory to the headroom. A pointer to the original data in the buffer 2633 * is returned after checking if there is enough data to pull. Once the 2634 * data has been pulled future pushes will overwrite the old data. 2635 */ 2636 void *skb_pull_data(struct sk_buff *skb, size_t len) 2637 { 2638 void *data = skb->data; 2639 2640 if (skb->len < len) 2641 return NULL; 2642 2643 skb_pull(skb, len); 2644 2645 return data; 2646 } 2647 EXPORT_SYMBOL(skb_pull_data); 2648 2649 /** 2650 * skb_trim - remove end from a buffer 2651 * @skb: buffer to alter 2652 * @len: new length 2653 * 2654 * Cut the length of a buffer down by removing data from the tail. If 2655 * the buffer is already under the length specified it is not modified. 2656 * The skb must be linear. 2657 */ 2658 void skb_trim(struct sk_buff *skb, unsigned int len) 2659 { 2660 if (skb->len > len) 2661 __skb_trim(skb, len); 2662 } 2663 EXPORT_SYMBOL(skb_trim); 2664 2665 /* Trims skb to length len. It can change skb pointers. 
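 * If the cut falls inside the linear area all page frags are released;
 * otherwise the frag straddling len is shrunk, the frags behind it are
 * dropped, and any frag_list members beyond the new end are trimmed or
 * freed. Cloned skbs are unshared first via pskb_expand_head().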
2666 */ 2667 2668 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2669 { 2670 struct sk_buff **fragp; 2671 struct sk_buff *frag; 2672 int offset = skb_headlen(skb); 2673 int nfrags = skb_shinfo(skb)->nr_frags; 2674 int i; 2675 int err; 2676 2677 if (skb_cloned(skb) && 2678 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2679 return err; 2680 2681 i = 0; 2682 if (offset >= len) 2683 goto drop_pages; 2684 2685 for (; i < nfrags; i++) { 2686 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2687 2688 if (end < len) { 2689 offset = end; 2690 continue; 2691 } 2692 2693 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2694 2695 drop_pages: 2696 skb_shinfo(skb)->nr_frags = i; 2697 2698 for (; i < nfrags; i++) 2699 skb_frag_unref(skb, i); 2700 2701 if (skb_has_frag_list(skb)) 2702 skb_drop_fraglist(skb); 2703 goto done; 2704 } 2705 2706 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2707 fragp = &frag->next) { 2708 int end = offset + frag->len; 2709 2710 if (skb_shared(frag)) { 2711 struct sk_buff *nfrag; 2712 2713 nfrag = skb_clone(frag, GFP_ATOMIC); 2714 if (unlikely(!nfrag)) 2715 return -ENOMEM; 2716 2717 nfrag->next = frag->next; 2718 consume_skb(frag); 2719 frag = nfrag; 2720 *fragp = frag; 2721 } 2722 2723 if (end < len) { 2724 offset = end; 2725 continue; 2726 } 2727 2728 if (end > len && 2729 unlikely((err = pskb_trim(frag, len - offset)))) 2730 return err; 2731 2732 if (frag->next) 2733 skb_drop_list(&frag->next); 2734 break; 2735 } 2736 2737 done: 2738 if (len > skb_headlen(skb)) { 2739 skb->data_len -= skb->len - len; 2740 skb->len = len; 2741 } else { 2742 skb->len = len; 2743 skb->data_len = 0; 2744 skb_set_tail_pointer(skb, len); 2745 } 2746 2747 if (!skb->sk || skb->destructor == sock_edemux) 2748 skb_condense(skb); 2749 return 0; 2750 } 2751 EXPORT_SYMBOL(___pskb_trim); 2752 2753 /* Note : use pskb_trim_rcsum() instead of calling this directly 2754 */ 2755 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2756 { 2757 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2758 int delta = skb->len - len; 2759 2760 skb->csum = csum_block_sub(skb->csum, 2761 skb_checksum(skb, len, delta, 0), 2762 len); 2763 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2764 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2765 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2766 2767 if (offset + sizeof(__sum16) > hdlen) 2768 return -EINVAL; 2769 } 2770 return __pskb_trim(skb, len); 2771 } 2772 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2773 2774 /** 2775 * __pskb_pull_tail - advance tail of skb header 2776 * @skb: buffer to reallocate 2777 * @delta: number of bytes to advance tail 2778 * 2779 * The function makes a sense only on a fragmented &sk_buff, 2780 * it expands header moving its tail forward and copying necessary 2781 * data from fragmented part. 2782 * 2783 * &sk_buff MUST have reference count of 1. 2784 * 2785 * Returns %NULL (and &sk_buff does not change) if pull failed 2786 * or value of new tail of skb in the case of success. 2787 * 2788 * All the pointers pointing into skb header may change and must be 2789 * reloaded after call to this function. 2790 */ 2791 2792 /* Moves tail of skb head forward, copying data from fragmented part, 2793 * when it is necessary. 2794 * 1. It may fail due to malloc failure. 2795 * 2. It may change skb pointers. 2796 * 2797 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
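 *
 * In outline: make sure there is enough tailroom and an unshared head
 * (pskb_expand_head()), copy @delta bytes from the paged part into the
 * linear area with skb_copy_bits(), fix up any frag_list skbs that were
 * fully or partially consumed, and finally drop or shrink the page frags
 * the pull emptied.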
2798 */ 2799 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2800 { 2801 /* If skb has not enough free space at tail, get new one 2802 * plus 128 bytes for future expansions. If we have enough 2803 * room at tail, reallocate without expansion only if skb is cloned. 2804 */ 2805 int i, k, eat = (skb->tail + delta) - skb->end; 2806 2807 if (eat > 0 || skb_cloned(skb)) { 2808 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2809 GFP_ATOMIC)) 2810 return NULL; 2811 } 2812 2813 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2814 skb_tail_pointer(skb), delta)); 2815 2816 /* Optimization: no fragments, no reasons to preestimate 2817 * size of pulled pages. Superb. 2818 */ 2819 if (!skb_has_frag_list(skb)) 2820 goto pull_pages; 2821 2822 /* Estimate size of pulled pages. */ 2823 eat = delta; 2824 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2825 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2826 2827 if (size >= eat) 2828 goto pull_pages; 2829 eat -= size; 2830 } 2831 2832 /* If we need update frag list, we are in troubles. 2833 * Certainly, it is possible to add an offset to skb data, 2834 * but taking into account that pulling is expected to 2835 * be very rare operation, it is worth to fight against 2836 * further bloating skb head and crucify ourselves here instead. 2837 * Pure masohism, indeed. 8)8) 2838 */ 2839 if (eat) { 2840 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2841 struct sk_buff *clone = NULL; 2842 struct sk_buff *insp = NULL; 2843 2844 do { 2845 if (list->len <= eat) { 2846 /* Eaten as whole. */ 2847 eat -= list->len; 2848 list = list->next; 2849 insp = list; 2850 } else { 2851 /* Eaten partially. */ 2852 if (skb_is_gso(skb) && !list->head_frag && 2853 skb_headlen(list)) 2854 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2855 2856 if (skb_shared(list)) { 2857 /* Sucks! We need to fork list. :-( */ 2858 clone = skb_clone(list, GFP_ATOMIC); 2859 if (!clone) 2860 return NULL; 2861 insp = list->next; 2862 list = clone; 2863 } else { 2864 /* This may be pulled without 2865 * problems. */ 2866 insp = list; 2867 } 2868 if (!pskb_pull(list, eat)) { 2869 kfree_skb(clone); 2870 return NULL; 2871 } 2872 break; 2873 } 2874 } while (eat); 2875 2876 /* Free pulled out fragments. */ 2877 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2878 skb_shinfo(skb)->frag_list = list->next; 2879 consume_skb(list); 2880 } 2881 /* And insert new clone at head. */ 2882 if (clone) { 2883 clone->next = list; 2884 skb_shinfo(skb)->frag_list = clone; 2885 } 2886 } 2887 /* Success! Now we may commit changes to skb data. 
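 * The pull_pages loop below releases the frags that were copied into the
 * linear area in full, shifts the remaining descriptors down, and trims
 * the first surviving frag by whatever part of it was consumed.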
*/ 2888 2889 pull_pages: 2890 eat = delta; 2891 k = 0; 2892 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2893 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2894 2895 if (size <= eat) { 2896 skb_frag_unref(skb, i); 2897 eat -= size; 2898 } else { 2899 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2900 2901 *frag = skb_shinfo(skb)->frags[i]; 2902 if (eat) { 2903 skb_frag_off_add(frag, eat); 2904 skb_frag_size_sub(frag, eat); 2905 if (!i) 2906 goto end; 2907 eat = 0; 2908 } 2909 k++; 2910 } 2911 } 2912 skb_shinfo(skb)->nr_frags = k; 2913 2914 end: 2915 skb->tail += delta; 2916 skb->data_len -= delta; 2917 2918 if (!skb->data_len) 2919 skb_zcopy_clear(skb, false); 2920 2921 return skb_tail_pointer(skb); 2922 } 2923 EXPORT_SYMBOL(__pskb_pull_tail); 2924 2925 /** 2926 * skb_copy_bits - copy bits from skb to kernel buffer 2927 * @skb: source skb 2928 * @offset: offset in source 2929 * @to: destination buffer 2930 * @len: number of bytes to copy 2931 * 2932 * Copy the specified number of bytes from the source skb to the 2933 * destination buffer. 2934 * 2935 * CAUTION ! : 2936 * If its prototype is ever changed, 2937 * check arch/{*}/net/{*}.S files, 2938 * since it is called from BPF assembly code. 2939 */ 2940 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2941 { 2942 int start = skb_headlen(skb); 2943 struct sk_buff *frag_iter; 2944 int i, copy; 2945 2946 if (offset > (int)skb->len - len) 2947 goto fault; 2948 2949 /* Copy header. */ 2950 if ((copy = start - offset) > 0) { 2951 if (copy > len) 2952 copy = len; 2953 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2954 if ((len -= copy) == 0) 2955 return 0; 2956 offset += copy; 2957 to += copy; 2958 } 2959 2960 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2961 int end; 2962 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2963 2964 WARN_ON(start > offset + len); 2965 2966 end = start + skb_frag_size(f); 2967 if ((copy = end - offset) > 0) { 2968 u32 p_off, p_len, copied; 2969 struct page *p; 2970 u8 *vaddr; 2971 2972 if (copy > len) 2973 copy = len; 2974 2975 skb_frag_foreach_page(f, 2976 skb_frag_off(f) + offset - start, 2977 copy, p, p_off, p_len, copied) { 2978 vaddr = kmap_atomic(p); 2979 memcpy(to + copied, vaddr + p_off, p_len); 2980 kunmap_atomic(vaddr); 2981 } 2982 2983 if ((len -= copy) == 0) 2984 return 0; 2985 offset += copy; 2986 to += copy; 2987 } 2988 start = end; 2989 } 2990 2991 skb_walk_frags(skb, frag_iter) { 2992 int end; 2993 2994 WARN_ON(start > offset + len); 2995 2996 end = start + frag_iter->len; 2997 if ((copy = end - offset) > 0) { 2998 if (copy > len) 2999 copy = len; 3000 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 3001 goto fault; 3002 if ((len -= copy) == 0) 3003 return 0; 3004 offset += copy; 3005 to += copy; 3006 } 3007 start = end; 3008 } 3009 3010 if (!len) 3011 return 0; 3012 3013 fault: 3014 return -EFAULT; 3015 } 3016 EXPORT_SYMBOL(skb_copy_bits); 3017 3018 /* 3019 * Callback from splice_to_pipe(), if we need to release some pages 3020 * at the end of the spd in case we error'ed out in filling the pipe. 
3021 */ 3022 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3023 { 3024 put_page(spd->pages[i]); 3025 } 3026 3027 static struct page *linear_to_page(struct page *page, unsigned int *len, 3028 unsigned int *offset, 3029 struct sock *sk) 3030 { 3031 struct page_frag *pfrag = sk_page_frag(sk); 3032 3033 if (!sk_page_frag_refill(sk, pfrag)) 3034 return NULL; 3035 3036 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 3037 3038 memcpy(page_address(pfrag->page) + pfrag->offset, 3039 page_address(page) + *offset, *len); 3040 *offset = pfrag->offset; 3041 pfrag->offset += *len; 3042 3043 return pfrag->page; 3044 } 3045 3046 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 3047 struct page *page, 3048 unsigned int offset) 3049 { 3050 return spd->nr_pages && 3051 spd->pages[spd->nr_pages - 1] == page && 3052 (spd->partial[spd->nr_pages - 1].offset + 3053 spd->partial[spd->nr_pages - 1].len == offset); 3054 } 3055 3056 /* 3057 * Fill page/offset/length into spd, if it can hold more pages. 3058 */ 3059 static bool spd_fill_page(struct splice_pipe_desc *spd, 3060 struct pipe_inode_info *pipe, struct page *page, 3061 unsigned int *len, unsigned int offset, 3062 bool linear, 3063 struct sock *sk) 3064 { 3065 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3066 return true; 3067 3068 if (linear) { 3069 page = linear_to_page(page, len, &offset, sk); 3070 if (!page) 3071 return true; 3072 } 3073 if (spd_can_coalesce(spd, page, offset)) { 3074 spd->partial[spd->nr_pages - 1].len += *len; 3075 return false; 3076 } 3077 get_page(page); 3078 spd->pages[spd->nr_pages] = page; 3079 spd->partial[spd->nr_pages].len = *len; 3080 spd->partial[spd->nr_pages].offset = offset; 3081 spd->nr_pages++; 3082 3083 return false; 3084 } 3085 3086 static bool __splice_segment(struct page *page, unsigned int poff, 3087 unsigned int plen, unsigned int *off, 3088 unsigned int *len, 3089 struct splice_pipe_desc *spd, bool linear, 3090 struct sock *sk, 3091 struct pipe_inode_info *pipe) 3092 { 3093 if (!*len) 3094 return true; 3095 3096 /* skip this segment if already processed */ 3097 if (*off >= plen) { 3098 *off -= plen; 3099 return false; 3100 } 3101 3102 /* ignore any bits we already processed */ 3103 poff += *off; 3104 plen -= *off; 3105 *off = 0; 3106 3107 do { 3108 unsigned int flen = min(*len, plen); 3109 3110 if (spd_fill_page(spd, pipe, page, &flen, poff, 3111 linear, sk)) 3112 return true; 3113 poff += flen; 3114 plen -= flen; 3115 *len -= flen; 3116 } while (*len && plen); 3117 3118 return false; 3119 } 3120 3121 /* 3122 * Map linear and fragment data from the skb to spd. It reports true if the 3123 * pipe is full or if we already spliced the requested length. 3124 */ 3125 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3126 unsigned int *offset, unsigned int *len, 3127 struct splice_pipe_desc *spd, struct sock *sk) 3128 { 3129 int seg; 3130 struct sk_buff *iter; 3131 3132 /* map the linear part : 3133 * If skb->head_frag is set, this 'linear' part is backed by a 3134 * fragment, and if the head is not shared with any clones then 3135 * we can avoid a copy since we own the head portion of this page. 
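 * skb_head_is_locked() below reports the opposite case (cloned or
 * non-fragment head); it is passed as the 'linear' argument so that
 * __splice_segment() falls back to copying the head through
 * linear_to_page() instead of referencing it directly.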
3136 */ 3137 if (__splice_segment(virt_to_page(skb->data), 3138 (unsigned long) skb->data & (PAGE_SIZE - 1), 3139 skb_headlen(skb), 3140 offset, len, spd, 3141 skb_head_is_locked(skb), 3142 sk, pipe)) 3143 return true; 3144 3145 /* 3146 * then map the fragments 3147 */ 3148 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3149 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3150 3151 if (__splice_segment(skb_frag_page(f), 3152 skb_frag_off(f), skb_frag_size(f), 3153 offset, len, spd, false, sk, pipe)) 3154 return true; 3155 } 3156 3157 skb_walk_frags(skb, iter) { 3158 if (*offset >= iter->len) { 3159 *offset -= iter->len; 3160 continue; 3161 } 3162 /* __skb_splice_bits() only fails if the output has no room 3163 * left, so no point in going over the frag_list for the error 3164 * case. 3165 */ 3166 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3167 return true; 3168 } 3169 3170 return false; 3171 } 3172 3173 /* 3174 * Map data from the skb to a pipe. Should handle both the linear part, 3175 * the fragments, and the frag list. 3176 */ 3177 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3178 struct pipe_inode_info *pipe, unsigned int tlen, 3179 unsigned int flags) 3180 { 3181 struct partial_page partial[MAX_SKB_FRAGS]; 3182 struct page *pages[MAX_SKB_FRAGS]; 3183 struct splice_pipe_desc spd = { 3184 .pages = pages, 3185 .partial = partial, 3186 .nr_pages_max = MAX_SKB_FRAGS, 3187 .ops = &nosteal_pipe_buf_ops, 3188 .spd_release = sock_spd_release, 3189 }; 3190 int ret = 0; 3191 3192 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3193 3194 if (spd.nr_pages) 3195 ret = splice_to_pipe(pipe, &spd); 3196 3197 return ret; 3198 } 3199 EXPORT_SYMBOL_GPL(skb_splice_bits); 3200 3201 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3202 { 3203 struct socket *sock = sk->sk_socket; 3204 size_t size = msg_data_left(msg); 3205 3206 if (!sock) 3207 return -EINVAL; 3208 3209 if (!sock->ops->sendmsg_locked) 3210 return sock_no_sendmsg_locked(sk, msg, size); 3211 3212 return sock->ops->sendmsg_locked(sk, msg, size); 3213 } 3214 3215 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3216 { 3217 struct socket *sock = sk->sk_socket; 3218 3219 if (!sock) 3220 return -EINVAL; 3221 return sock_sendmsg(sock, msg); 3222 } 3223 3224 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3225 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3226 int len, sendmsg_func sendmsg) 3227 { 3228 unsigned int orig_len = len; 3229 struct sk_buff *head = skb; 3230 unsigned short fragidx; 3231 int slen, ret; 3232 3233 do_frag_list: 3234 3235 /* Deal with head data */ 3236 while (offset < skb_headlen(skb) && len) { 3237 struct kvec kv; 3238 struct msghdr msg; 3239 3240 slen = min_t(int, len, skb_headlen(skb) - offset); 3241 kv.iov_base = skb->data + offset; 3242 kv.iov_len = slen; 3243 memset(&msg, 0, sizeof(msg)); 3244 msg.msg_flags = MSG_DONTWAIT; 3245 3246 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3247 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3248 sendmsg_unlocked, sk, &msg); 3249 if (ret <= 0) 3250 goto error; 3251 3252 offset += ret; 3253 len -= ret; 3254 } 3255 3256 /* All the data was skb head? 
*/ 3257 if (!len) 3258 goto out; 3259 3260 /* Make offset relative to start of frags */ 3261 offset -= skb_headlen(skb); 3262 3263 /* Find where we are in frag list */ 3264 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3265 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3266 3267 if (offset < skb_frag_size(frag)) 3268 break; 3269 3270 offset -= skb_frag_size(frag); 3271 } 3272 3273 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3274 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3275 3276 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3277 3278 while (slen) { 3279 struct bio_vec bvec; 3280 struct msghdr msg = { 3281 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3282 }; 3283 3284 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3285 skb_frag_off(frag) + offset); 3286 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3287 slen); 3288 3289 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3290 sendmsg_unlocked, sk, &msg); 3291 if (ret <= 0) 3292 goto error; 3293 3294 len -= ret; 3295 offset += ret; 3296 slen -= ret; 3297 } 3298 3299 offset = 0; 3300 } 3301 3302 if (len) { 3303 /* Process any frag lists */ 3304 3305 if (skb == head) { 3306 if (skb_has_frag_list(skb)) { 3307 skb = skb_shinfo(skb)->frag_list; 3308 goto do_frag_list; 3309 } 3310 } else if (skb->next) { 3311 skb = skb->next; 3312 goto do_frag_list; 3313 } 3314 } 3315 3316 out: 3317 return orig_len - len; 3318 3319 error: 3320 return orig_len == len ? ret : orig_len - len; 3321 } 3322 3323 /* Send skb data on a socket. Socket must be locked. */ 3324 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3325 int len) 3326 { 3327 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3328 } 3329 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3330 3331 /* Send skb data on a socket. Socket must be unlocked. */ 3332 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3333 { 3334 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3335 } 3336 3337 /** 3338 * skb_store_bits - store bits from kernel buffer to skb 3339 * @skb: destination buffer 3340 * @offset: offset in destination 3341 * @from: source buffer 3342 * @len: number of bytes to copy 3343 * 3344 * Copy the specified number of bytes from the source buffer to the 3345 * destination skb. This function handles all the messy bits of 3346 * traversing fragment lists and such. 
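 *
 * The destination range must already exist in the skb (the buffer is not
 * extended), and callers are expected to have made those bytes writable
 * first (e.g. via skb_ensure_writable()). Illustrative use, where hdr and
 * hdr_off are assumptions of the example:
 *
 *	if (skb_store_bits(skb, hdr_off, &hdr, sizeof(hdr)))
 *		goto drop;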
3347 */ 3348 3349 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3350 { 3351 int start = skb_headlen(skb); 3352 struct sk_buff *frag_iter; 3353 int i, copy; 3354 3355 if (offset > (int)skb->len - len) 3356 goto fault; 3357 3358 if ((copy = start - offset) > 0) { 3359 if (copy > len) 3360 copy = len; 3361 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3362 if ((len -= copy) == 0) 3363 return 0; 3364 offset += copy; 3365 from += copy; 3366 } 3367 3368 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3369 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3370 int end; 3371 3372 WARN_ON(start > offset + len); 3373 3374 end = start + skb_frag_size(frag); 3375 if ((copy = end - offset) > 0) { 3376 u32 p_off, p_len, copied; 3377 struct page *p; 3378 u8 *vaddr; 3379 3380 if (copy > len) 3381 copy = len; 3382 3383 skb_frag_foreach_page(frag, 3384 skb_frag_off(frag) + offset - start, 3385 copy, p, p_off, p_len, copied) { 3386 vaddr = kmap_atomic(p); 3387 memcpy(vaddr + p_off, from + copied, p_len); 3388 kunmap_atomic(vaddr); 3389 } 3390 3391 if ((len -= copy) == 0) 3392 return 0; 3393 offset += copy; 3394 from += copy; 3395 } 3396 start = end; 3397 } 3398 3399 skb_walk_frags(skb, frag_iter) { 3400 int end; 3401 3402 WARN_ON(start > offset + len); 3403 3404 end = start + frag_iter->len; 3405 if ((copy = end - offset) > 0) { 3406 if (copy > len) 3407 copy = len; 3408 if (skb_store_bits(frag_iter, offset - start, 3409 from, copy)) 3410 goto fault; 3411 if ((len -= copy) == 0) 3412 return 0; 3413 offset += copy; 3414 from += copy; 3415 } 3416 start = end; 3417 } 3418 if (!len) 3419 return 0; 3420 3421 fault: 3422 return -EFAULT; 3423 } 3424 EXPORT_SYMBOL(skb_store_bits); 3425 3426 /* Checksum skb data. */ 3427 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3428 __wsum csum, const struct skb_checksum_ops *ops) 3429 { 3430 int start = skb_headlen(skb); 3431 int i, copy = start - offset; 3432 struct sk_buff *frag_iter; 3433 int pos = 0; 3434 3435 /* Checksum header. 
*/ 3436 if (copy > 0) { 3437 if (copy > len) 3438 copy = len; 3439 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3440 skb->data + offset, copy, csum); 3441 if ((len -= copy) == 0) 3442 return csum; 3443 offset += copy; 3444 pos = copy; 3445 } 3446 3447 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3448 int end; 3449 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3450 3451 WARN_ON(start > offset + len); 3452 3453 end = start + skb_frag_size(frag); 3454 if ((copy = end - offset) > 0) { 3455 u32 p_off, p_len, copied; 3456 struct page *p; 3457 __wsum csum2; 3458 u8 *vaddr; 3459 3460 if (copy > len) 3461 copy = len; 3462 3463 skb_frag_foreach_page(frag, 3464 skb_frag_off(frag) + offset - start, 3465 copy, p, p_off, p_len, copied) { 3466 vaddr = kmap_atomic(p); 3467 csum2 = INDIRECT_CALL_1(ops->update, 3468 csum_partial_ext, 3469 vaddr + p_off, p_len, 0); 3470 kunmap_atomic(vaddr); 3471 csum = INDIRECT_CALL_1(ops->combine, 3472 csum_block_add_ext, csum, 3473 csum2, pos, p_len); 3474 pos += p_len; 3475 } 3476 3477 if (!(len -= copy)) 3478 return csum; 3479 offset += copy; 3480 } 3481 start = end; 3482 } 3483 3484 skb_walk_frags(skb, frag_iter) { 3485 int end; 3486 3487 WARN_ON(start > offset + len); 3488 3489 end = start + frag_iter->len; 3490 if ((copy = end - offset) > 0) { 3491 __wsum csum2; 3492 if (copy > len) 3493 copy = len; 3494 csum2 = __skb_checksum(frag_iter, offset - start, 3495 copy, 0, ops); 3496 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3497 csum, csum2, pos, copy); 3498 if ((len -= copy) == 0) 3499 return csum; 3500 offset += copy; 3501 pos += copy; 3502 } 3503 start = end; 3504 } 3505 BUG_ON(len); 3506 3507 return csum; 3508 } 3509 EXPORT_SYMBOL(__skb_checksum); 3510 3511 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3512 int len, __wsum csum) 3513 { 3514 const struct skb_checksum_ops ops = { 3515 .update = csum_partial_ext, 3516 .combine = csum_block_add_ext, 3517 }; 3518 3519 return __skb_checksum(skb, offset, len, csum, &ops); 3520 } 3521 EXPORT_SYMBOL(skb_checksum); 3522 3523 /* Both of above in one bottle. */ 3524 3525 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3526 u8 *to, int len) 3527 { 3528 int start = skb_headlen(skb); 3529 int i, copy = start - offset; 3530 struct sk_buff *frag_iter; 3531 int pos = 0; 3532 __wsum csum = 0; 3533 3534 /* Copy header. 
*/ 3535 if (copy > 0) { 3536 if (copy > len) 3537 copy = len; 3538 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3539 copy); 3540 if ((len -= copy) == 0) 3541 return csum; 3542 offset += copy; 3543 to += copy; 3544 pos = copy; 3545 } 3546 3547 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3548 int end; 3549 3550 WARN_ON(start > offset + len); 3551 3552 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3553 if ((copy = end - offset) > 0) { 3554 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3555 u32 p_off, p_len, copied; 3556 struct page *p; 3557 __wsum csum2; 3558 u8 *vaddr; 3559 3560 if (copy > len) 3561 copy = len; 3562 3563 skb_frag_foreach_page(frag, 3564 skb_frag_off(frag) + offset - start, 3565 copy, p, p_off, p_len, copied) { 3566 vaddr = kmap_atomic(p); 3567 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3568 to + copied, 3569 p_len); 3570 kunmap_atomic(vaddr); 3571 csum = csum_block_add(csum, csum2, pos); 3572 pos += p_len; 3573 } 3574 3575 if (!(len -= copy)) 3576 return csum; 3577 offset += copy; 3578 to += copy; 3579 } 3580 start = end; 3581 } 3582 3583 skb_walk_frags(skb, frag_iter) { 3584 __wsum csum2; 3585 int end; 3586 3587 WARN_ON(start > offset + len); 3588 3589 end = start + frag_iter->len; 3590 if ((copy = end - offset) > 0) { 3591 if (copy > len) 3592 copy = len; 3593 csum2 = skb_copy_and_csum_bits(frag_iter, 3594 offset - start, 3595 to, copy); 3596 csum = csum_block_add(csum, csum2, pos); 3597 if ((len -= copy) == 0) 3598 return csum; 3599 offset += copy; 3600 to += copy; 3601 pos += copy; 3602 } 3603 start = end; 3604 } 3605 BUG_ON(len); 3606 return csum; 3607 } 3608 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3609 3610 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3611 { 3612 __sum16 sum; 3613 3614 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3615 /* See comments in __skb_checksum_complete(). */ 3616 if (likely(!sum)) { 3617 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3618 !skb->csum_complete_sw) 3619 netdev_rx_csum_fault(skb->dev, skb); 3620 } 3621 if (!skb_shared(skb)) 3622 skb->csum_valid = !sum; 3623 return sum; 3624 } 3625 EXPORT_SYMBOL(__skb_checksum_complete_head); 3626 3627 /* This function assumes skb->csum already holds pseudo header's checksum, 3628 * which has been changed from the hardware checksum, for example, by 3629 * __skb_checksum_validate_complete(). And, the original skb->csum must 3630 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3631 * 3632 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3633 * zero. The new checksum is stored back into skb->csum unless the skb is 3634 * shared. 3635 */ 3636 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3637 { 3638 __wsum csum; 3639 __sum16 sum; 3640 3641 csum = skb_checksum(skb, 0, skb->len, 0); 3642 3643 sum = csum_fold(csum_add(skb->csum, csum)); 3644 /* This check is inverted, because we already knew the hardware 3645 * checksum is invalid before calling this function. So, if the 3646 * re-computed checksum is valid instead, then we have a mismatch 3647 * between the original skb->csum and skb_checksum(). This means either 3648 * the original hardware checksum is incorrect or we screw up skb->csum 3649 * when moving skb->data around. 
3650 */ 3651 if (likely(!sum)) { 3652 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3653 !skb->csum_complete_sw) 3654 netdev_rx_csum_fault(skb->dev, skb); 3655 } 3656 3657 if (!skb_shared(skb)) { 3658 /* Save full packet checksum */ 3659 skb->csum = csum; 3660 skb->ip_summed = CHECKSUM_COMPLETE; 3661 skb->csum_complete_sw = 1; 3662 skb->csum_valid = !sum; 3663 } 3664 3665 return sum; 3666 } 3667 EXPORT_SYMBOL(__skb_checksum_complete); 3668 3669 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3670 { 3671 net_warn_ratelimited( 3672 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3673 __func__); 3674 return 0; 3675 } 3676 3677 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3678 int offset, int len) 3679 { 3680 net_warn_ratelimited( 3681 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3682 __func__); 3683 return 0; 3684 } 3685 3686 static const struct skb_checksum_ops default_crc32c_ops = { 3687 .update = warn_crc32c_csum_update, 3688 .combine = warn_crc32c_csum_combine, 3689 }; 3690 3691 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3692 &default_crc32c_ops; 3693 EXPORT_SYMBOL(crc32c_csum_stub); 3694 3695 /** 3696 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3697 * @from: source buffer 3698 * 3699 * Calculates the amount of linear headroom needed in the 'to' skb passed 3700 * into skb_zerocopy(). 3701 */ 3702 unsigned int 3703 skb_zerocopy_headlen(const struct sk_buff *from) 3704 { 3705 unsigned int hlen = 0; 3706 3707 if (!from->head_frag || 3708 skb_headlen(from) < L1_CACHE_BYTES || 3709 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3710 hlen = skb_headlen(from); 3711 if (!hlen) 3712 hlen = from->len; 3713 } 3714 3715 if (skb_has_frag_list(from)) 3716 hlen = from->len; 3717 3718 return hlen; 3719 } 3720 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3721 3722 /** 3723 * skb_zerocopy - Zero copy skb to skb 3724 * @to: destination buffer 3725 * @from: source buffer 3726 * @len: number of bytes to copy from source buffer 3727 * @hlen: size of linear headroom in destination buffer 3728 * 3729 * Copies up to `len` bytes from `from` to `to` by creating references 3730 * to the frags in the source buffer. 3731 * 3732 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3733 * headroom in the `to` buffer. 
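 * When @hlen is non-zero only that many leading bytes are copied; the
 * rest of @from is attached by taking references on its page frags (and,
 * for @hlen == 0, on its head page). Payloads that fit in
 * skb_tailroom(@to) are simply copied in full instead.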
3734 * 3735 * Return value: 3736 * 0: everything is OK 3737 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3738 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3739 */ 3740 int 3741 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3742 { 3743 int i, j = 0; 3744 int plen = 0; /* length of skb->head fragment */ 3745 int ret; 3746 struct page *page; 3747 unsigned int offset; 3748 3749 BUG_ON(!from->head_frag && !hlen); 3750 3751 /* dont bother with small payloads */ 3752 if (len <= skb_tailroom(to)) 3753 return skb_copy_bits(from, 0, skb_put(to, len), len); 3754 3755 if (hlen) { 3756 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3757 if (unlikely(ret)) 3758 return ret; 3759 len -= hlen; 3760 } else { 3761 plen = min_t(int, skb_headlen(from), len); 3762 if (plen) { 3763 page = virt_to_head_page(from->head); 3764 offset = from->data - (unsigned char *)page_address(page); 3765 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3766 offset, plen); 3767 get_page(page); 3768 j = 1; 3769 len -= plen; 3770 } 3771 } 3772 3773 skb_len_add(to, len + plen); 3774 3775 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3776 skb_tx_error(from); 3777 return -ENOMEM; 3778 } 3779 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3780 3781 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3782 int size; 3783 3784 if (!len) 3785 break; 3786 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3787 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3788 len); 3789 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3790 len -= size; 3791 skb_frag_ref(to, j); 3792 j++; 3793 } 3794 skb_shinfo(to)->nr_frags = j; 3795 3796 return 0; 3797 } 3798 EXPORT_SYMBOL_GPL(skb_zerocopy); 3799 3800 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3801 { 3802 __wsum csum; 3803 long csstart; 3804 3805 if (skb->ip_summed == CHECKSUM_PARTIAL) 3806 csstart = skb_checksum_start_offset(skb); 3807 else 3808 csstart = skb_headlen(skb); 3809 3810 BUG_ON(csstart > skb_headlen(skb)); 3811 3812 skb_copy_from_linear_data(skb, to, csstart); 3813 3814 csum = 0; 3815 if (csstart != skb->len) 3816 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3817 skb->len - csstart); 3818 3819 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3820 long csstuff = csstart + skb->csum_offset; 3821 3822 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3823 } 3824 } 3825 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3826 3827 /** 3828 * skb_dequeue - remove from the head of the queue 3829 * @list: list to dequeue from 3830 * 3831 * Remove the head of the list. The list lock is taken so the function 3832 * may be used safely with other locking list functions. The head item is 3833 * returned or %NULL if the list is empty. 3834 */ 3835 3836 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3837 { 3838 unsigned long flags; 3839 struct sk_buff *result; 3840 3841 spin_lock_irqsave(&list->lock, flags); 3842 result = __skb_dequeue(list); 3843 spin_unlock_irqrestore(&list->lock, flags); 3844 return result; 3845 } 3846 EXPORT_SYMBOL(skb_dequeue); 3847 3848 /** 3849 * skb_dequeue_tail - remove from the tail of the queue 3850 * @list: list to dequeue from 3851 * 3852 * Remove the tail of the list. The list lock is taken so the function 3853 * may be used safely with other locking list functions. The tail item is 3854 * returned or %NULL if the list is empty. 
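 *
 * A common (illustrative) drain pattern using the locked dequeue API,
 * where process_skb() stands in for whatever the caller does with each
 * buffer (including freeing it):
 *
 *	while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *		process_skb(skb);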
3855 */ 3856 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3857 { 3858 unsigned long flags; 3859 struct sk_buff *result; 3860 3861 spin_lock_irqsave(&list->lock, flags); 3862 result = __skb_dequeue_tail(list); 3863 spin_unlock_irqrestore(&list->lock, flags); 3864 return result; 3865 } 3866 EXPORT_SYMBOL(skb_dequeue_tail); 3867 3868 /** 3869 * skb_queue_purge_reason - empty a list 3870 * @list: list to empty 3871 * @reason: drop reason 3872 * 3873 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3874 * the list and one reference dropped. This function takes the list 3875 * lock and is atomic with respect to other list locking functions. 3876 */ 3877 void skb_queue_purge_reason(struct sk_buff_head *list, 3878 enum skb_drop_reason reason) 3879 { 3880 struct sk_buff_head tmp; 3881 unsigned long flags; 3882 3883 if (skb_queue_empty_lockless(list)) 3884 return; 3885 3886 __skb_queue_head_init(&tmp); 3887 3888 spin_lock_irqsave(&list->lock, flags); 3889 skb_queue_splice_init(list, &tmp); 3890 spin_unlock_irqrestore(&list->lock, flags); 3891 3892 __skb_queue_purge_reason(&tmp, reason); 3893 } 3894 EXPORT_SYMBOL(skb_queue_purge_reason); 3895 3896 /** 3897 * skb_rbtree_purge - empty a skb rbtree 3898 * @root: root of the rbtree to empty 3899 * Return value: the sum of truesizes of all purged skbs. 3900 * 3901 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3902 * the list and one reference dropped. This function does not take 3903 * any lock. Synchronization should be handled by the caller (e.g., TCP 3904 * out-of-order queue is protected by the socket lock). 3905 */ 3906 unsigned int skb_rbtree_purge(struct rb_root *root) 3907 { 3908 struct rb_node *p = rb_first(root); 3909 unsigned int sum = 0; 3910 3911 while (p) { 3912 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3913 3914 p = rb_next(p); 3915 rb_erase(&skb->rbnode, root); 3916 sum += skb->truesize; 3917 kfree_skb(skb); 3918 } 3919 return sum; 3920 } 3921 3922 void skb_errqueue_purge(struct sk_buff_head *list) 3923 { 3924 struct sk_buff *skb, *next; 3925 struct sk_buff_head kill; 3926 unsigned long flags; 3927 3928 __skb_queue_head_init(&kill); 3929 3930 spin_lock_irqsave(&list->lock, flags); 3931 skb_queue_walk_safe(list, skb, next) { 3932 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3933 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3934 continue; 3935 __skb_unlink(skb, list); 3936 __skb_queue_tail(&kill, skb); 3937 } 3938 spin_unlock_irqrestore(&list->lock, flags); 3939 __skb_queue_purge(&kill); 3940 } 3941 EXPORT_SYMBOL(skb_errqueue_purge); 3942 3943 /** 3944 * skb_queue_head - queue a buffer at the list head 3945 * @list: list to use 3946 * @newsk: buffer to queue 3947 * 3948 * Queue a buffer at the start of the list. This function takes the 3949 * list lock and can be used safely with other locking &sk_buff functions 3950 * safely. 3951 * 3952 * A buffer cannot be placed on two lists at the same time. 3953 */ 3954 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3955 { 3956 unsigned long flags; 3957 3958 spin_lock_irqsave(&list->lock, flags); 3959 __skb_queue_head(list, newsk); 3960 spin_unlock_irqrestore(&list->lock, flags); 3961 } 3962 EXPORT_SYMBOL(skb_queue_head); 3963 3964 /** 3965 * skb_queue_tail - queue a buffer at the list tail 3966 * @list: list to use 3967 * @newsk: buffer to queue 3968 * 3969 * Queue a buffer at the tail of the list. 
This function takes the 3970 * list lock and can be used safely with other locking &sk_buff functions 3971 * safely. 3972 * 3973 * A buffer cannot be placed on two lists at the same time. 3974 */ 3975 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3976 { 3977 unsigned long flags; 3978 3979 spin_lock_irqsave(&list->lock, flags); 3980 __skb_queue_tail(list, newsk); 3981 spin_unlock_irqrestore(&list->lock, flags); 3982 } 3983 EXPORT_SYMBOL(skb_queue_tail); 3984 3985 /** 3986 * skb_unlink - remove a buffer from a list 3987 * @skb: buffer to remove 3988 * @list: list to use 3989 * 3990 * Remove a packet from a list. The list locks are taken and this 3991 * function is atomic with respect to other list locked calls 3992 * 3993 * You must know what list the SKB is on. 3994 */ 3995 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3996 { 3997 unsigned long flags; 3998 3999 spin_lock_irqsave(&list->lock, flags); 4000 __skb_unlink(skb, list); 4001 spin_unlock_irqrestore(&list->lock, flags); 4002 } 4003 EXPORT_SYMBOL(skb_unlink); 4004 4005 /** 4006 * skb_append - append a buffer 4007 * @old: buffer to insert after 4008 * @newsk: buffer to insert 4009 * @list: list to use 4010 * 4011 * Place a packet after a given packet in a list. The list locks are taken 4012 * and this function is atomic with respect to other list locked calls. 4013 * A buffer cannot be placed on two lists at the same time. 4014 */ 4015 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 4016 { 4017 unsigned long flags; 4018 4019 spin_lock_irqsave(&list->lock, flags); 4020 __skb_queue_after(list, old, newsk); 4021 spin_unlock_irqrestore(&list->lock, flags); 4022 } 4023 EXPORT_SYMBOL(skb_append); 4024 4025 static inline void skb_split_inside_header(struct sk_buff *skb, 4026 struct sk_buff* skb1, 4027 const u32 len, const int pos) 4028 { 4029 int i; 4030 4031 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 4032 pos - len); 4033 /* And move data appendix as is. */ 4034 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 4035 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 4036 4037 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 4038 skb_shinfo(skb)->nr_frags = 0; 4039 skb1->data_len = skb->data_len; 4040 skb1->len += skb1->data_len; 4041 skb->data_len = 0; 4042 skb->len = len; 4043 skb_set_tail_pointer(skb, len); 4044 } 4045 4046 static inline void skb_split_no_header(struct sk_buff *skb, 4047 struct sk_buff* skb1, 4048 const u32 len, int pos) 4049 { 4050 int i, k = 0; 4051 const int nfrags = skb_shinfo(skb)->nr_frags; 4052 4053 skb_shinfo(skb)->nr_frags = 0; 4054 skb1->len = skb1->data_len = skb->len - len; 4055 skb->len = len; 4056 skb->data_len = len - pos; 4057 4058 for (i = 0; i < nfrags; i++) { 4059 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4060 4061 if (pos + size > len) { 4062 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 4063 4064 if (pos < len) { 4065 /* Split frag. 4066 * We have two variants in this case: 4067 * 1. Move all the frag to the second 4068 * part, if it is possible. F.e. 4069 * this approach is mandatory for TUX, 4070 * where splitting is expensive. 4071 * 2. Split is accurately. We make this. 
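 *	      (i.e. cut the frag exactly at the boundary: the first
 *	      len - pos bytes stay in @skb, while the remainder, with
 *	      its offset and size adjusted below, goes to @skb1)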
4072 */ 4073 skb_frag_ref(skb, i); 4074 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 4075 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 4076 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 4077 skb_shinfo(skb)->nr_frags++; 4078 } 4079 k++; 4080 } else 4081 skb_shinfo(skb)->nr_frags++; 4082 pos += size; 4083 } 4084 skb_shinfo(skb1)->nr_frags = k; 4085 } 4086 4087 /** 4088 * skb_split - Split fragmented skb to two parts at length len. 4089 * @skb: the buffer to split 4090 * @skb1: the buffer to receive the second part 4091 * @len: new length for skb 4092 */ 4093 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 4094 { 4095 int pos = skb_headlen(skb); 4096 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 4097 4098 skb_zcopy_downgrade_managed(skb); 4099 4100 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 4101 skb_zerocopy_clone(skb1, skb, 0); 4102 if (len < pos) /* Split line is inside header. */ 4103 skb_split_inside_header(skb, skb1, len, pos); 4104 else /* Second chunk has no header, nothing to copy. */ 4105 skb_split_no_header(skb, skb1, len, pos); 4106 } 4107 EXPORT_SYMBOL(skb_split); 4108 4109 /* Shifting from/to a cloned skb is a no-go. 4110 * 4111 * Caller cannot keep skb_shinfo related pointers past calling here! 4112 */ 4113 static int skb_prepare_for_shift(struct sk_buff *skb) 4114 { 4115 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 4116 } 4117 4118 /** 4119 * skb_shift - Shifts paged data partially from skb to another 4120 * @tgt: buffer into which tail data gets added 4121 * @skb: buffer from which the paged data comes from 4122 * @shiftlen: shift up to this many bytes 4123 * 4124 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4125 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4126 * It's up to caller to free skb if everything was shifted. 4127 * 4128 * If @tgt runs out of frags, the whole operation is aborted. 4129 * 4130 * Skb cannot include anything else but paged data while tgt is allowed 4131 * to have non-paged data as well. 4132 * 4133 * TODO: full sized shift could be optimized but that would need 4134 * specialized skb free'er to handle frags without up-to-date nr_frags. 4135 */ 4136 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4137 { 4138 int from, to, merge, todo; 4139 skb_frag_t *fragfrom, *fragto; 4140 4141 BUG_ON(shiftlen > skb->len); 4142 4143 if (skb_headlen(skb)) 4144 return 0; 4145 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4146 return 0; 4147 4148 todo = shiftlen; 4149 from = 0; 4150 to = skb_shinfo(tgt)->nr_frags; 4151 fragfrom = &skb_shinfo(skb)->frags[from]; 4152 4153 /* Actual merge is delayed until the point when we know we can 4154 * commit all, so that we don't have to undo partial changes 4155 */ 4156 if (!to || 4157 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4158 skb_frag_off(fragfrom))) { 4159 merge = -1; 4160 } else { 4161 merge = to - 1; 4162 4163 todo -= skb_frag_size(fragfrom); 4164 if (todo < 0) { 4165 if (skb_prepare_for_shift(skb) || 4166 skb_prepare_for_shift(tgt)) 4167 return 0; 4168 4169 /* All previous frag pointers might be stale! 
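* skb_prepare_for_shift() unclones via pskb_expand_head(), which
* reallocates the head and thus the shared info, so fragfrom/fragto
* are reloaded from skb_shinfo() below.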
*/ 4170 fragfrom = &skb_shinfo(skb)->frags[from]; 4171 fragto = &skb_shinfo(tgt)->frags[merge]; 4172 4173 skb_frag_size_add(fragto, shiftlen); 4174 skb_frag_size_sub(fragfrom, shiftlen); 4175 skb_frag_off_add(fragfrom, shiftlen); 4176 4177 goto onlymerged; 4178 } 4179 4180 from++; 4181 } 4182 4183 /* Skip full, not-fitting skb to avoid expensive operations */ 4184 if ((shiftlen == skb->len) && 4185 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4186 return 0; 4187 4188 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4189 return 0; 4190 4191 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4192 if (to == MAX_SKB_FRAGS) 4193 return 0; 4194 4195 fragfrom = &skb_shinfo(skb)->frags[from]; 4196 fragto = &skb_shinfo(tgt)->frags[to]; 4197 4198 if (todo >= skb_frag_size(fragfrom)) { 4199 *fragto = *fragfrom; 4200 todo -= skb_frag_size(fragfrom); 4201 from++; 4202 to++; 4203 4204 } else { 4205 __skb_frag_ref(fragfrom); 4206 skb_frag_page_copy(fragto, fragfrom); 4207 skb_frag_off_copy(fragto, fragfrom); 4208 skb_frag_size_set(fragto, todo); 4209 4210 skb_frag_off_add(fragfrom, todo); 4211 skb_frag_size_sub(fragfrom, todo); 4212 todo = 0; 4213 4214 to++; 4215 break; 4216 } 4217 } 4218 4219 /* Ready to "commit" this state change to tgt */ 4220 skb_shinfo(tgt)->nr_frags = to; 4221 4222 if (merge >= 0) { 4223 fragfrom = &skb_shinfo(skb)->frags[0]; 4224 fragto = &skb_shinfo(tgt)->frags[merge]; 4225 4226 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4227 __skb_frag_unref(fragfrom, skb->pp_recycle); 4228 } 4229 4230 /* Reposition in the original skb */ 4231 to = 0; 4232 while (from < skb_shinfo(skb)->nr_frags) 4233 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4234 skb_shinfo(skb)->nr_frags = to; 4235 4236 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4237 4238 onlymerged: 4239 /* Most likely the tgt won't ever need its checksum anymore, skb on 4240 * the other hand might need it if it needs to be resent 4241 */ 4242 tgt->ip_summed = CHECKSUM_PARTIAL; 4243 skb->ip_summed = CHECKSUM_PARTIAL; 4244 4245 skb_len_add(skb, -shiftlen); 4246 skb_len_add(tgt, shiftlen); 4247 4248 return shiftlen; 4249 } 4250 4251 /** 4252 * skb_prepare_seq_read - Prepare a sequential read of skb data 4253 * @skb: the buffer to read 4254 * @from: lower offset of data to be read 4255 * @to: upper offset of data to be read 4256 * @st: state variable 4257 * 4258 * Initializes the specified state variable. Must be called before 4259 * invoking skb_seq_read() for the first time. 4260 */ 4261 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4262 unsigned int to, struct skb_seq_state *st) 4263 { 4264 st->lower_offset = from; 4265 st->upper_offset = to; 4266 st->root_skb = st->cur_skb = skb; 4267 st->frag_idx = st->stepped_offset = 0; 4268 st->frag_data = NULL; 4269 st->frag_off = 0; 4270 } 4271 EXPORT_SYMBOL(skb_prepare_seq_read); 4272 4273 /** 4274 * skb_seq_read - Sequentially read skb data 4275 * @consumed: number of bytes consumed by the caller so far 4276 * @data: destination pointer for data to be returned 4277 * @st: state variable 4278 * 4279 * Reads a block of skb data at @consumed relative to the 4280 * lower offset specified to skb_prepare_seq_read(). Assigns 4281 * the head of the data block to @data and returns the length 4282 * of the block or 0 if the end of the skb data or the upper 4283 * offset has been reached. 4284 * 4285 * The caller is not required to consume all of the data 4286 * returned, i.e. 
@consumed is typically set to the number 4287 * of bytes already consumed and the next call to 4288 * skb_seq_read() will return the remaining part of the block. 4289 * 4290 * Note 1: The size of each block of data returned can be arbitrary, 4291 * this limitation is the cost for zerocopy sequential 4292 * reads of potentially non linear data. 4293 * 4294 * Note 2: Fragment lists within fragments are not implemented 4295 * at the moment, state->root_skb could be replaced with 4296 * a stack for this purpose. 4297 */ 4298 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4299 struct skb_seq_state *st) 4300 { 4301 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4302 skb_frag_t *frag; 4303 4304 if (unlikely(abs_offset >= st->upper_offset)) { 4305 if (st->frag_data) { 4306 kunmap_atomic(st->frag_data); 4307 st->frag_data = NULL; 4308 } 4309 return 0; 4310 } 4311 4312 next_skb: 4313 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4314 4315 if (abs_offset < block_limit && !st->frag_data) { 4316 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4317 return block_limit - abs_offset; 4318 } 4319 4320 if (st->frag_idx == 0 && !st->frag_data) 4321 st->stepped_offset += skb_headlen(st->cur_skb); 4322 4323 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4324 unsigned int pg_idx, pg_off, pg_sz; 4325 4326 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4327 4328 pg_idx = 0; 4329 pg_off = skb_frag_off(frag); 4330 pg_sz = skb_frag_size(frag); 4331 4332 if (skb_frag_must_loop(skb_frag_page(frag))) { 4333 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4334 pg_off = offset_in_page(pg_off + st->frag_off); 4335 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4336 PAGE_SIZE - pg_off); 4337 } 4338 4339 block_limit = pg_sz + st->stepped_offset; 4340 if (abs_offset < block_limit) { 4341 if (!st->frag_data) 4342 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4343 4344 *data = (u8 *)st->frag_data + pg_off + 4345 (abs_offset - st->stepped_offset); 4346 4347 return block_limit - abs_offset; 4348 } 4349 4350 if (st->frag_data) { 4351 kunmap_atomic(st->frag_data); 4352 st->frag_data = NULL; 4353 } 4354 4355 st->stepped_offset += pg_sz; 4356 st->frag_off += pg_sz; 4357 if (st->frag_off == skb_frag_size(frag)) { 4358 st->frag_off = 0; 4359 st->frag_idx++; 4360 } 4361 } 4362 4363 if (st->frag_data) { 4364 kunmap_atomic(st->frag_data); 4365 st->frag_data = NULL; 4366 } 4367 4368 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4369 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4370 st->frag_idx = 0; 4371 goto next_skb; 4372 } else if (st->cur_skb->next) { 4373 st->cur_skb = st->cur_skb->next; 4374 st->frag_idx = 0; 4375 goto next_skb; 4376 } 4377 4378 return 0; 4379 } 4380 EXPORT_SYMBOL(skb_seq_read); 4381 4382 /** 4383 * skb_abort_seq_read - Abort a sequential read of skb data 4384 * @st: state variable 4385 * 4386 * Must be called if skb_seq_read() was not called until it 4387 * returned 0. 
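*
* Illustrative sketch of the sequential-read API as a whole; process()
* stands in for whatever the caller does with each block:
*
*    struct skb_seq_state st;
*    const u8 *data;
*    unsigned int len, consumed = 0;
*
*    skb_prepare_seq_read(skb, 0, skb->len, &st);
*    while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
*        if (!process(data, len)) {
*            skb_abort_seq_read(&st);
*            break;
*        }
*        consumed += len;
*    }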
4388 */ 4389 void skb_abort_seq_read(struct skb_seq_state *st) 4390 { 4391 if (st->frag_data) 4392 kunmap_atomic(st->frag_data); 4393 } 4394 EXPORT_SYMBOL(skb_abort_seq_read); 4395 4396 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4397 4398 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4399 struct ts_config *conf, 4400 struct ts_state *state) 4401 { 4402 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4403 } 4404 4405 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4406 { 4407 skb_abort_seq_read(TS_SKB_CB(state)); 4408 } 4409 4410 /** 4411 * skb_find_text - Find a text pattern in skb data 4412 * @skb: the buffer to look in 4413 * @from: search offset 4414 * @to: search limit 4415 * @config: textsearch configuration 4416 * 4417 * Finds a pattern in the skb data according to the specified 4418 * textsearch configuration. Use textsearch_next() to retrieve 4419 * subsequent occurrences of the pattern. Returns the offset 4420 * to the first occurrence or UINT_MAX if no match was found. 4421 */ 4422 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4423 unsigned int to, struct ts_config *config) 4424 { 4425 unsigned int patlen = config->ops->get_pattern_len(config); 4426 struct ts_state state; 4427 unsigned int ret; 4428 4429 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4430 4431 config->get_next_block = skb_ts_get_next_block; 4432 config->finish = skb_ts_finish; 4433 4434 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4435 4436 ret = textsearch_find(config, &state); 4437 return (ret + patlen <= to - from ? ret : UINT_MAX); 4438 } 4439 EXPORT_SYMBOL(skb_find_text); 4440 4441 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4442 int offset, size_t size, size_t max_frags) 4443 { 4444 int i = skb_shinfo(skb)->nr_frags; 4445 4446 if (skb_can_coalesce(skb, i, page, offset)) { 4447 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4448 } else if (i < max_frags) { 4449 skb_zcopy_downgrade_managed(skb); 4450 get_page(page); 4451 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4452 } else { 4453 return -EMSGSIZE; 4454 } 4455 4456 return 0; 4457 } 4458 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4459 4460 /** 4461 * skb_pull_rcsum - pull skb and update receive checksum 4462 * @skb: buffer to update 4463 * @len: length of data pulled 4464 * 4465 * This function performs an skb_pull on the packet and updates 4466 * the CHECKSUM_COMPLETE checksum. It should be used on 4467 * receive path processing instead of skb_pull unless you know 4468 * that the checksum difference is zero (e.g., a valid IP header) 4469 * or you are setting ip_summed to CHECKSUM_NONE. 
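*
* Illustrative receive-path sketch, pulling a header of hdrlen bytes
* (hdrlen and the drop label are assumptions for the example):
*
*    if (!pskb_may_pull(skb, hdrlen))
*        goto drop;
*    skb_pull_rcsum(skb, hdrlen);
*
* after which skb->data points past the header and a CHECKSUM_COMPLETE
* value in skb->csum has been adjusted for the pulled bytes.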
4470 */ 4471 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4472 { 4473 unsigned char *data = skb->data; 4474 4475 BUG_ON(len > skb->len); 4476 __skb_pull(skb, len); 4477 skb_postpull_rcsum(skb, data, len); 4478 return skb->data; 4479 } 4480 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4481 4482 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4483 { 4484 skb_frag_t head_frag; 4485 struct page *page; 4486 4487 page = virt_to_head_page(frag_skb->head); 4488 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4489 (unsigned char *)page_address(page), 4490 skb_headlen(frag_skb)); 4491 return head_frag; 4492 } 4493 4494 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4495 netdev_features_t features, 4496 unsigned int offset) 4497 { 4498 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4499 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4500 unsigned int delta_truesize = 0; 4501 unsigned int delta_len = 0; 4502 struct sk_buff *tail = NULL; 4503 struct sk_buff *nskb, *tmp; 4504 int len_diff, err; 4505 4506 skb_push(skb, -skb_network_offset(skb) + offset); 4507 4508 /* Ensure the head is writeable before touching the shared info */ 4509 err = skb_unclone(skb, GFP_ATOMIC); 4510 if (err) 4511 goto err_linearize; 4512 4513 skb_shinfo(skb)->frag_list = NULL; 4514 4515 while (list_skb) { 4516 nskb = list_skb; 4517 list_skb = list_skb->next; 4518 4519 err = 0; 4520 delta_truesize += nskb->truesize; 4521 if (skb_shared(nskb)) { 4522 tmp = skb_clone(nskb, GFP_ATOMIC); 4523 if (tmp) { 4524 consume_skb(nskb); 4525 nskb = tmp; 4526 err = skb_unclone(nskb, GFP_ATOMIC); 4527 } else { 4528 err = -ENOMEM; 4529 } 4530 } 4531 4532 if (!tail) 4533 skb->next = nskb; 4534 else 4535 tail->next = nskb; 4536 4537 if (unlikely(err)) { 4538 nskb->next = list_skb; 4539 goto err_linearize; 4540 } 4541 4542 tail = nskb; 4543 4544 delta_len += nskb->len; 4545 4546 skb_push(nskb, -skb_network_offset(nskb) + offset); 4547 4548 skb_release_head_state(nskb); 4549 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4550 __copy_skb_header(nskb, skb); 4551 4552 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4553 nskb->transport_header += len_diff; 4554 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4555 nskb->data - tnl_hlen, 4556 offset + tnl_hlen); 4557 4558 if (skb_needs_linearize(nskb, features) && 4559 __skb_linearize(nskb)) 4560 goto err_linearize; 4561 } 4562 4563 skb->truesize = skb->truesize - delta_truesize; 4564 skb->data_len = skb->data_len - delta_len; 4565 skb->len = skb->len - delta_len; 4566 4567 skb_gso_reset(skb); 4568 4569 skb->prev = tail; 4570 4571 if (skb_needs_linearize(skb, features) && 4572 __skb_linearize(skb)) 4573 goto err_linearize; 4574 4575 skb_get(skb); 4576 4577 return skb; 4578 4579 err_linearize: 4580 kfree_skb_list(skb->next); 4581 skb->next = NULL; 4582 return ERR_PTR(-ENOMEM); 4583 } 4584 EXPORT_SYMBOL_GPL(skb_segment_list); 4585 4586 /** 4587 * skb_segment - Perform protocol segmentation on skb. 4588 * @head_skb: buffer to segment 4589 * @features: features for the output path (see dev->features) 4590 * 4591 * This function performs segmentation on the given skb. It returns 4592 * a pointer to the first in a list of new skbs for the segments. 4593 * In case of error it returns ERR_PTR(err). 
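*
* Illustrative caller sketch (normally reached via the GSO layer;
* my_xmit_one() is a stand-in for whatever consumes each segment):
*
*    struct sk_buff *segs, *seg, *next;
*
*    segs = skb_segment(skb, netif_skb_features(skb));
*    if (IS_ERR(segs))
*        return PTR_ERR(segs);
*    consume_skb(skb);
*    skb_list_walk_safe(segs, seg, next) {
*        skb_mark_not_on_list(seg);
*        my_xmit_one(seg);
*    }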
4594 */ 4595 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4596 netdev_features_t features) 4597 { 4598 struct sk_buff *segs = NULL; 4599 struct sk_buff *tail = NULL; 4600 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4601 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4602 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4603 unsigned int offset = doffset; 4604 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4605 unsigned int partial_segs = 0; 4606 unsigned int headroom; 4607 unsigned int len = head_skb->len; 4608 struct sk_buff *frag_skb; 4609 skb_frag_t *frag; 4610 __be16 proto; 4611 bool csum, sg; 4612 int err = -ENOMEM; 4613 int i = 0; 4614 int nfrags, pos; 4615 4616 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4617 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4618 struct sk_buff *check_skb; 4619 4620 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4621 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4622 /* gso_size is untrusted, and we have a frag_list with 4623 * a linear non head_frag item. 4624 * 4625 * If head_skb's headlen does not fit requested gso_size, 4626 * it means that the frag_list members do NOT terminate 4627 * on exact gso_size boundaries. Hence we cannot perform 4628 * skb_frag_t page sharing. Therefore we must fallback to 4629 * copying the frag_list skbs; we do so by disabling SG. 4630 */ 4631 features &= ~NETIF_F_SG; 4632 break; 4633 } 4634 } 4635 } 4636 4637 __skb_push(head_skb, doffset); 4638 proto = skb_network_protocol(head_skb, NULL); 4639 if (unlikely(!proto)) 4640 return ERR_PTR(-EINVAL); 4641 4642 sg = !!(features & NETIF_F_SG); 4643 csum = !!can_checksum_protocol(features, proto); 4644 4645 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4646 if (!(features & NETIF_F_GSO_PARTIAL)) { 4647 struct sk_buff *iter; 4648 unsigned int frag_len; 4649 4650 if (!list_skb || 4651 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4652 goto normal; 4653 4654 /* If we get here then all the required 4655 * GSO features except frag_list are supported. 4656 * Try to split the SKB to multiple GSO SKBs 4657 * with no frag_list. 4658 * Currently we can do that only when the buffers don't 4659 * have a linear part and all the buffers except 4660 * the last are of the same length. 4661 */ 4662 frag_len = list_skb->len; 4663 skb_walk_frags(head_skb, iter) { 4664 if (frag_len != iter->len && iter->next) 4665 goto normal; 4666 if (skb_headlen(iter) && !iter->head_frag) 4667 goto normal; 4668 4669 len -= iter->len; 4670 } 4671 4672 if (len != frag_len) 4673 goto normal; 4674 } 4675 4676 /* GSO partial only requires that we trim off any excess that 4677 * doesn't fit into an MSS sized block, so take care of that 4678 * now. 4679 * Cap len to not accidentally hit GSO_BY_FRAGS. 
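* As an illustration, len = 64000 with mss = 1448 gives
* partial_segs = 44, so the enlarged mss below becomes
* 44 * 1448 = 63712 bytes per intermediate segment, with the
* remainder carried by the final segment.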
4680 */ 4681 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4682 if (partial_segs > 1) 4683 mss *= partial_segs; 4684 else 4685 partial_segs = 0; 4686 } 4687 4688 normal: 4689 headroom = skb_headroom(head_skb); 4690 pos = skb_headlen(head_skb); 4691 4692 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4693 return ERR_PTR(-ENOMEM); 4694 4695 nfrags = skb_shinfo(head_skb)->nr_frags; 4696 frag = skb_shinfo(head_skb)->frags; 4697 frag_skb = head_skb; 4698 4699 do { 4700 struct sk_buff *nskb; 4701 skb_frag_t *nskb_frag; 4702 int hsize; 4703 int size; 4704 4705 if (unlikely(mss == GSO_BY_FRAGS)) { 4706 len = list_skb->len; 4707 } else { 4708 len = head_skb->len - offset; 4709 if (len > mss) 4710 len = mss; 4711 } 4712 4713 hsize = skb_headlen(head_skb) - offset; 4714 4715 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4716 (skb_headlen(list_skb) == len || sg)) { 4717 BUG_ON(skb_headlen(list_skb) > len); 4718 4719 nskb = skb_clone(list_skb, GFP_ATOMIC); 4720 if (unlikely(!nskb)) 4721 goto err; 4722 4723 i = 0; 4724 nfrags = skb_shinfo(list_skb)->nr_frags; 4725 frag = skb_shinfo(list_skb)->frags; 4726 frag_skb = list_skb; 4727 pos += skb_headlen(list_skb); 4728 4729 while (pos < offset + len) { 4730 BUG_ON(i >= nfrags); 4731 4732 size = skb_frag_size(frag); 4733 if (pos + size > offset + len) 4734 break; 4735 4736 i++; 4737 pos += size; 4738 frag++; 4739 } 4740 4741 list_skb = list_skb->next; 4742 4743 if (unlikely(pskb_trim(nskb, len))) { 4744 kfree_skb(nskb); 4745 goto err; 4746 } 4747 4748 hsize = skb_end_offset(nskb); 4749 if (skb_cow_head(nskb, doffset + headroom)) { 4750 kfree_skb(nskb); 4751 goto err; 4752 } 4753 4754 nskb->truesize += skb_end_offset(nskb) - hsize; 4755 skb_release_head_state(nskb); 4756 __skb_push(nskb, doffset); 4757 } else { 4758 if (hsize < 0) 4759 hsize = 0; 4760 if (hsize > len || !sg) 4761 hsize = len; 4762 4763 nskb = __alloc_skb(hsize + doffset + headroom, 4764 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4765 NUMA_NO_NODE); 4766 4767 if (unlikely(!nskb)) 4768 goto err; 4769 4770 skb_reserve(nskb, headroom); 4771 __skb_put(nskb, doffset); 4772 } 4773 4774 if (segs) 4775 tail->next = nskb; 4776 else 4777 segs = nskb; 4778 tail = nskb; 4779 4780 __copy_skb_header(nskb, head_skb); 4781 4782 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4783 skb_reset_mac_len(nskb); 4784 4785 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4786 nskb->data - tnl_hlen, 4787 doffset + tnl_hlen); 4788 4789 if (nskb->len == len + doffset) 4790 goto perform_csum_check; 4791 4792 if (!sg) { 4793 if (!csum) { 4794 if (!nskb->remcsum_offload) 4795 nskb->ip_summed = CHECKSUM_NONE; 4796 SKB_GSO_CB(nskb)->csum = 4797 skb_copy_and_csum_bits(head_skb, offset, 4798 skb_put(nskb, 4799 len), 4800 len); 4801 SKB_GSO_CB(nskb)->csum_start = 4802 skb_headroom(nskb) + doffset; 4803 } else { 4804 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4805 goto err; 4806 } 4807 continue; 4808 } 4809 4810 nskb_frag = skb_shinfo(nskb)->frags; 4811 4812 skb_copy_from_linear_data_offset(head_skb, offset, 4813 skb_put(nskb, hsize), hsize); 4814 4815 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4816 SKBFL_SHARED_FRAG; 4817 4818 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4819 goto err; 4820 4821 while (pos < offset + len) { 4822 if (i >= nfrags) { 4823 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4824 skb_zerocopy_clone(nskb, list_skb, 4825 GFP_ATOMIC)) 4826 goto err; 4827 4828 i = 0; 4829 nfrags = skb_shinfo(list_skb)->nr_frags; 4830 frag = 
skb_shinfo(list_skb)->frags; 4831 frag_skb = list_skb; 4832 if (!skb_headlen(list_skb)) { 4833 BUG_ON(!nfrags); 4834 } else { 4835 BUG_ON(!list_skb->head_frag); 4836 4837 /* to make room for head_frag. */ 4838 i--; 4839 frag--; 4840 } 4841 4842 list_skb = list_skb->next; 4843 } 4844 4845 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4846 MAX_SKB_FRAGS)) { 4847 net_warn_ratelimited( 4848 "skb_segment: too many frags: %u %u\n", 4849 pos, mss); 4850 err = -EINVAL; 4851 goto err; 4852 } 4853 4854 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4855 __skb_frag_ref(nskb_frag); 4856 size = skb_frag_size(nskb_frag); 4857 4858 if (pos < offset) { 4859 skb_frag_off_add(nskb_frag, offset - pos); 4860 skb_frag_size_sub(nskb_frag, offset - pos); 4861 } 4862 4863 skb_shinfo(nskb)->nr_frags++; 4864 4865 if (pos + size <= offset + len) { 4866 i++; 4867 frag++; 4868 pos += size; 4869 } else { 4870 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4871 goto skip_fraglist; 4872 } 4873 4874 nskb_frag++; 4875 } 4876 4877 skip_fraglist: 4878 nskb->data_len = len - hsize; 4879 nskb->len += nskb->data_len; 4880 nskb->truesize += nskb->data_len; 4881 4882 perform_csum_check: 4883 if (!csum) { 4884 if (skb_has_shared_frag(nskb) && 4885 __skb_linearize(nskb)) 4886 goto err; 4887 4888 if (!nskb->remcsum_offload) 4889 nskb->ip_summed = CHECKSUM_NONE; 4890 SKB_GSO_CB(nskb)->csum = 4891 skb_checksum(nskb, doffset, 4892 nskb->len - doffset, 0); 4893 SKB_GSO_CB(nskb)->csum_start = 4894 skb_headroom(nskb) + doffset; 4895 } 4896 } while ((offset += len) < head_skb->len); 4897 4898 /* Some callers want to get the end of the list. 4899 * Put it in segs->prev to avoid walking the list. 4900 * (see validate_xmit_skb_list() for example) 4901 */ 4902 segs->prev = tail; 4903 4904 if (partial_segs) { 4905 struct sk_buff *iter; 4906 int type = skb_shinfo(head_skb)->gso_type; 4907 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4908 4909 /* Update type to add partial and then remove dodgy if set */ 4910 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4911 type &= ~SKB_GSO_DODGY; 4912 4913 /* Update GSO info and prepare to start updating headers on 4914 * our way back down the stack of protocols. 4915 */ 4916 for (iter = segs; iter; iter = iter->next) { 4917 skb_shinfo(iter)->gso_size = gso_size; 4918 skb_shinfo(iter)->gso_segs = partial_segs; 4919 skb_shinfo(iter)->gso_type = type; 4920 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4921 } 4922 4923 if (tail->len - doffset <= gso_size) 4924 skb_shinfo(tail)->gso_size = 0; 4925 else if (tail != segs) 4926 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4927 } 4928 4929 /* Following permits correct backpressure, for protocols 4930 * using skb_set_owner_w(). 4931 * Idea is to tranfert ownership from head_skb to last segment. 
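* Concretely, the swap below hands tail the original truesize,
* destructor and socket, so the sk_wmem_alloc charge is released only
* when the last segment is freed, not when the caller drops head_skb.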
4932 */ 4933 if (head_skb->destructor == sock_wfree) { 4934 swap(tail->truesize, head_skb->truesize); 4935 swap(tail->destructor, head_skb->destructor); 4936 swap(tail->sk, head_skb->sk); 4937 } 4938 return segs; 4939 4940 err: 4941 kfree_skb_list(segs); 4942 return ERR_PTR(err); 4943 } 4944 EXPORT_SYMBOL_GPL(skb_segment); 4945 4946 #ifdef CONFIG_SKB_EXTENSIONS 4947 #define SKB_EXT_ALIGN_VALUE 8 4948 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4949 4950 static const u8 skb_ext_type_len[] = { 4951 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4952 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4953 #endif 4954 #ifdef CONFIG_XFRM 4955 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4956 #endif 4957 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4958 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4959 #endif 4960 #if IS_ENABLED(CONFIG_MPTCP) 4961 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4962 #endif 4963 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4964 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 4965 #endif 4966 }; 4967 4968 static __always_inline unsigned int skb_ext_total_length(void) 4969 { 4970 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 4971 int i; 4972 4973 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 4974 l += skb_ext_type_len[i]; 4975 4976 return l; 4977 } 4978 4979 static void skb_extensions_init(void) 4980 { 4981 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4982 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4983 BUILD_BUG_ON(skb_ext_total_length() > 255); 4984 #endif 4985 4986 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4987 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4988 0, 4989 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4990 NULL); 4991 } 4992 #else 4993 static void skb_extensions_init(void) {} 4994 #endif 4995 4996 /* The SKB kmem_cache slab is critical for network performance. Never 4997 * merge/alias the slab with similar sized objects. This avoids fragmentation 4998 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 4999 */ 5000 #ifndef CONFIG_SLUB_TINY 5001 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 5002 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 5003 #define FLAG_SKB_NO_MERGE 0 5004 #endif 5005 5006 void __init skb_init(void) 5007 { 5008 skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 5009 sizeof(struct sk_buff), 5010 0, 5011 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 5012 FLAG_SKB_NO_MERGE, 5013 offsetof(struct sk_buff, cb), 5014 sizeof_field(struct sk_buff, cb), 5015 NULL); 5016 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 5017 sizeof(struct sk_buff_fclones), 5018 0, 5019 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5020 NULL); 5021 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 5022 * struct skb_shared_info is located at the end of skb->head, 5023 * and should not be copied to/from user. 
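* Hence the usercopy window created below starts at offset 0 and spans
* only SKB_SMALL_HEAD_HEADROOM bytes of each skb_small_head_cache object.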
5024 */ 5025 skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 5026 SKB_SMALL_HEAD_CACHE_SIZE, 5027 0, 5028 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 5029 0, 5030 SKB_SMALL_HEAD_HEADROOM, 5031 NULL); 5032 skb_extensions_init(); 5033 } 5034 5035 static int 5036 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 5037 unsigned int recursion_level) 5038 { 5039 int start = skb_headlen(skb); 5040 int i, copy = start - offset; 5041 struct sk_buff *frag_iter; 5042 int elt = 0; 5043 5044 if (unlikely(recursion_level >= 24)) 5045 return -EMSGSIZE; 5046 5047 if (copy > 0) { 5048 if (copy > len) 5049 copy = len; 5050 sg_set_buf(sg, skb->data + offset, copy); 5051 elt++; 5052 if ((len -= copy) == 0) 5053 return elt; 5054 offset += copy; 5055 } 5056 5057 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5058 int end; 5059 5060 WARN_ON(start > offset + len); 5061 5062 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 5063 if ((copy = end - offset) > 0) { 5064 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5065 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5066 return -EMSGSIZE; 5067 5068 if (copy > len) 5069 copy = len; 5070 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 5071 skb_frag_off(frag) + offset - start); 5072 elt++; 5073 if (!(len -= copy)) 5074 return elt; 5075 offset += copy; 5076 } 5077 start = end; 5078 } 5079 5080 skb_walk_frags(skb, frag_iter) { 5081 int end, ret; 5082 5083 WARN_ON(start > offset + len); 5084 5085 end = start + frag_iter->len; 5086 if ((copy = end - offset) > 0) { 5087 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5088 return -EMSGSIZE; 5089 5090 if (copy > len) 5091 copy = len; 5092 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 5093 copy, recursion_level + 1); 5094 if (unlikely(ret < 0)) 5095 return ret; 5096 elt += ret; 5097 if ((len -= copy) == 0) 5098 return elt; 5099 offset += copy; 5100 } 5101 start = end; 5102 } 5103 BUG_ON(len); 5104 return elt; 5105 } 5106 5107 /** 5108 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 5109 * @skb: Socket buffer containing the buffers to be mapped 5110 * @sg: The scatter-gather list to map into 5111 * @offset: The offset into the buffer's contents to start mapping 5112 * @len: Length of buffer space to be mapped 5113 * 5114 * Fill the specified scatter-gather list with mappings/pointers into a 5115 * region of the buffer space attached to a socket buffer. Returns either 5116 * the number of scatterlist items used, or -EMSGSIZE if the contents 5117 * could not fit. 5118 */ 5119 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5120 { 5121 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5122 5123 if (nsg <= 0) 5124 return nsg; 5125 5126 sg_mark_end(&sg[nsg - 1]); 5127 5128 return nsg; 5129 } 5130 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5131 5132 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 5133 * sglist without mark the sg which contain last skb data as the end. 5134 * So the caller can mannipulate sg list as will when padding new data after 5135 * the first call without calling sg_unmark_end to expend sg list. 5136 * 5137 * Scenario to use skb_to_sgvec_nomark: 5138 * 1. sg_init_table 5139 * 2. skb_to_sgvec_nomark(payload1) 5140 * 3. skb_to_sgvec_nomark(payload2) 5141 * 5142 * This is equivalent to: 5143 * 1. sg_init_table 5144 * 2. skb_to_sgvec(payload1) 5145 * 3. sg_unmark_end 5146 * 4. 
skb_to_sgvec(payload2) 5147 * 5148 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 5149 * is more preferable. 5150 */ 5151 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5152 int offset, int len) 5153 { 5154 return __skb_to_sgvec(skb, sg, offset, len, 0); 5155 } 5156 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5157 5158 5159 5160 /** 5161 * skb_cow_data - Check that a socket buffer's data buffers are writable 5162 * @skb: The socket buffer to check. 5163 * @tailbits: Amount of trailing space to be added 5164 * @trailer: Returned pointer to the skb where the @tailbits space begins 5165 * 5166 * Make sure that the data buffers attached to a socket buffer are 5167 * writable. If they are not, private copies are made of the data buffers 5168 * and the socket buffer is set to use these instead. 5169 * 5170 * If @tailbits is given, make sure that there is space to write @tailbits 5171 * bytes of data beyond current end of socket buffer. @trailer will be 5172 * set to point to the skb in which this space begins. 5173 * 5174 * The number of scatterlist elements required to completely map the 5175 * COW'd and extended socket buffer will be returned. 5176 */ 5177 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5178 { 5179 int copyflag; 5180 int elt; 5181 struct sk_buff *skb1, **skb_p; 5182 5183 /* If skb is cloned or its head is paged, reallocate 5184 * head pulling out all the pages (pages are considered not writable 5185 * at the moment even if they are anonymous). 5186 */ 5187 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5188 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5189 return -ENOMEM; 5190 5191 /* Easy case. Most of packets will go this way. */ 5192 if (!skb_has_frag_list(skb)) { 5193 /* A little of trouble, not enough of space for trailer. 5194 * This should not happen, when stack is tuned to generate 5195 * good frames. OK, on miss we reallocate and reserve even more 5196 * space, 128 bytes is fair. */ 5197 5198 if (skb_tailroom(skb) < tailbits && 5199 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5200 return -ENOMEM; 5201 5202 /* Voila! */ 5203 *trailer = skb; 5204 return 1; 5205 } 5206 5207 /* Misery. We are in troubles, going to mincer fragments... */ 5208 5209 elt = 1; 5210 skb_p = &skb_shinfo(skb)->frag_list; 5211 copyflag = 0; 5212 5213 while ((skb1 = *skb_p) != NULL) { 5214 int ntail = 0; 5215 5216 /* The fragment is partially pulled by someone, 5217 * this can happen on input. Copy it and everything 5218 * after it. */ 5219 5220 if (skb_shared(skb1)) 5221 copyflag = 1; 5222 5223 /* If the skb is the last, worry about trailer. */ 5224 5225 if (skb1->next == NULL && tailbits) { 5226 if (skb_shinfo(skb1)->nr_frags || 5227 skb_has_frag_list(skb1) || 5228 skb_tailroom(skb1) < tailbits) 5229 ntail = tailbits + 128; 5230 } 5231 5232 if (copyflag || 5233 skb_cloned(skb1) || 5234 ntail || 5235 skb_shinfo(skb1)->nr_frags || 5236 skb_has_frag_list(skb1)) { 5237 struct sk_buff *skb2; 5238 5239 /* Fuck, we are miserable poor guys... */ 5240 if (ntail == 0) 5241 skb2 = skb_copy(skb1, GFP_ATOMIC); 5242 else 5243 skb2 = skb_copy_expand(skb1, 5244 skb_headroom(skb1), 5245 ntail, 5246 GFP_ATOMIC); 5247 if (unlikely(skb2 == NULL)) 5248 return -ENOMEM; 5249 5250 if (skb1->sk) 5251 skb_set_owner_w(skb2, skb1->sk); 5252 5253 /* Looking around. Are we still alive? 
5254 * OK, link new skb, drop old one */ 5255 5256 skb2->next = skb1->next; 5257 *skb_p = skb2; 5258 kfree_skb(skb1); 5259 skb1 = skb2; 5260 } 5261 elt++; 5262 *trailer = skb1; 5263 skb_p = &skb1->next; 5264 } 5265 5266 return elt; 5267 } 5268 EXPORT_SYMBOL_GPL(skb_cow_data); 5269 5270 static void sock_rmem_free(struct sk_buff *skb) 5271 { 5272 struct sock *sk = skb->sk; 5273 5274 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5275 } 5276 5277 static void skb_set_err_queue(struct sk_buff *skb) 5278 { 5279 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5280 * So, it is safe to (mis)use it to mark skbs on the error queue. 5281 */ 5282 skb->pkt_type = PACKET_OUTGOING; 5283 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5284 } 5285 5286 /* 5287 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5288 */ 5289 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5290 { 5291 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5292 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5293 return -ENOMEM; 5294 5295 skb_orphan(skb); 5296 skb->sk = sk; 5297 skb->destructor = sock_rmem_free; 5298 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5299 skb_set_err_queue(skb); 5300 5301 /* before exiting rcu section, make sure dst is refcounted */ 5302 skb_dst_force(skb); 5303 5304 skb_queue_tail(&sk->sk_error_queue, skb); 5305 if (!sock_flag(sk, SOCK_DEAD)) 5306 sk_error_report(sk); 5307 return 0; 5308 } 5309 EXPORT_SYMBOL(sock_queue_err_skb); 5310 5311 static bool is_icmp_err_skb(const struct sk_buff *skb) 5312 { 5313 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5314 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5315 } 5316 5317 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5318 { 5319 struct sk_buff_head *q = &sk->sk_error_queue; 5320 struct sk_buff *skb, *skb_next = NULL; 5321 bool icmp_next = false; 5322 unsigned long flags; 5323 5324 if (skb_queue_empty_lockless(q)) 5325 return NULL; 5326 5327 spin_lock_irqsave(&q->lock, flags); 5328 skb = __skb_dequeue(q); 5329 if (skb && (skb_next = skb_peek(q))) { 5330 icmp_next = is_icmp_err_skb(skb_next); 5331 if (icmp_next) 5332 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5333 } 5334 spin_unlock_irqrestore(&q->lock, flags); 5335 5336 if (is_icmp_err_skb(skb) && !icmp_next) 5337 sk->sk_err = 0; 5338 5339 if (skb_next) 5340 sk_error_report(sk); 5341 5342 return skb; 5343 } 5344 EXPORT_SYMBOL(sock_dequeue_err_skb); 5345 5346 /** 5347 * skb_clone_sk - create clone of skb, and take reference to socket 5348 * @skb: the skb to clone 5349 * 5350 * This function creates a clone of a buffer that holds a reference on 5351 * sk_refcnt. Buffers created via this function are meant to be 5352 * returned using sock_queue_err_skb, or free via kfree_skb. 5353 * 5354 * When passing buffers allocated with this function to sock_queue_err_skb 5355 * it is necessary to wrap the call with sock_hold/sock_put in order to 5356 * prevent the socket from being released prior to being enqueued on 5357 * the sk_error_queue. 
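*
* Illustrative driver pattern for TX timestamping (hwtstamps stands for
* the timestamp produced by the hardware completion handler):
*
*    clone = skb_clone_sk(skb);
*    if (clone)
*        skb_complete_tx_timestamp(clone, &hwtstamps);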
5358 */ 5359 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5360 { 5361 struct sock *sk = skb->sk; 5362 struct sk_buff *clone; 5363 5364 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5365 return NULL; 5366 5367 clone = skb_clone(skb, GFP_ATOMIC); 5368 if (!clone) { 5369 sock_put(sk); 5370 return NULL; 5371 } 5372 5373 clone->sk = sk; 5374 clone->destructor = sock_efree; 5375 5376 return clone; 5377 } 5378 EXPORT_SYMBOL(skb_clone_sk); 5379 5380 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5381 struct sock *sk, 5382 int tstype, 5383 bool opt_stats) 5384 { 5385 struct sock_exterr_skb *serr; 5386 int err; 5387 5388 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5389 5390 serr = SKB_EXT_ERR(skb); 5391 memset(serr, 0, sizeof(*serr)); 5392 serr->ee.ee_errno = ENOMSG; 5393 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5394 serr->ee.ee_info = tstype; 5395 serr->opt_stats = opt_stats; 5396 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5397 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5398 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5399 if (sk_is_tcp(sk)) 5400 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5401 } 5402 5403 err = sock_queue_err_skb(sk, skb); 5404 5405 if (err) 5406 kfree_skb(skb); 5407 } 5408 5409 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5410 { 5411 bool ret; 5412 5413 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) 5414 return true; 5415 5416 read_lock_bh(&sk->sk_callback_lock); 5417 ret = sk->sk_socket && sk->sk_socket->file && 5418 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5419 read_unlock_bh(&sk->sk_callback_lock); 5420 return ret; 5421 } 5422 5423 void skb_complete_tx_timestamp(struct sk_buff *skb, 5424 struct skb_shared_hwtstamps *hwtstamps) 5425 { 5426 struct sock *sk = skb->sk; 5427 5428 if (!skb_may_tx_timestamp(sk, false)) 5429 goto err; 5430 5431 /* Take a reference to prevent skb_orphan() from freeing the socket, 5432 * but only if the socket refcount is not zero. 
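* If the refcount is already zero the socket is being destroyed, and we
* fall through to the err label below and simply free the skb.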
5433 */ 5434 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5435 *skb_hwtstamps(skb) = *hwtstamps; 5436 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5437 sock_put(sk); 5438 return; 5439 } 5440 5441 err: 5442 kfree_skb(skb); 5443 } 5444 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5445 5446 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5447 const struct sk_buff *ack_skb, 5448 struct skb_shared_hwtstamps *hwtstamps, 5449 struct sock *sk, int tstype) 5450 { 5451 struct sk_buff *skb; 5452 bool tsonly, opt_stats = false; 5453 u32 tsflags; 5454 5455 if (!sk) 5456 return; 5457 5458 tsflags = READ_ONCE(sk->sk_tsflags); 5459 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5460 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5461 return; 5462 5463 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5464 if (!skb_may_tx_timestamp(sk, tsonly)) 5465 return; 5466 5467 if (tsonly) { 5468 #ifdef CONFIG_INET 5469 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5470 sk_is_tcp(sk)) { 5471 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5472 ack_skb); 5473 opt_stats = true; 5474 } else 5475 #endif 5476 skb = alloc_skb(0, GFP_ATOMIC); 5477 } else { 5478 skb = skb_clone(orig_skb, GFP_ATOMIC); 5479 5480 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5481 kfree_skb(skb); 5482 return; 5483 } 5484 } 5485 if (!skb) 5486 return; 5487 5488 if (tsonly) { 5489 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5490 SKBTX_ANY_TSTAMP; 5491 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5492 } 5493 5494 if (hwtstamps) 5495 *skb_hwtstamps(skb) = *hwtstamps; 5496 else 5497 __net_timestamp(skb); 5498 5499 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5500 } 5501 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5502 5503 void skb_tstamp_tx(struct sk_buff *orig_skb, 5504 struct skb_shared_hwtstamps *hwtstamps) 5505 { 5506 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5507 SCM_TSTAMP_SND); 5508 } 5509 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5510 5511 #ifdef CONFIG_WIRELESS 5512 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5513 { 5514 struct sock *sk = skb->sk; 5515 struct sock_exterr_skb *serr; 5516 int err = 1; 5517 5518 skb->wifi_acked_valid = 1; 5519 skb->wifi_acked = acked; 5520 5521 serr = SKB_EXT_ERR(skb); 5522 memset(serr, 0, sizeof(*serr)); 5523 serr->ee.ee_errno = ENOMSG; 5524 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5525 5526 /* Take a reference to prevent skb_orphan() from freeing the socket, 5527 * but only if the socket refcount is not zero. 5528 */ 5529 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5530 err = sock_queue_err_skb(sk, skb); 5531 sock_put(sk); 5532 } 5533 if (err) 5534 kfree_skb(skb); 5535 } 5536 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5537 #endif /* CONFIG_WIRELESS */ 5538 5539 /** 5540 * skb_partial_csum_set - set up and verify partial csum values for packet 5541 * @skb: the skb to set 5542 * @start: the number of bytes after skb->data to start checksumming. 5543 * @off: the offset from start to place the checksum. 5544 * 5545 * For untrusted partially-checksummed packets, we need to make sure the values 5546 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5547 * 5548 * This function checks and sets those values and skb->ip_summed: if this 5549 * returns false you should drop the packet. 
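*
* Illustrative use when importing checksum metadata from an untrusted
* source (csum_start and csum_off are assumed to come from a device or
* guest descriptor):
*
*    if (!skb_partial_csum_set(skb, csum_start, csum_off)) {
*        kfree_skb(skb);
*        return -EINVAL;
*    }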
5550 */ 5551 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5552 { 5553 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5554 u32 csum_start = skb_headroom(skb) + (u32)start; 5555 5556 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5557 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5558 start, off, skb_headroom(skb), skb_headlen(skb)); 5559 return false; 5560 } 5561 skb->ip_summed = CHECKSUM_PARTIAL; 5562 skb->csum_start = csum_start; 5563 skb->csum_offset = off; 5564 skb->transport_header = csum_start; 5565 return true; 5566 } 5567 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5568 5569 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5570 unsigned int max) 5571 { 5572 if (skb_headlen(skb) >= len) 5573 return 0; 5574 5575 /* If we need to pullup then pullup to the max, so we 5576 * won't need to do it again. 5577 */ 5578 if (max > skb->len) 5579 max = skb->len; 5580 5581 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5582 return -ENOMEM; 5583 5584 if (skb_headlen(skb) < len) 5585 return -EPROTO; 5586 5587 return 0; 5588 } 5589 5590 #define MAX_TCP_HDR_LEN (15 * 4) 5591 5592 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5593 typeof(IPPROTO_IP) proto, 5594 unsigned int off) 5595 { 5596 int err; 5597 5598 switch (proto) { 5599 case IPPROTO_TCP: 5600 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5601 off + MAX_TCP_HDR_LEN); 5602 if (!err && !skb_partial_csum_set(skb, off, 5603 offsetof(struct tcphdr, 5604 check))) 5605 err = -EPROTO; 5606 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5607 5608 case IPPROTO_UDP: 5609 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5610 off + sizeof(struct udphdr)); 5611 if (!err && !skb_partial_csum_set(skb, off, 5612 offsetof(struct udphdr, 5613 check))) 5614 err = -EPROTO; 5615 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5616 } 5617 5618 return ERR_PTR(-EPROTO); 5619 } 5620 5621 /* This value should be large enough to cover a tagged ethernet header plus 5622 * maximally sized IP and TCP or UDP headers. 5623 */ 5624 #define MAX_IP_HDR_LEN 128 5625 5626 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5627 { 5628 unsigned int off; 5629 bool fragment; 5630 __sum16 *csum; 5631 int err; 5632 5633 fragment = false; 5634 5635 err = skb_maybe_pull_tail(skb, 5636 sizeof(struct iphdr), 5637 MAX_IP_HDR_LEN); 5638 if (err < 0) 5639 goto out; 5640 5641 if (ip_is_fragment(ip_hdr(skb))) 5642 fragment = true; 5643 5644 off = ip_hdrlen(skb); 5645 5646 err = -EPROTO; 5647 5648 if (fragment) 5649 goto out; 5650 5651 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5652 if (IS_ERR(csum)) 5653 return PTR_ERR(csum); 5654 5655 if (recalculate) 5656 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5657 ip_hdr(skb)->daddr, 5658 skb->len - off, 5659 ip_hdr(skb)->protocol, 0); 5660 err = 0; 5661 5662 out: 5663 return err; 5664 } 5665 5666 /* This value should be large enough to cover a tagged ethernet header plus 5667 * an IPv6 header, all options, and a maximal TCP or UDP header. 
5668 */ 5669 #define MAX_IPV6_HDR_LEN 256 5670 5671 #define OPT_HDR(type, skb, off) \ 5672 (type *)(skb_network_header(skb) + (off)) 5673 5674 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5675 { 5676 int err; 5677 u8 nexthdr; 5678 unsigned int off; 5679 unsigned int len; 5680 bool fragment; 5681 bool done; 5682 __sum16 *csum; 5683 5684 fragment = false; 5685 done = false; 5686 5687 off = sizeof(struct ipv6hdr); 5688 5689 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5690 if (err < 0) 5691 goto out; 5692 5693 nexthdr = ipv6_hdr(skb)->nexthdr; 5694 5695 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5696 while (off <= len && !done) { 5697 switch (nexthdr) { 5698 case IPPROTO_DSTOPTS: 5699 case IPPROTO_HOPOPTS: 5700 case IPPROTO_ROUTING: { 5701 struct ipv6_opt_hdr *hp; 5702 5703 err = skb_maybe_pull_tail(skb, 5704 off + 5705 sizeof(struct ipv6_opt_hdr), 5706 MAX_IPV6_HDR_LEN); 5707 if (err < 0) 5708 goto out; 5709 5710 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5711 nexthdr = hp->nexthdr; 5712 off += ipv6_optlen(hp); 5713 break; 5714 } 5715 case IPPROTO_AH: { 5716 struct ip_auth_hdr *hp; 5717 5718 err = skb_maybe_pull_tail(skb, 5719 off + 5720 sizeof(struct ip_auth_hdr), 5721 MAX_IPV6_HDR_LEN); 5722 if (err < 0) 5723 goto out; 5724 5725 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5726 nexthdr = hp->nexthdr; 5727 off += ipv6_authlen(hp); 5728 break; 5729 } 5730 case IPPROTO_FRAGMENT: { 5731 struct frag_hdr *hp; 5732 5733 err = skb_maybe_pull_tail(skb, 5734 off + 5735 sizeof(struct frag_hdr), 5736 MAX_IPV6_HDR_LEN); 5737 if (err < 0) 5738 goto out; 5739 5740 hp = OPT_HDR(struct frag_hdr, skb, off); 5741 5742 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5743 fragment = true; 5744 5745 nexthdr = hp->nexthdr; 5746 off += sizeof(struct frag_hdr); 5747 break; 5748 } 5749 default: 5750 done = true; 5751 break; 5752 } 5753 } 5754 5755 err = -EPROTO; 5756 5757 if (!done || fragment) 5758 goto out; 5759 5760 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5761 if (IS_ERR(csum)) 5762 return PTR_ERR(csum); 5763 5764 if (recalculate) 5765 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5766 &ipv6_hdr(skb)->daddr, 5767 skb->len - off, nexthdr, 0); 5768 err = 0; 5769 5770 out: 5771 return err; 5772 } 5773 5774 /** 5775 * skb_checksum_setup - set up partial checksum offset 5776 * @skb: the skb to set up 5777 * @recalculate: if true the pseudo-header checksum will be recalculated 5778 */ 5779 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5780 { 5781 int err; 5782 5783 switch (skb->protocol) { 5784 case htons(ETH_P_IP): 5785 err = skb_checksum_setup_ipv4(skb, recalculate); 5786 break; 5787 5788 case htons(ETH_P_IPV6): 5789 err = skb_checksum_setup_ipv6(skb, recalculate); 5790 break; 5791 5792 default: 5793 err = -EPROTO; 5794 break; 5795 } 5796 5797 return err; 5798 } 5799 EXPORT_SYMBOL(skb_checksum_setup); 5800 5801 /** 5802 * skb_checksum_maybe_trim - maybe trims the given skb 5803 * @skb: the skb to check 5804 * @transport_len: the data length beyond the network header 5805 * 5806 * Checks whether the given skb has data beyond the given transport length. 5807 * If so, returns a cloned skb trimmed to this transport length. 5808 * Otherwise returns the provided skb. Returns NULL in error cases 5809 * (e.g. transport_len exceeds skb length or out-of-memory). 5810 * 5811 * Caller needs to set the skb transport header and free any returned skb if it 5812 * differs from the provided skb. 
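*
* The public wrapper skb_checksum_trimmed() below is the usual entry
* point; an illustrative sketch, with my_validate_csum() standing in
* for a caller-supplied __sum16 (*)(struct sk_buff *) routine:
*
*    skb_chk = skb_checksum_trimmed(skb, transport_len, my_validate_csum);
*    if (!skb_chk)
*        goto drop;
*    ... use skb_chk ...
*    if (skb_chk != skb)
*        kfree_skb(skb_chk);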
5813 */ 5814 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5815 unsigned int transport_len) 5816 { 5817 struct sk_buff *skb_chk; 5818 unsigned int len = skb_transport_offset(skb) + transport_len; 5819 int ret; 5820 5821 if (skb->len < len) 5822 return NULL; 5823 else if (skb->len == len) 5824 return skb; 5825 5826 skb_chk = skb_clone(skb, GFP_ATOMIC); 5827 if (!skb_chk) 5828 return NULL; 5829 5830 ret = pskb_trim_rcsum(skb_chk, len); 5831 if (ret) { 5832 kfree_skb(skb_chk); 5833 return NULL; 5834 } 5835 5836 return skb_chk; 5837 } 5838 5839 /** 5840 * skb_checksum_trimmed - validate checksum of an skb 5841 * @skb: the skb to check 5842 * @transport_len: the data length beyond the network header 5843 * @skb_chkf: checksum function to use 5844 * 5845 * Applies the given checksum function skb_chkf to the provided skb. 5846 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5847 * 5848 * If the skb has data beyond the given transport length, then a 5849 * trimmed & cloned skb is checked and returned. 5850 * 5851 * Caller needs to set the skb transport header and free any returned skb if it 5852 * differs from the provided skb. 5853 */ 5854 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5855 unsigned int transport_len, 5856 __sum16(*skb_chkf)(struct sk_buff *skb)) 5857 { 5858 struct sk_buff *skb_chk; 5859 unsigned int offset = skb_transport_offset(skb); 5860 __sum16 ret; 5861 5862 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5863 if (!skb_chk) 5864 goto err; 5865 5866 if (!pskb_may_pull(skb_chk, offset)) 5867 goto err; 5868 5869 skb_pull_rcsum(skb_chk, offset); 5870 ret = skb_chkf(skb_chk); 5871 skb_push_rcsum(skb_chk, offset); 5872 5873 if (ret) 5874 goto err; 5875 5876 return skb_chk; 5877 5878 err: 5879 if (skb_chk && skb_chk != skb) 5880 kfree_skb(skb_chk); 5881 5882 return NULL; 5883 5884 } 5885 EXPORT_SYMBOL(skb_checksum_trimmed); 5886 5887 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5888 { 5889 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5890 skb->dev->name); 5891 } 5892 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5893 5894 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5895 { 5896 if (head_stolen) { 5897 skb_release_head_state(skb); 5898 kmem_cache_free(skbuff_cache, skb); 5899 } else { 5900 __kfree_skb(skb); 5901 } 5902 } 5903 EXPORT_SYMBOL(kfree_skb_partial); 5904 5905 /** 5906 * skb_try_coalesce - try to merge skb to prior one 5907 * @to: prior buffer 5908 * @from: buffer to add 5909 * @fragstolen: pointer to boolean 5910 * @delta_truesize: how much more was allocated than was requested 5911 */ 5912 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5913 bool *fragstolen, int *delta_truesize) 5914 { 5915 struct skb_shared_info *to_shinfo, *from_shinfo; 5916 int i, delta, len = from->len; 5917 5918 *fragstolen = false; 5919 5920 if (skb_cloned(to)) 5921 return false; 5922 5923 /* In general, avoid mixing page_pool and non-page_pool allocated 5924 * pages within the same SKB. In theory we could take full 5925 * references if @from is cloned and !@to->pp_recycle but its 5926 * tricky (due to potential race with the clone disappearing) and 5927 * rare, so not worth dealing with. 
5928 */ 5929 if (to->pp_recycle != from->pp_recycle) 5930 return false; 5931 5932 if (len <= skb_tailroom(to)) { 5933 if (len) 5934 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5935 *delta_truesize = 0; 5936 return true; 5937 } 5938 5939 to_shinfo = skb_shinfo(to); 5940 from_shinfo = skb_shinfo(from); 5941 if (to_shinfo->frag_list || from_shinfo->frag_list) 5942 return false; 5943 if (skb_zcopy(to) || skb_zcopy(from)) 5944 return false; 5945 5946 if (skb_headlen(from) != 0) { 5947 struct page *page; 5948 unsigned int offset; 5949 5950 if (to_shinfo->nr_frags + 5951 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5952 return false; 5953 5954 if (skb_head_is_locked(from)) 5955 return false; 5956 5957 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5958 5959 page = virt_to_head_page(from->head); 5960 offset = from->data - (unsigned char *)page_address(page); 5961 5962 skb_fill_page_desc(to, to_shinfo->nr_frags, 5963 page, offset, skb_headlen(from)); 5964 *fragstolen = true; 5965 } else { 5966 if (to_shinfo->nr_frags + 5967 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5968 return false; 5969 5970 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5971 } 5972 5973 WARN_ON_ONCE(delta < len); 5974 5975 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5976 from_shinfo->frags, 5977 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5978 to_shinfo->nr_frags += from_shinfo->nr_frags; 5979 5980 if (!skb_cloned(from)) 5981 from_shinfo->nr_frags = 0; 5982 5983 /* if the skb is not cloned this does nothing 5984 * since we set nr_frags to 0. 5985 */ 5986 if (skb_pp_frag_ref(from)) { 5987 for (i = 0; i < from_shinfo->nr_frags; i++) 5988 __skb_frag_ref(&from_shinfo->frags[i]); 5989 } 5990 5991 to->truesize += delta; 5992 to->len += len; 5993 to->data_len += len; 5994 5995 *delta_truesize = delta; 5996 return true; 5997 } 5998 EXPORT_SYMBOL(skb_try_coalesce); 5999 6000 /** 6001 * skb_scrub_packet - scrub an skb 6002 * 6003 * @skb: buffer to clean 6004 * @xnet: packet is crossing netns 6005 * 6006 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 6007 * into/from a tunnel. Some information have to be cleared during these 6008 * operations. 6009 * skb_scrub_packet can also be used to clean a skb before injecting it in 6010 * another namespace (@xnet == true). We have to clear all information in the 6011 * skb that could impact namespace isolation. 
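*
* A forwarding helper that may cross namespaces would typically do
* (illustrative sketch; @dev is the destination device):
*
*    skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));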
6012 */ 6013 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 6014 { 6015 skb->pkt_type = PACKET_HOST; 6016 skb->skb_iif = 0; 6017 skb->ignore_df = 0; 6018 skb_dst_drop(skb); 6019 skb_ext_reset(skb); 6020 nf_reset_ct(skb); 6021 nf_reset_trace(skb); 6022 6023 #ifdef CONFIG_NET_SWITCHDEV 6024 skb->offload_fwd_mark = 0; 6025 skb->offload_l3_fwd_mark = 0; 6026 #endif 6027 6028 if (!xnet) 6029 return; 6030 6031 ipvs_reset(skb); 6032 skb->mark = 0; 6033 skb_clear_tstamp(skb); 6034 } 6035 EXPORT_SYMBOL_GPL(skb_scrub_packet); 6036 6037 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 6038 { 6039 int mac_len, meta_len; 6040 void *meta; 6041 6042 if (skb_cow(skb, skb_headroom(skb)) < 0) { 6043 kfree_skb(skb); 6044 return NULL; 6045 } 6046 6047 mac_len = skb->data - skb_mac_header(skb); 6048 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 6049 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 6050 mac_len - VLAN_HLEN - ETH_TLEN); 6051 } 6052 6053 meta_len = skb_metadata_len(skb); 6054 if (meta_len) { 6055 meta = skb_metadata_end(skb) - meta_len; 6056 memmove(meta + VLAN_HLEN, meta, meta_len); 6057 } 6058 6059 skb->mac_header += VLAN_HLEN; 6060 return skb; 6061 } 6062 6063 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6064 { 6065 struct vlan_hdr *vhdr; 6066 u16 vlan_tci; 6067 6068 if (unlikely(skb_vlan_tag_present(skb))) { 6069 /* vlan_tci is already set-up so leave this for another time */ 6070 return skb; 6071 } 6072 6073 skb = skb_share_check(skb, GFP_ATOMIC); 6074 if (unlikely(!skb)) 6075 goto err_free; 6076 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6077 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6078 goto err_free; 6079 6080 vhdr = (struct vlan_hdr *)skb->data; 6081 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6082 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6083 6084 skb_pull_rcsum(skb, VLAN_HLEN); 6085 vlan_set_encap_proto(skb, vhdr); 6086 6087 skb = skb_reorder_vlan_header(skb); 6088 if (unlikely(!skb)) 6089 goto err_free; 6090 6091 skb_reset_network_header(skb); 6092 if (!skb_transport_header_was_set(skb)) 6093 skb_reset_transport_header(skb); 6094 skb_reset_mac_len(skb); 6095 6096 return skb; 6097 6098 err_free: 6099 kfree_skb(skb); 6100 return NULL; 6101 } 6102 EXPORT_SYMBOL(skb_vlan_untag); 6103 6104 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6105 { 6106 if (!pskb_may_pull(skb, write_len)) 6107 return -ENOMEM; 6108 6109 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6110 return 0; 6111 6112 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6113 } 6114 EXPORT_SYMBOL(skb_ensure_writable); 6115 6116 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6117 { 6118 int needed_headroom = dev->needed_headroom; 6119 int needed_tailroom = dev->needed_tailroom; 6120 6121 /* For tail taggers, we need to pad short frames ourselves, to ensure 6122 * that the tail tag does not fail at its role of being at the end of 6123 * the packet, once the conduit interface pads the frame. Account for 6124 * that pad length here, and pad later. 6125 */ 6126 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6127 needed_tailroom += ETH_ZLEN - skb->len; 6128 /* skb_headroom() returns unsigned int... 
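* so the shortfalls are evaluated through max_t(int, ...) below, clamping
* a negative result to zero instead of letting it wrap to a huge value.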

int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the conduit interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable_head_tail);

/* Remove the VLAN header from the packet and update the csum accordingly.
 * Expects an skb without a hardware-accelerated VLAN tag
 * (skb_vlan_tag_present() is false) but with a VLAN tag in the payload.
 */
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	int offset = skb->data - skb_mac_header(skb);
	int err;

	if (WARN_ONCE(offset,
		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
		      offset)) {
		return -EINVAL;
	}

	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vlan_remove_tag(skb, vlan_tci);

	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);

	return err;
}
EXPORT_SYMBOL(__skb_vlan_pop);

/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		__vlan_hwaccel_clear_tag(skb);
	} else {
		if (unlikely(!eth_type_vlan(skb->protocol)))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(!eth_type_vlan(skb->protocol)))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
 * Expects skb->data at mac header.
 */
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		int offset = skb->data - skb_mac_header(skb);
		int err;

		if (WARN_ONCE(offset,
			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
			      offset)) {
			return -EINVAL;
		}

		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;

		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;

		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
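
/* Illustrative sketch (not part of the original file): re-tagging a frame the
 * way a datapath action might, using the pop/push pair above.  Assumes
 * skb->data is at the mac header as both helpers require; the function is
 * hypothetical.
 */
static int example_vlan_retag(struct sk_buff *skb, u16 new_vid)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}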

/**
 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
 *
 * @skb: Socket buffer to modify
 *
 * Drop the Ethernet header of @skb.
 *
 * Expects that skb->data points to the mac header and that no VLAN tags are
 * present.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_eth_pop(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
	    skb_network_offset(skb) < ETH_HLEN)
		return -EPROTO;

	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return 0;
}
EXPORT_SYMBOL(skb_eth_pop);

/**
 * skb_eth_push() - Add a new Ethernet header at the head of a packet
 *
 * @skb: Socket buffer to modify
 * @dst: Destination MAC address of the new header
 * @src: Source MAC address of the new header
 *
 * Prepend @skb with a new Ethernet header.
 *
 * Expects that skb->data points to the mac header, which must be empty.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
		 const unsigned char *src)
{
	struct ethhdr *eth;
	int err;

	if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
		return -EPROTO;

	err = skb_cow_head(skb, sizeof(*eth));
	if (err < 0)
		return err;

	skb_push(skb, sizeof(*eth));
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	eth = eth_hdr(skb);
	ether_addr_copy(eth->h_dest, dst);
	ether_addr_copy(eth->h_source, src);
	eth->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, eth, sizeof(*eth));

	return 0;
}
EXPORT_SYMBOL(skb_eth_push);

/* Update the ethertype of hdr and the skb csum value if required. */
static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~hdr->h_proto, ethertype };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	hdr->h_proto = ethertype;
}
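
/* Illustrative sketch (not part of the original file): an L3 tunnel ingress
 * might strip the Ethernet header with skb_eth_pop() and rebuild it later
 * with skb_eth_push() once new addresses are known.  Hypothetical helper.
 */
static int example_rewrite_eth(struct sk_buff *skb, const u8 *dst,
			       const u8 *src)
{
	int err = skb_eth_pop(skb);

	if (err)
		return err;

	/* ... e.g. a routing/neighbour lookup picks dst/src here ... */

	return skb_eth_push(skb, dst, src);
}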

/**
 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
 *		     the packet
 *
 * @skb: buffer
 * @mpls_lse: MPLS label stack entry to push
 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
 *	      ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet)
{
	struct mpls_shim_hdr *lse;
	int err;

	if (unlikely(!eth_p_mpls(mpls_proto)))
		return -EINVAL;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -EINVAL;

	err = skb_cow_head(skb, MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb_network_offset(skb));
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);
	skb_reset_mac_len(skb);

	lse = mpls_hdr(skb);
	lse->label_stack_entry = mpls_lse;
	skb_postpush_rcsum(skb, lse, MPLS_HLEN);

	if (ethernet && mac_len >= ETH_HLEN)
		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
	skb->protocol = mpls_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_push);

/**
 * skb_mpls_pop() - pop the outermost MPLS header
 *
 * @skb: buffer
 * @next_proto: ethertype of header after popped MPLS header
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the packet is ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return 0;

	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);

	if (ethernet && mac_len >= ETH_HLEN) {
		struct ethhdr *hdr;

		/* use mpls_hdr() to get ethertype to account for VLANs. */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		skb_mod_eth_type(skb, hdr, next_proto);
	}
	skb->protocol = next_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_pop);

/**
 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
 *
 * @skb: buffer
 * @mpls_lse: new MPLS label stack entry to update to
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	mpls_hdr(skb)->label_stack_entry = mpls_lse;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
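
/* Illustrative sketch (not part of the original file): swapping the label in
 * the outermost LSE via skb_mpls_update_lse().  Assumes the MPLS header is in
 * the linear area and skb->data is at the mac header; hypothetical helper.
 */
static int example_mpls_swap_label(struct sk_buff *skb, u32 new_label)
{
	u32 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);

	lse &= ~MPLS_LS_LABEL_MASK;
	lse |= (new_label << MPLS_LS_LABEL_SHIFT) & MPLS_LS_LABEL_MASK;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}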

/**
 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
 *
 * @skb: buffer
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_dec_ttl(struct sk_buff *skb)
{
	u32 lse;
	u8 ttl;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
	if (!--ttl)
		return -EINVAL;

	lse &= ~MPLS_LS_TTL_MASK;
	lse |= ttl << MPLS_LS_TTL_SHIFT;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}
EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	int nr_frags = 0;

	*errcode = -EMSGSIZE;
	if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
		return NULL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_mask);
	if (!skb)
		return NULL;

	while (data_len) {
		if (nr_frags == MAX_SKB_FRAGS - 1)
			goto failure;
		while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
			order--;

		if (order) {
			page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
					   __GFP_COMP |
					   __GFP_NOWARN,
					   order);
			if (!page) {
				order--;
				continue;
			}
		} else {
			page = alloc_page(gfp_mask);
			if (!page)
				goto failure;
		}
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
		nr_frags++;
		skb->truesize += (PAGE_SIZE << order);
		data_len -= chunk;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
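
/* Illustrative sketch (not part of the original file): allocating a paged skb
 * with a small linear area for headers and the payload placed in order-0
 * page frags.  Sizes and the helper name are hypothetical.
 */
static struct sk_buff *example_alloc_paged_skb(size_t payload, gfp_t gfp)
{
	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(MAX_TCP_HEADER, payload, 0, &err, gfp);
	if (!skb)
		return NULL;

	skb_reserve(skb, MAX_TCP_HEADER);	/* leave room to push headers */
	return skb;
}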

/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	unsigned int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;
	size = SKB_WITH_OVERHEAD(size);

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			skb_kfree_head(data, size);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb, SKB_CONSUMED, false);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb, false);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
	skb_set_end_offset(skb, size);
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		consume_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}

/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	unsigned int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;
	size = SKB_WITH_OVERHEAD(size);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		skb_kfree_head(data, size);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we do here.
				 */
6703 */ 6704 skb_frag_off_add(&shinfo->frags[0], off - pos); 6705 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6706 } 6707 skb_frag_ref(skb, i); 6708 k++; 6709 } 6710 pos += fsize; 6711 } 6712 shinfo->nr_frags = k; 6713 if (skb_has_frag_list(skb)) 6714 skb_clone_fraglist(skb); 6715 6716 /* split line is in frag list */ 6717 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6718 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6719 if (skb_has_frag_list(skb)) 6720 kfree_skb_list(skb_shinfo(skb)->frag_list); 6721 skb_kfree_head(data, size); 6722 return -ENOMEM; 6723 } 6724 skb_release_data(skb, SKB_CONSUMED, false); 6725 6726 skb->head = data; 6727 skb->head_frag = 0; 6728 skb->data = data; 6729 skb_set_end_offset(skb, size); 6730 skb_reset_tail_pointer(skb); 6731 skb_headers_offset_update(skb, 0); 6732 skb->cloned = 0; 6733 skb->hdr_len = 0; 6734 skb->nohdr = 0; 6735 skb->len -= off; 6736 skb->data_len = skb->len; 6737 atomic_set(&skb_shinfo(skb)->dataref, 1); 6738 return 0; 6739 } 6740 6741 /* remove len bytes from the beginning of the skb */ 6742 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6743 { 6744 int headlen = skb_headlen(skb); 6745 6746 if (len < headlen) 6747 return pskb_carve_inside_header(skb, len, headlen, gfp); 6748 else 6749 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6750 } 6751 6752 /* Extract to_copy bytes starting at off from skb, and return this in 6753 * a new skb 6754 */ 6755 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6756 int to_copy, gfp_t gfp) 6757 { 6758 struct sk_buff *clone = skb_clone(skb, gfp); 6759 6760 if (!clone) 6761 return NULL; 6762 6763 if (pskb_carve(clone, off, gfp) < 0 || 6764 pskb_trim(clone, to_copy)) { 6765 kfree_skb(clone); 6766 return NULL; 6767 } 6768 return clone; 6769 } 6770 EXPORT_SYMBOL(pskb_extract); 6771 6772 /** 6773 * skb_condense - try to get rid of fragments/frag_list if possible 6774 * @skb: buffer 6775 * 6776 * Can be used to save memory before skb is added to a busy queue. 6777 * If packet has bytes in frags and enough tail room in skb->head, 6778 * pull all of them, so that we can free the frags right now and adjust 6779 * truesize. 6780 * Notes: 6781 * We do not reallocate skb->head thus can not fail. 6782 * Caller must re-evaluate skb->truesize if needed. 6783 */ 6784 void skb_condense(struct sk_buff *skb) 6785 { 6786 if (skb->data_len) { 6787 if (skb->data_len > skb->end - skb->tail || 6788 skb_cloned(skb)) 6789 return; 6790 6791 /* Nice, we can free page frag(s) right now */ 6792 __pskb_pull_tail(skb, skb->data_len); 6793 } 6794 /* At this point, skb->truesize might be over estimated, 6795 * because skb had a fragment, and fragments do not tell 6796 * their truesize. 6797 * When we pulled its content into skb->head, fragment 6798 * was freed, but __pskb_pull_tail() could not possibly 6799 * adjust skb->truesize, not knowing the frag truesize. 6800 */ 6801 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6802 } 6803 EXPORT_SYMBOL(skb_condense); 6804 6805 #ifdef CONFIG_SKB_EXTENSIONS 6806 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6807 { 6808 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6809 } 6810 6811 /** 6812 * __skb_ext_alloc - allocate a new skb extensions storage 6813 * 6814 * @flags: See kmalloc(). 6815 * 6816 * Returns the newly allocated pointer. The pointer can later attached to a 6817 * skb via __skb_ext_set(). 6818 * Note: caller must handle the skb_ext as an opaque data. 

#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be attached to a
 * skb via __skb_ext_set().
 * Note: caller must handle the skb_ext as opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
#ifdef CONFIG_MCTP_FLOWS
	if (old_active & (1 << SKB_EXT_MCTP)) {
		struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);

		if (flow->key)
			refcount_inc(&flow->key->refs);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}
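
/* Illustrative sketch (not part of the original file): the split
 * __skb_ext_alloc()/__skb_ext_set() API lets a caller allocate extension
 * storage in a context where sleeping is fine and attach it later.  The
 * helper below is hypothetical, only built with CONFIG_XFRM, and leaves
 * initialisation of the returned sec_path to its caller.
 */
#ifdef CONFIG_XFRM
static struct sec_path *example_attach_secpath(struct sk_buff *skb,
					       struct skb_ext *ext)
{
	if (!ext)
		ext = __skb_ext_alloc(GFP_KERNEL);
	if (!ext)
		return NULL;

	return __skb_ext_set(skb, SKB_EXT_SEC_PATH, ext);
}
#endif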

/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = __skb_ext_alloc(GFP_ATOMIC);
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->slow_gro = 1;
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);

#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

#ifdef CONFIG_MCTP_FLOWS
static void skb_ext_put_mctp(struct mctp_flow *flow)
{
	if (flow->key)
		mctp_key_unref(flow->key);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is last clone, nothing can increment
	 * it after check passes.  Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif
#ifdef CONFIG_MCTP_FLOWS
	if (__skb_ext_exist(ext, SKB_EXT_MCTP))
		skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
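
/* Illustrative sketch (not part of the original file): a subsystem attaches
 * its extension with skb_ext_add() and then fills it in.  SKB_EXT_MPTCP is
 * one of the real extension ids; the surrounding helper is hypothetical.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static struct mptcp_ext *example_add_mptcp_ext(struct sk_buff *skb)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext)
		memset(mpext, 0, sizeof(*mpext));
	return mpext;
}
#endif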

/**
 * skb_attempt_defer_free - queue skb for remote freeing
 * @skb: buffer
 *
 * Put @skb in a per-cpu list, using the cpu which
 * allocated the skb/pages to reduce false sharing
 * and memory zone spinlock contention.
 */
void skb_attempt_defer_free(struct sk_buff *skb)
{
	int cpu = skb->alloc_cpu;
	struct softnet_data *sd;
	unsigned int defer_max;
	bool kick;

	if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
	    !cpu_online(cpu) ||
	    cpu == raw_smp_processor_id()) {
nodefer:	__kfree_skb(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(skb->destructor);

	sd = &per_cpu(softnet_data, cpu);
	defer_max = READ_ONCE(sysctl_skb_defer_max);
	if (READ_ONCE(sd->defer_count) >= defer_max)
		goto nodefer;

	spin_lock_bh(&sd->defer_lock);
	/* Send an IPI every time queue reaches half capacity. */
	kick = sd->defer_count == (defer_max >> 1);
	/* Paired with the READ_ONCE() few lines above */
	WRITE_ONCE(sd->defer_count, sd->defer_count + 1);

	skb->next = sd->defer_list;
	/* Paired with READ_ONCE() in skb_defer_free_flush() */
	WRITE_ONCE(sd->defer_list, skb);
	spin_unlock_bh(&sd->defer_lock);

	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
	 * if we are unlucky enough (this seems very unlikely).
	 */
	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
		smp_call_function_single_async(cpu, &sd->defer_csd);
}

static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
				 size_t offset, size_t len)
{
	const char *kaddr;
	__wsum csum;

	kaddr = kmap_local_page(page);
	csum = csum_partial(kaddr + offset, len, 0);
	kunmap_local(kaddr);
	skb->csum = csum_block_add(skb->csum, csum, skb->len);
}

/**
 * skb_splice_from_iter - Splice (or copy) pages to skbuff
 * @skb: The buffer to add pages to
 * @iter: Iterator representing the pages to be added
 * @maxsize: Maximum amount of data to be added
 * @gfp: Allocation flags
 *
 * This is a common helper function for supporting MSG_SPLICE_PAGES.  It
 * extracts pages from an iterator and adds them to the socket buffer if
 * possible, copying them to fragments if not possible (such as if they're
 * slab pages).
 *
 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
 * insufficient space in the buffer to transfer anything.
 */
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
			     ssize_t maxsize, gfp_t gfp)
{
	size_t frag_limit = READ_ONCE(sysctl_max_skb_frags);
	struct page *pages[8], **ppages = pages;
	ssize_t spliced = 0, ret = 0;
	unsigned int i;

	while (iter->count > 0) {
		ssize_t space, nr, len;
		size_t off;

		ret = -EMSGSIZE;
		space = frag_limit - skb_shinfo(skb)->nr_frags;
		if (space < 0)
			break;

		/* We might be able to coalesce without increasing nr_frags */
		nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages));

		len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
		if (len <= 0) {
			ret = len ?: -EIO;
			break;
		}

		i = 0;
		do {
			struct page *page = pages[i++];
			size_t part = min_t(size_t, PAGE_SIZE - off, len);

			ret = -EIO;
			if (WARN_ON_ONCE(!sendpage_ok(page)))
				goto out;

			ret = skb_append_pagefrags(skb, page, off, part,
						   frag_limit);
			if (ret < 0) {
				iov_iter_revert(iter, len);
				goto out;
			}

			if (skb->ip_summed == CHECKSUM_NONE)
				skb_splice_csum_page(skb, page, off, part);

			off = 0;
			spliced += part;
			maxsize -= part;
			len -= part;
		} while (len > 0);

		if (maxsize <= 0)
			break;
	}

out:
	skb_len_add(skb, spliced);
	return spliced ?: ret;
}
EXPORT_SYMBOL(skb_splice_from_iter);
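
/* Illustrative sketch (not part of the original file): a sendmsg()
 * implementation handling MSG_SPLICE_PAGES can hand its iterator straight to
 * skb_splice_from_iter(); queueing, locking and wake-ups are omitted and the
 * helper name is hypothetical.
 */
static ssize_t example_splice_to_skb(struct sk_buff *skb, struct msghdr *msg,
				     size_t len)
{
	if (!(msg->msg_flags & MSG_SPLICE_PAGES))
		return -EINVAL;

	/* On success the skb length fields were already updated. */
	return skb_splice_from_iter(skb, &msg->msg_iter, len, GFP_KERNEL);
}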

static __always_inline
size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
			     size_t len, void *to, void *priv2)
{
	__wsum *csum = priv2;
	__wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);

	*csum = csum_block_add(*csum, next, progress);
	return 0;
}

static __always_inline
size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
				size_t len, void *to, void *priv2)
{
	__wsum next, *csum = priv2;

	next = csum_and_copy_from_user(iter_from, to + progress, len);
	*csum = csum_block_add(*csum, next, progress);
	return next ? 0 : len;
}

bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied;

	if (WARN_ON_ONCE(!i->data_source))
		return false;
	copied = iterate_and_advance2(i, bytes, addr, csum,
				      copy_from_user_iter_csum,
				      memcpy_from_iter_csum);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
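
/* Illustrative sketch (not part of the original file): a getfrag()-style
 * callback for a CHECKSUM_NONE transmit path copies user data into the skb
 * while folding it into skb->csum, roughly as ip_generic_getfrag() does.
 * The helper below is hypothetical.
 */
static int example_copy_and_csum(void *to, struct iov_iter *from, int offset,
				 int len, struct sk_buff *skb)
{
	__wsum csum = 0;

	if (!csum_and_copy_from_iter_full(to, len, &csum, from))
		return -EFAULT;

	skb->csum = csum_block_add(skb->csum, csum, offset);
	return 0;
}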