1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Routines having to do with the 'struct sk_buff' memory handlers. 4 * 5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 6 * Florian La Roche <rzsfl@rz.uni-sb.de> 7 * 8 * Fixes: 9 * Alan Cox : Fixed the worst of the load 10 * balancer bugs. 11 * Dave Platt : Interrupt stacking fix. 12 * Richard Kooijman : Timestamp fixes. 13 * Alan Cox : Changed buffer format. 14 * Alan Cox : destructor hook for AF_UNIX etc. 15 * Linus Torvalds : Better skb_clone. 16 * Alan Cox : Added skb_copy. 17 * Alan Cox : Added all the changed routines Linus 18 * only put in the headers 19 * Ray VanTassle : Fixed --skb->lock in free 20 * Alan Cox : skb_copy copy arp field 21 * Andi Kleen : slabified it. 22 * Robert Olsson : Removed skb_head_pool 23 * 24 * NOTE: 25 * The __skb_ routines should be called with interrupts 26 * disabled, or you better be *real* sure that the operation is atomic 27 * with respect to whatever list is being frobbed (e.g. via lock_sock() 28 * or via disabling bottom half handlers, etc). 29 */ 30 31 /* 32 * The functions in this file will not compile correctly with gcc 2.4.x 33 */ 34 35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 36 37 #include <linux/module.h> 38 #include <linux/types.h> 39 #include <linux/kernel.h> 40 #include <linux/mm.h> 41 #include <linux/interrupt.h> 42 #include <linux/in.h> 43 #include <linux/inet.h> 44 #include <linux/slab.h> 45 #include <linux/tcp.h> 46 #include <linux/udp.h> 47 #include <linux/sctp.h> 48 #include <linux/netdevice.h> 49 #ifdef CONFIG_NET_CLS_ACT 50 #include <net/pkt_sched.h> 51 #endif 52 #include <linux/string.h> 53 #include <linux/skbuff.h> 54 #include <linux/skbuff_ref.h> 55 #include <linux/splice.h> 56 #include <linux/cache.h> 57 #include <linux/rtnetlink.h> 58 #include <linux/init.h> 59 #include <linux/scatterlist.h> 60 #include <linux/errqueue.h> 61 #include <linux/prefetch.h> 62 #include <linux/bitfield.h> 63 #include <linux/if_vlan.h> 64 #include <linux/mpls.h> 65 #include <linux/kcov.h> 66 #include <linux/iov_iter.h> 67 68 #include <net/protocol.h> 69 #include <net/dst.h> 70 #include <net/sock.h> 71 #include <net/checksum.h> 72 #include <net/gso.h> 73 #include <net/hotdata.h> 74 #include <net/ip6_checksum.h> 75 #include <net/xfrm.h> 76 #include <net/mpls.h> 77 #include <net/mptcp.h> 78 #include <net/mctp.h> 79 #include <net/page_pool/helpers.h> 80 #include <net/dropreason.h> 81 82 #include <linux/uaccess.h> 83 #include <trace/events/skb.h> 84 #include <linux/highmem.h> 85 #include <linux/capability.h> 86 #include <linux/user_namespace.h> 87 #include <linux/indirect_call_wrapper.h> 88 #include <linux/textsearch.h> 89 90 #include "dev.h" 91 #include "sock_destructor.h" 92 93 #ifdef CONFIG_SKB_EXTENSIONS 94 static struct kmem_cache *skbuff_ext_cache __ro_after_init; 95 #endif 96 97 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) 98 99 /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two. 100 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique 101 * size, and we can differentiate heads from skb_small_head_cache 102 * vs system slabs by looking at their size (skb_end_offset()). 103 */ 104 #define SKB_SMALL_HEAD_CACHE_SIZE \ 105 (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \ 106 (SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \ 107 SKB_SMALL_HEAD_SIZE) 108 109 #define SKB_SMALL_HEAD_HEADROOM \ 110 SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) 111 112 /* kcm_write_msgs() relies on casting paged frags to bio_vec to use 113 * iov_iter_bvec(). 
These static asserts ensure the cast is valid as long as the
 * netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragments. Constrained to systems
 * with a very limited amount of 1K fragments fitting a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif

struct napi_alloc_cache {
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
294 */ 295 void napi_get_frags_check(struct napi_struct *napi) 296 { 297 struct sk_buff *skb; 298 299 local_bh_disable(); 300 skb = napi_get_frags(napi); 301 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); 302 napi_free_frags(napi); 303 local_bh_enable(); 304 } 305 306 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 307 { 308 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 309 310 fragsz = SKB_DATA_ALIGN(fragsz); 311 312 return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 313 align_mask); 314 } 315 EXPORT_SYMBOL(__napi_alloc_frag_align); 316 317 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 318 { 319 void *data; 320 321 fragsz = SKB_DATA_ALIGN(fragsz); 322 if (in_hardirq() || irqs_disabled()) { 323 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); 324 325 data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, 326 align_mask); 327 } else { 328 struct napi_alloc_cache *nc; 329 330 local_bh_disable(); 331 nc = this_cpu_ptr(&napi_alloc_cache); 332 data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 333 align_mask); 334 local_bh_enable(); 335 } 336 return data; 337 } 338 EXPORT_SYMBOL(__netdev_alloc_frag_align); 339 340 static struct sk_buff *napi_skb_cache_get(void) 341 { 342 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 343 struct sk_buff *skb; 344 345 if (unlikely(!nc->skb_count)) { 346 nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, 347 GFP_ATOMIC, 348 NAPI_SKB_CACHE_BULK, 349 nc->skb_cache); 350 if (unlikely(!nc->skb_count)) 351 return NULL; 352 } 353 354 skb = nc->skb_cache[--nc->skb_count]; 355 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); 356 357 return skb; 358 } 359 360 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, 361 unsigned int size) 362 { 363 struct skb_shared_info *shinfo; 364 365 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 366 367 /* Assumes caller memset cleared SKB */ 368 skb->truesize = SKB_TRUESIZE(size); 369 refcount_set(&skb->users, 1); 370 skb->head = data; 371 skb->data = data; 372 skb_reset_tail_pointer(skb); 373 skb_set_end_offset(skb, size); 374 skb->mac_header = (typeof(skb->mac_header))~0U; 375 skb->transport_header = (typeof(skb->transport_header))~0U; 376 skb->alloc_cpu = raw_smp_processor_id(); 377 /* make sure we initialize shinfo sequentially */ 378 shinfo = skb_shinfo(skb); 379 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 380 atomic_set(&shinfo->dataref, 1); 381 382 skb_set_kcov_handle(skb, kcov_common_handle()); 383 } 384 385 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, 386 unsigned int *size) 387 { 388 void *resized; 389 390 /* Must find the allocation size (and grow it to match). */ 391 *size = ksize(data); 392 /* krealloc() will immediately return "data" when 393 * "ksize(data)" is requested: it is the existing upper 394 * bounds. As a result, GFP_ATOMIC will be ignored. Note 395 * that this "new" pointer needs to be passed back to the 396 * caller for use so the __alloc_size hinting will be 397 * tracked correctly. 398 */ 399 resized = krealloc(data, *size, GFP_ATOMIC); 400 WARN_ON_ONCE(resized != data); 401 return resized; 402 } 403 404 /* build_skb() variant which can operate on slab buffers. 405 * Note that this should be used sparingly as slab buffers 406 * cannot be combined efficiently by GRO! 
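 *
 * A minimal usage sketch (hypothetical caller and helpers, error handling
 * trimmed). Note that on failure the buffer is not taken over and must
 * still be freed by the caller:
 *
 *	buf = kmalloc(buf_len, GFP_ATOMIC);
 *	fill_rx_data(buf, buf_len);
 *	skb = slab_build_skb(buf);
 *	if (!skb)
 *		kfree(buf);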
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
518 * 519 * Returns a new &sk_buff on success, %NULL on allocation failure. 520 */ 521 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size) 522 { 523 struct sk_buff *skb; 524 525 skb = napi_skb_cache_get(); 526 if (unlikely(!skb)) 527 return NULL; 528 529 memset(skb, 0, offsetof(struct sk_buff, tail)); 530 __build_skb_around(skb, data, frag_size); 531 532 return skb; 533 } 534 535 /** 536 * napi_build_skb - build a network buffer 537 * @data: data buffer provided by caller 538 * @frag_size: size of data 539 * 540 * Version of __napi_build_skb() that takes care of skb->head_frag 541 * and skb->pfmemalloc when the data is a page or page fragment. 542 * 543 * Returns a new &sk_buff on success, %NULL on allocation failure. 544 */ 545 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) 546 { 547 struct sk_buff *skb = __napi_build_skb(data, frag_size); 548 549 if (likely(skb) && frag_size) { 550 skb->head_frag = 1; 551 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 552 } 553 554 return skb; 555 } 556 EXPORT_SYMBOL(napi_build_skb); 557 558 /* 559 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 560 * the caller if emergency pfmemalloc reserves are being used. If it is and 561 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 562 * may be used. Otherwise, the packet data may be discarded until enough 563 * memory is free 564 */ 565 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, 566 bool *pfmemalloc) 567 { 568 bool ret_pfmemalloc = false; 569 size_t obj_size; 570 void *obj; 571 572 obj_size = SKB_HEAD_ALIGN(*size); 573 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && 574 !(flags & KMALLOC_NOT_NORMAL_BITS)) { 575 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, 576 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 577 node); 578 *size = SKB_SMALL_HEAD_CACHE_SIZE; 579 if (obj || !(gfp_pfmemalloc_allowed(flags))) 580 goto out; 581 /* Try again but now we are using pfmemalloc reserves */ 582 ret_pfmemalloc = true; 583 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node); 584 goto out; 585 } 586 587 obj_size = kmalloc_size_roundup(obj_size); 588 /* The following cast might truncate high-order bits of obj_size, this 589 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway. 590 */ 591 *size = (unsigned int)obj_size; 592 593 /* 594 * Try a regular allocation, when that fails and we're not entitled 595 * to the reserves, fail. 596 */ 597 obj = kmalloc_node_track_caller(obj_size, 598 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 599 node); 600 if (obj || !(gfp_pfmemalloc_allowed(flags))) 601 goto out; 602 603 /* Try again but now we are using pfmemalloc reserves */ 604 ret_pfmemalloc = true; 605 obj = kmalloc_node_track_caller(obj_size, flags, node); 606 607 out: 608 if (pfmemalloc) 609 *pfmemalloc = ret_pfmemalloc; 610 611 return obj; 612 } 613 614 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 615 * 'private' fields and also do memory statistics to find all the 616 * [BEEP] leaks. 617 * 618 */ 619 620 /** 621 * __alloc_skb - allocate a network buffer 622 * @size: size to allocate 623 * @gfp_mask: allocation mask 624 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 625 * instead of head cache and allocate a cloned (child) skb. 
626 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 627 * allocations in case the data is required for writeback 628 * @node: numa node to allocate memory on 629 * 630 * Allocate a new &sk_buff. The returned buffer has no headroom and a 631 * tail room of at least size bytes. The object has a reference count 632 * of one. The return is the buffer. On a failure the return is %NULL. 633 * 634 * Buffers may only be allocated from interrupts using a @gfp_mask of 635 * %GFP_ATOMIC. 636 */ 637 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 638 int flags, int node) 639 { 640 struct kmem_cache *cache; 641 struct sk_buff *skb; 642 bool pfmemalloc; 643 u8 *data; 644 645 cache = (flags & SKB_ALLOC_FCLONE) 646 ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache; 647 648 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 649 gfp_mask |= __GFP_MEMALLOC; 650 651 /* Get the HEAD */ 652 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI && 653 likely(node == NUMA_NO_NODE || node == numa_mem_id())) 654 skb = napi_skb_cache_get(); 655 else 656 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); 657 if (unlikely(!skb)) 658 return NULL; 659 prefetchw(skb); 660 661 /* We do our best to align skb_shared_info on a separate cache 662 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 663 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 664 * Both skb->head and skb_shared_info are cache line aligned. 665 */ 666 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc); 667 if (unlikely(!data)) 668 goto nodata; 669 /* kmalloc_size_roundup() might give us more room than requested. 670 * Put skb_shared_info exactly at the end of allocated zone, 671 * to allow max possible filling before reallocation. 672 */ 673 prefetchw(data + SKB_WITH_OVERHEAD(size)); 674 675 /* 676 * Only clear those fields we need to clear, not those that we will 677 * actually initialise below. Hence, don't put any more fields after 678 * the tail pointer in struct sk_buff! 679 */ 680 memset(skb, 0, offsetof(struct sk_buff, tail)); 681 __build_skb_around(skb, data, size); 682 skb->pfmemalloc = pfmemalloc; 683 684 if (flags & SKB_ALLOC_FCLONE) { 685 struct sk_buff_fclones *fclones; 686 687 fclones = container_of(skb, struct sk_buff_fclones, skb1); 688 689 skb->fclone = SKB_FCLONE_ORIG; 690 refcount_set(&fclones->fclone_ref, 1); 691 } 692 693 return skb; 694 695 nodata: 696 kmem_cache_free(cache, skb); 697 return NULL; 698 } 699 EXPORT_SYMBOL(__alloc_skb); 700 701 /** 702 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 703 * @dev: network device to receive on 704 * @len: length to allocate 705 * @gfp_mask: get_free_pages mask, passed to alloc_skb 706 * 707 * Allocate a new &sk_buff and assign it a usage count of one. The 708 * buffer has NET_SKB_PAD headroom built in. Users should allocate 709 * the headroom they think they need without accounting for the 710 * built in space. The built in space is used for optimisations. 711 * 712 * %NULL is returned if there is no free memory. 713 */ 714 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 715 gfp_t gfp_mask) 716 { 717 struct page_frag_cache *nc; 718 struct sk_buff *skb; 719 bool pfmemalloc; 720 void *data; 721 722 len += NET_SKB_PAD; 723 724 /* If requested length is either too small or too big, 725 * we use kmalloc() for skb->head allocation. 
726 */ 727 if (len <= SKB_WITH_OVERHEAD(1024) || 728 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 729 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 730 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 731 if (!skb) 732 goto skb_fail; 733 goto skb_success; 734 } 735 736 len = SKB_HEAD_ALIGN(len); 737 738 if (sk_memalloc_socks()) 739 gfp_mask |= __GFP_MEMALLOC; 740 741 if (in_hardirq() || irqs_disabled()) { 742 nc = this_cpu_ptr(&netdev_alloc_cache); 743 data = page_frag_alloc(nc, len, gfp_mask); 744 pfmemalloc = nc->pfmemalloc; 745 } else { 746 local_bh_disable(); 747 nc = this_cpu_ptr(&napi_alloc_cache.page); 748 data = page_frag_alloc(nc, len, gfp_mask); 749 pfmemalloc = nc->pfmemalloc; 750 local_bh_enable(); 751 } 752 753 if (unlikely(!data)) 754 return NULL; 755 756 skb = __build_skb(data, len); 757 if (unlikely(!skb)) { 758 skb_free_frag(data); 759 return NULL; 760 } 761 762 if (pfmemalloc) 763 skb->pfmemalloc = 1; 764 skb->head_frag = 1; 765 766 skb_success: 767 skb_reserve(skb, NET_SKB_PAD); 768 skb->dev = dev; 769 770 skb_fail: 771 return skb; 772 } 773 EXPORT_SYMBOL(__netdev_alloc_skb); 774 775 /** 776 * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 777 * @napi: napi instance this buffer was allocated for 778 * @len: length to allocate 779 * 780 * Allocate a new sk_buff for use in NAPI receive. This buffer will 781 * attempt to allocate the head from a special reserved region used 782 * only for NAPI Rx allocation. By doing this we can save several 783 * CPU cycles by avoiding having to disable and re-enable IRQs. 784 * 785 * %NULL is returned if there is no free memory. 786 */ 787 struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) 788 { 789 gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN; 790 struct napi_alloc_cache *nc; 791 struct sk_buff *skb; 792 bool pfmemalloc; 793 void *data; 794 795 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 796 len += NET_SKB_PAD + NET_IP_ALIGN; 797 798 /* If requested length is either too small or too big, 799 * we use kmalloc() for skb->head allocation. 800 * When the small frag allocator is available, prefer it over kmalloc 801 * for small fragments 802 */ 803 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || 804 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 805 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 806 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, 807 NUMA_NO_NODE); 808 if (!skb) 809 goto skb_fail; 810 goto skb_success; 811 } 812 813 nc = this_cpu_ptr(&napi_alloc_cache); 814 815 if (sk_memalloc_socks()) 816 gfp_mask |= __GFP_MEMALLOC; 817 818 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) { 819 /* we are artificially inflating the allocation size, but 820 * that is not as bad as it may look like, as: 821 * - 'len' less than GRO_MAX_HEAD makes little sense 822 * - On most systems, larger 'len' values lead to fragment 823 * size above 512 bytes 824 * - kmalloc would use the kmalloc-1k slab for such values 825 * - Builds with smaller GRO_MAX_HEAD will very likely do 826 * little networking, as that implies no WiFi and no 827 * tunnels support, and 32 bits arches. 
828 */ 829 len = SZ_1K; 830 831 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); 832 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); 833 } else { 834 len = SKB_HEAD_ALIGN(len); 835 836 data = page_frag_alloc(&nc->page, len, gfp_mask); 837 pfmemalloc = nc->page.pfmemalloc; 838 } 839 840 if (unlikely(!data)) 841 return NULL; 842 843 skb = __napi_build_skb(data, len); 844 if (unlikely(!skb)) { 845 skb_free_frag(data); 846 return NULL; 847 } 848 849 if (pfmemalloc) 850 skb->pfmemalloc = 1; 851 skb->head_frag = 1; 852 853 skb_success: 854 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 855 skb->dev = napi->dev; 856 857 skb_fail: 858 return skb; 859 } 860 EXPORT_SYMBOL(napi_alloc_skb); 861 862 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, 863 int off, int size, unsigned int truesize) 864 { 865 DEBUG_NET_WARN_ON_ONCE(size > truesize); 866 867 skb_fill_netmem_desc(skb, i, netmem, off, size); 868 skb->len += size; 869 skb->data_len += size; 870 skb->truesize += truesize; 871 } 872 EXPORT_SYMBOL(skb_add_rx_frag_netmem); 873 874 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 875 unsigned int truesize) 876 { 877 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 878 879 DEBUG_NET_WARN_ON_ONCE(size > truesize); 880 881 skb_frag_size_add(frag, size); 882 skb->len += size; 883 skb->data_len += size; 884 skb->truesize += truesize; 885 } 886 EXPORT_SYMBOL(skb_coalesce_rx_frag); 887 888 static void skb_drop_list(struct sk_buff **listp) 889 { 890 kfree_skb_list(*listp); 891 *listp = NULL; 892 } 893 894 static inline void skb_drop_fraglist(struct sk_buff *skb) 895 { 896 skb_drop_list(&skb_shinfo(skb)->frag_list); 897 } 898 899 static void skb_clone_fraglist(struct sk_buff *skb) 900 { 901 struct sk_buff *list; 902 903 skb_walk_frags(skb, list) 904 skb_get(list); 905 } 906 907 static bool is_pp_page(struct page *page) 908 { 909 return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; 910 } 911 912 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, 913 unsigned int headroom) 914 { 915 #if IS_ENABLED(CONFIG_PAGE_POOL) 916 u32 size, truesize, len, max_head_size, off; 917 struct sk_buff *skb = *pskb, *nskb; 918 int err, i, head_off; 919 void *data; 920 921 /* XDP does not support fraglist so we need to linearize 922 * the skb. 
923 */ 924 if (skb_has_frag_list(skb)) 925 return -EOPNOTSUPP; 926 927 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); 928 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) 929 return -ENOMEM; 930 931 size = min_t(u32, skb->len, max_head_size); 932 truesize = SKB_HEAD_ALIGN(size) + headroom; 933 data = page_pool_dev_alloc_va(pool, &truesize); 934 if (!data) 935 return -ENOMEM; 936 937 nskb = napi_build_skb(data, truesize); 938 if (!nskb) { 939 page_pool_free_va(pool, data, true); 940 return -ENOMEM; 941 } 942 943 skb_reserve(nskb, headroom); 944 skb_copy_header(nskb, skb); 945 skb_mark_for_recycle(nskb); 946 947 err = skb_copy_bits(skb, 0, nskb->data, size); 948 if (err) { 949 consume_skb(nskb); 950 return err; 951 } 952 skb_put(nskb, size); 953 954 head_off = skb_headroom(nskb) - skb_headroom(skb); 955 skb_headers_offset_update(nskb, head_off); 956 957 off = size; 958 len = skb->len - off; 959 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { 960 struct page *page; 961 u32 page_off; 962 963 size = min_t(u32, len, PAGE_SIZE); 964 truesize = size; 965 966 page = page_pool_dev_alloc(pool, &page_off, &truesize); 967 if (!page) { 968 consume_skb(nskb); 969 return -ENOMEM; 970 } 971 972 skb_add_rx_frag(nskb, i, page, page_off, size, truesize); 973 err = skb_copy_bits(skb, off, page_address(page) + page_off, 974 size); 975 if (err) { 976 consume_skb(nskb); 977 return err; 978 } 979 980 len -= size; 981 off += size; 982 } 983 984 consume_skb(skb); 985 *pskb = nskb; 986 987 return 0; 988 #else 989 return -EOPNOTSUPP; 990 #endif 991 } 992 EXPORT_SYMBOL(skb_pp_cow_data); 993 994 int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, 995 struct bpf_prog *prog) 996 { 997 if (!prog->aux->xdp_has_frags) 998 return -EINVAL; 999 1000 return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM); 1001 } 1002 EXPORT_SYMBOL(skb_cow_data_for_xdp); 1003 1004 #if IS_ENABLED(CONFIG_PAGE_POOL) 1005 bool napi_pp_put_page(struct page *page) 1006 { 1007 page = compound_head(page); 1008 1009 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 1010 * in order to preserve any existing bits, such as bit 0 for the 1011 * head page of compound page and bit 1 for pfmemalloc page, so 1012 * mask those bits for freeing side when doing below checking, 1013 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 1014 * to avoid recycling the pfmemalloc page. 1015 */ 1016 if (unlikely(!is_pp_page(page))) 1017 return false; 1018 1019 page_pool_put_full_page(page->pp, page, false); 1020 1021 return true; 1022 } 1023 EXPORT_SYMBOL(napi_pp_put_page); 1024 #endif 1025 1026 static bool skb_pp_recycle(struct sk_buff *skb, void *data) 1027 { 1028 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) 1029 return false; 1030 return napi_pp_put_page(virt_to_page(data)); 1031 } 1032 1033 /** 1034 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb 1035 * @skb: page pool aware skb 1036 * 1037 * Increase the fragment reference count (pp_ref_count) of a skb. This is 1038 * intended to gain fragment references only for page pool aware skbs, 1039 * i.e. when skb->pp_recycle is true, and not for fragments in a 1040 * non-pp-recycling skb. It has a fallback to increase references on normal 1041 * pages, as page pool aware skbs may also have normal page fragments. 
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct page *head_page;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
		if (likely(is_pp_page(head_page)))
			page_pool_ref_page(head_page);
		else
			page_ref_inc(head_page);
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
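		 * If fclone_ref is already 1, the clone is gone (or was never
		 * handed out), so the whole fclone pair can be returned to the
		 * cache right away without touching the refcount.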
1140 */ 1141 if (refcount_read(&fclones->fclone_ref) == 1) 1142 goto fastpath; 1143 break; 1144 1145 default: /* SKB_FCLONE_CLONE */ 1146 fclones = container_of(skb, struct sk_buff_fclones, skb2); 1147 break; 1148 } 1149 if (!refcount_dec_and_test(&fclones->fclone_ref)) 1150 return; 1151 fastpath: 1152 kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones); 1153 } 1154 1155 void skb_release_head_state(struct sk_buff *skb) 1156 { 1157 skb_dst_drop(skb); 1158 if (skb->destructor) { 1159 DEBUG_NET_WARN_ON_ONCE(in_hardirq()); 1160 skb->destructor(skb); 1161 } 1162 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 1163 nf_conntrack_put(skb_nfct(skb)); 1164 #endif 1165 skb_ext_put(skb); 1166 } 1167 1168 /* Free everything but the sk_buff shell. */ 1169 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) 1170 { 1171 skb_release_head_state(skb); 1172 if (likely(skb->head)) 1173 skb_release_data(skb, reason); 1174 } 1175 1176 /** 1177 * __kfree_skb - private function 1178 * @skb: buffer 1179 * 1180 * Free an sk_buff. Release anything attached to the buffer. 1181 * Clean the state. This is an internal helper function. Users should 1182 * always call kfree_skb 1183 */ 1184 1185 void __kfree_skb(struct sk_buff *skb) 1186 { 1187 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); 1188 kfree_skbmem(skb); 1189 } 1190 EXPORT_SYMBOL(__kfree_skb); 1191 1192 static __always_inline 1193 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, 1194 enum skb_drop_reason reason) 1195 { 1196 if (unlikely(!skb_unref(skb))) 1197 return false; 1198 1199 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET || 1200 u32_get_bits(reason, 1201 SKB_DROP_REASON_SUBSYS_MASK) >= 1202 SKB_DROP_REASON_SUBSYS_NUM); 1203 1204 if (reason == SKB_CONSUMED) 1205 trace_consume_skb(skb, __builtin_return_address(0)); 1206 else 1207 trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); 1208 return true; 1209 } 1210 1211 /** 1212 * sk_skb_reason_drop - free an sk_buff with special reason 1213 * @sk: the socket to receive @skb, or NULL if not applicable 1214 * @skb: buffer to free 1215 * @reason: reason why this skb is dropped 1216 * 1217 * Drop a reference to the buffer and free it if the usage count has hit 1218 * zero. Meanwhile, pass the receiving socket and drop reason to 1219 * 'kfree_skb' tracepoint. 
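 *
 * Most callers go through the kfree_skb_reason()/kfree_skb() wrappers
 * rather than calling this directly, e.g. (sketch):
 *
 *	if (tcp_checksum_complete(skb))
 *		kfree_skb_reason(skb, SKB_DROP_REASON_TCP_CSUM);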
1220 */ 1221 void __fix_address 1222 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) 1223 { 1224 if (__sk_skb_reason_drop(sk, skb, reason)) 1225 __kfree_skb(skb); 1226 } 1227 EXPORT_SYMBOL(sk_skb_reason_drop); 1228 1229 #define KFREE_SKB_BULK_SIZE 16 1230 1231 struct skb_free_array { 1232 unsigned int skb_count; 1233 void *skb_array[KFREE_SKB_BULK_SIZE]; 1234 }; 1235 1236 static void kfree_skb_add_bulk(struct sk_buff *skb, 1237 struct skb_free_array *sa, 1238 enum skb_drop_reason reason) 1239 { 1240 /* if SKB is a clone, don't handle this case */ 1241 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { 1242 __kfree_skb(skb); 1243 return; 1244 } 1245 1246 skb_release_all(skb, reason); 1247 sa->skb_array[sa->skb_count++] = skb; 1248 1249 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { 1250 kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE, 1251 sa->skb_array); 1252 sa->skb_count = 0; 1253 } 1254 } 1255 1256 void __fix_address 1257 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) 1258 { 1259 struct skb_free_array sa; 1260 1261 sa.skb_count = 0; 1262 1263 while (segs) { 1264 struct sk_buff *next = segs->next; 1265 1266 if (__sk_skb_reason_drop(NULL, segs, reason)) { 1267 skb_poison_list(segs); 1268 kfree_skb_add_bulk(segs, &sa, reason); 1269 } 1270 1271 segs = next; 1272 } 1273 1274 if (sa.skb_count) 1275 kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array); 1276 } 1277 EXPORT_SYMBOL(kfree_skb_list_reason); 1278 1279 /* Dump skb information and contents. 1280 * 1281 * Must only be called from net_ratelimit()-ed paths. 1282 * 1283 * Dumps whole packets if full_pkt, only headers otherwise. 1284 */ 1285 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 1286 { 1287 struct skb_shared_info *sh = skb_shinfo(skb); 1288 struct net_device *dev = skb->dev; 1289 struct sock *sk = skb->sk; 1290 struct sk_buff *list_skb; 1291 bool has_mac, has_trans; 1292 int headroom, tailroom; 1293 int i, len, seg_len; 1294 1295 if (full_pkt) 1296 len = skb->len; 1297 else 1298 len = min_t(int, skb->len, MAX_HEADER + 128); 1299 1300 headroom = skb_headroom(skb); 1301 tailroom = skb_tailroom(skb); 1302 1303 has_mac = skb_mac_header_was_set(skb); 1304 has_trans = skb_transport_header_was_set(skb); 1305 1306 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 1307 "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n" 1308 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 1309 "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 1310 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n" 1311 "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n" 1312 "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n", 1313 level, skb->len, headroom, skb_headlen(skb), tailroom, 1314 has_mac ? skb->mac_header : -1, 1315 has_mac ? skb_mac_header_len(skb) : -1, 1316 skb->mac_len, 1317 skb->network_header, 1318 has_trans ? skb_network_header_len(skb) : -1, 1319 has_trans ? 
skb->transport_header : -1, 1320 sh->tx_flags, sh->nr_frags, 1321 sh->gso_size, sh->gso_type, sh->gso_segs, 1322 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, 1323 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, 1324 skb->hash, skb->sw_hash, skb->l4_hash, 1325 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, 1326 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, 1327 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, 1328 skb->inner_network_header, skb->inner_transport_header); 1329 1330 if (dev) 1331 printk("%sdev name=%s feat=%pNF\n", 1332 level, dev->name, &dev->features); 1333 if (sk) 1334 printk("%ssk family=%hu type=%u proto=%u\n", 1335 level, sk->sk_family, sk->sk_type, sk->sk_protocol); 1336 1337 if (full_pkt && headroom) 1338 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, 1339 16, 1, skb->head, headroom, false); 1340 1341 seg_len = min_t(int, skb_headlen(skb), len); 1342 if (seg_len) 1343 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 1344 16, 1, skb->data, seg_len, false); 1345 len -= seg_len; 1346 1347 if (full_pkt && tailroom) 1348 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, 1349 16, 1, skb_tail_pointer(skb), tailroom, false); 1350 1351 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { 1352 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1353 u32 p_off, p_len, copied; 1354 struct page *p; 1355 u8 *vaddr; 1356 1357 skb_frag_foreach_page(frag, skb_frag_off(frag), 1358 skb_frag_size(frag), p, p_off, p_len, 1359 copied) { 1360 seg_len = min_t(int, p_len, len); 1361 vaddr = kmap_atomic(p); 1362 print_hex_dump(level, "skb frag: ", 1363 DUMP_PREFIX_OFFSET, 1364 16, 1, vaddr + p_off, seg_len, false); 1365 kunmap_atomic(vaddr); 1366 len -= seg_len; 1367 if (!len) 1368 break; 1369 } 1370 } 1371 1372 if (full_pkt && skb_has_frag_list(skb)) { 1373 printk("skb fraglist:\n"); 1374 skb_walk_frags(skb, list_skb) 1375 skb_dump(level, list_skb, true); 1376 } 1377 } 1378 EXPORT_SYMBOL(skb_dump); 1379 1380 /** 1381 * skb_tx_error - report an sk_buff xmit error 1382 * @skb: buffer that triggered an error 1383 * 1384 * Report xmit error if a device callback is tracking this skb. 1385 * skb must be freed afterwards. 
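 *
 * A typical driver pattern on a TX setup failure (sketch) is:
 *
 *	skb_tx_error(skb);
 *	dev_kfree_skb_any(skb);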
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb) {
		skb_zcopy_downgrade_managed(skb);
		skb_zcopy_clear(skb, true);
	}
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb, but kfree_skb assumes that the
 * frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Like consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb, __builtin_return_address(0));
	skb_release_data(skb, SKB_CONSUMED);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	if (!kasan_mempool_poison_object(skb))
		return;

	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_mempool_unpoison_object(nc->skb_cache[i],
						kmem_cache_size(net_hotdata.skbuff_cache));

		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_all(skb, reason);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb, __builtin_return_address(0));

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, SKB_CONSUMED);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old,
false); 1510 1511 /* Note : this field could be in the headers group. 1512 * It is not yet because we do not want to have a 16 bit hole 1513 */ 1514 new->queue_mapping = old->queue_mapping; 1515 1516 memcpy(&new->headers, &old->headers, sizeof(new->headers)); 1517 CHECK_SKB_FIELD(protocol); 1518 CHECK_SKB_FIELD(csum); 1519 CHECK_SKB_FIELD(hash); 1520 CHECK_SKB_FIELD(priority); 1521 CHECK_SKB_FIELD(skb_iif); 1522 CHECK_SKB_FIELD(vlan_proto); 1523 CHECK_SKB_FIELD(vlan_tci); 1524 CHECK_SKB_FIELD(transport_header); 1525 CHECK_SKB_FIELD(network_header); 1526 CHECK_SKB_FIELD(mac_header); 1527 CHECK_SKB_FIELD(inner_protocol); 1528 CHECK_SKB_FIELD(inner_transport_header); 1529 CHECK_SKB_FIELD(inner_network_header); 1530 CHECK_SKB_FIELD(inner_mac_header); 1531 CHECK_SKB_FIELD(mark); 1532 #ifdef CONFIG_NETWORK_SECMARK 1533 CHECK_SKB_FIELD(secmark); 1534 #endif 1535 #ifdef CONFIG_NET_RX_BUSY_POLL 1536 CHECK_SKB_FIELD(napi_id); 1537 #endif 1538 CHECK_SKB_FIELD(alloc_cpu); 1539 #ifdef CONFIG_XPS 1540 CHECK_SKB_FIELD(sender_cpu); 1541 #endif 1542 #ifdef CONFIG_NET_SCHED 1543 CHECK_SKB_FIELD(tc_index); 1544 #endif 1545 1546 } 1547 1548 /* 1549 * You should not add any new code to this function. Add it to 1550 * __copy_skb_header above instead. 1551 */ 1552 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 1553 { 1554 #define C(x) n->x = skb->x 1555 1556 n->next = n->prev = NULL; 1557 n->sk = NULL; 1558 __copy_skb_header(n, skb); 1559 1560 C(len); 1561 C(data_len); 1562 C(mac_len); 1563 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 1564 n->cloned = 1; 1565 n->nohdr = 0; 1566 n->peeked = 0; 1567 C(pfmemalloc); 1568 C(pp_recycle); 1569 n->destructor = NULL; 1570 C(tail); 1571 C(end); 1572 C(head); 1573 C(head_frag); 1574 C(data); 1575 C(truesize); 1576 refcount_set(&n->users, 1); 1577 1578 atomic_inc(&(skb_shinfo(skb)->dataref)); 1579 skb->cloned = 1; 1580 1581 return n; 1582 #undef C 1583 } 1584 1585 /** 1586 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg 1587 * @first: first sk_buff of the msg 1588 */ 1589 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) 1590 { 1591 struct sk_buff *n; 1592 1593 n = alloc_skb(0, GFP_ATOMIC); 1594 if (!n) 1595 return NULL; 1596 1597 n->len = first->len; 1598 n->data_len = first->len; 1599 n->truesize = first->truesize; 1600 1601 skb_shinfo(n)->frag_list = first; 1602 1603 __copy_skb_header(n, first); 1604 n->destructor = NULL; 1605 1606 return n; 1607 } 1608 EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1609 1610 /** 1611 * skb_morph - morph one skb into another 1612 * @dst: the skb to receive the contents 1613 * @src: the skb to supply the contents 1614 * 1615 * This is identical to skb_clone except that the target skb is 1616 * supplied by the user. 1617 * 1618 * The target skb is returned upon exit. 1619 */ 1620 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1621 { 1622 skb_release_all(dst, SKB_CONSUMED); 1623 return __skb_clone(dst, src); 1624 } 1625 EXPORT_SYMBOL_GPL(skb_morph); 1626 1627 int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1628 { 1629 unsigned long max_pg, num_pg, new_pg, old_pg, rlim; 1630 struct user_struct *user; 1631 1632 if (capable(CAP_IPC_LOCK) || !size) 1633 return 0; 1634 1635 rlim = rlimit(RLIMIT_MEMLOCK); 1636 if (rlim == RLIM_INFINITY) 1637 return 0; 1638 1639 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1640 max_pg = rlim >> PAGE_SHIFT; 1641 user = mmp->user ? 
: current_user(); 1642 1643 old_pg = atomic_long_read(&user->locked_vm); 1644 do { 1645 new_pg = old_pg + num_pg; 1646 if (new_pg > max_pg) 1647 return -ENOBUFS; 1648 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); 1649 1650 if (!mmp->user) { 1651 mmp->user = get_uid(user); 1652 mmp->num_pg = num_pg; 1653 } else { 1654 mmp->num_pg += num_pg; 1655 } 1656 1657 return 0; 1658 } 1659 EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1660 1661 void mm_unaccount_pinned_pages(struct mmpin *mmp) 1662 { 1663 if (mmp->user) { 1664 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1665 free_uid(mmp->user); 1666 } 1667 } 1668 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1669 1670 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 1671 { 1672 struct ubuf_info_msgzc *uarg; 1673 struct sk_buff *skb; 1674 1675 WARN_ON_ONCE(!in_task()); 1676 1677 skb = sock_omalloc(sk, 0, GFP_KERNEL); 1678 if (!skb) 1679 return NULL; 1680 1681 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 1682 uarg = (void *)skb->cb; 1683 uarg->mmp.user = NULL; 1684 1685 if (mm_account_pinned_pages(&uarg->mmp, size)) { 1686 kfree_skb(skb); 1687 return NULL; 1688 } 1689 1690 uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; 1691 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 1692 uarg->len = 1; 1693 uarg->bytelen = size; 1694 uarg->zerocopy = 1; 1695 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; 1696 refcount_set(&uarg->ubuf.refcnt, 1); 1697 sock_hold(sk); 1698 1699 return &uarg->ubuf; 1700 } 1701 1702 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) 1703 { 1704 return container_of((void *)uarg, struct sk_buff, cb); 1705 } 1706 1707 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1708 struct ubuf_info *uarg) 1709 { 1710 if (uarg) { 1711 struct ubuf_info_msgzc *uarg_zc; 1712 const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 1713 u32 bytelen, next; 1714 1715 /* there might be non MSG_ZEROCOPY users */ 1716 if (uarg->ops != &msg_zerocopy_ubuf_ops) 1717 return NULL; 1718 1719 /* realloc only when socket is locked (TCP, UDP cork), 1720 * so uarg->len and sk_zckey access is serialized 1721 */ 1722 if (!sock_owned_by_user(sk)) { 1723 WARN_ON_ONCE(1); 1724 return NULL; 1725 } 1726 1727 uarg_zc = uarg_to_msgzc(uarg); 1728 bytelen = uarg_zc->bytelen + size; 1729 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { 1730 /* TCP can create new skb to attach new uarg */ 1731 if (sk->sk_type == SOCK_STREAM) 1732 goto new_alloc; 1733 return NULL; 1734 } 1735 1736 next = (u32)atomic_read(&sk->sk_zckey); 1737 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { 1738 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) 1739 return NULL; 1740 uarg_zc->len++; 1741 uarg_zc->bytelen = bytelen; 1742 atomic_set(&sk->sk_zckey, ++next); 1743 1744 /* no extra ref when appending to datagram (MSG_MORE) */ 1745 if (sk->sk_type == SOCK_STREAM) 1746 net_zcopy_get(uarg); 1747 1748 return uarg; 1749 } 1750 } 1751 1752 new_alloc: 1753 return msg_zerocopy_alloc(sk, size); 1754 } 1755 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 1756 1757 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 1758 { 1759 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 1760 u32 old_lo, old_hi; 1761 u64 sum_len; 1762 1763 old_lo = serr->ee.ee_info; 1764 old_hi = serr->ee.ee_data; 1765 sum_len = old_hi - old_lo + 1ULL + len; 1766 1767 if (sum_len >= (1ULL << 32)) 1768 return false; 1769 1770 if (lo != old_hi + 1) 1771 return false; 1772 1773 serr->ee.ee_data += len; 
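	/* The new range was contiguous with the queued notification, which
	 * now covers [ee_info, ee_data] including the extra len entries.
	 */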
1774 return true; 1775 } 1776 1777 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) 1778 { 1779 struct sk_buff *tail, *skb = skb_from_uarg(uarg); 1780 struct sock_exterr_skb *serr; 1781 struct sock *sk = skb->sk; 1782 struct sk_buff_head *q; 1783 unsigned long flags; 1784 bool is_zerocopy; 1785 u32 lo, hi; 1786 u16 len; 1787 1788 mm_unaccount_pinned_pages(&uarg->mmp); 1789 1790 /* if !len, there was only 1 call, and it was aborted 1791 * so do not queue a completion notification 1792 */ 1793 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 1794 goto release; 1795 1796 len = uarg->len; 1797 lo = uarg->id; 1798 hi = uarg->id + len - 1; 1799 is_zerocopy = uarg->zerocopy; 1800 1801 serr = SKB_EXT_ERR(skb); 1802 memset(serr, 0, sizeof(*serr)); 1803 serr->ee.ee_errno = 0; 1804 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 1805 serr->ee.ee_data = hi; 1806 serr->ee.ee_info = lo; 1807 if (!is_zerocopy) 1808 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 1809 1810 q = &sk->sk_error_queue; 1811 spin_lock_irqsave(&q->lock, flags); 1812 tail = skb_peek_tail(q); 1813 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 1814 !skb_zerocopy_notify_extend(tail, lo, len)) { 1815 __skb_queue_tail(q, skb); 1816 skb = NULL; 1817 } 1818 spin_unlock_irqrestore(&q->lock, flags); 1819 1820 sk_error_report(sk); 1821 1822 release: 1823 consume_skb(skb); 1824 sock_put(sk); 1825 } 1826 1827 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, 1828 bool success) 1829 { 1830 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); 1831 1832 uarg_zc->zerocopy = uarg_zc->zerocopy & success; 1833 1834 if (refcount_dec_and_test(&uarg->refcnt)) 1835 __msg_zerocopy_callback(uarg_zc); 1836 } 1837 1838 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1839 { 1840 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; 1841 1842 atomic_dec(&sk->sk_zckey); 1843 uarg_to_msgzc(uarg)->len--; 1844 1845 if (have_uref) 1846 msg_zerocopy_complete(NULL, uarg, true); 1847 } 1848 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 1849 1850 const struct ubuf_info_ops msg_zerocopy_ubuf_ops = { 1851 .complete = msg_zerocopy_complete, 1852 }; 1853 EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops); 1854 1855 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1856 struct msghdr *msg, int len, 1857 struct ubuf_info *uarg) 1858 { 1859 struct ubuf_info *orig_uarg = skb_zcopy(skb); 1860 int err, orig_len = skb->len; 1861 1862 if (uarg->ops->link_skb) { 1863 err = uarg->ops->link_skb(skb, uarg); 1864 if (err) 1865 return err; 1866 } else { 1867 /* An skb can only point to one uarg. This edge case happens 1868 * when TCP appends to an skb, but zerocopy_realloc triggered 1869 * a new alloc. 1870 */ 1871 if (orig_uarg && uarg != orig_uarg) 1872 return -EEXIST; 1873 } 1874 1875 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); 1876 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1877 struct sock *save_sk = skb->sk; 1878 1879 /* Streams do not free skb on error. Reset to prev state. 
*/ 1880 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); 1881 skb->sk = sk; 1882 ___pskb_trim(skb, orig_len); 1883 skb->sk = save_sk; 1884 return err; 1885 } 1886 1887 if (!uarg->ops->link_skb) 1888 skb_zcopy_set(skb, uarg, NULL); 1889 return skb->len - orig_len; 1890 } 1891 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 1892 1893 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) 1894 { 1895 int i; 1896 1897 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; 1898 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1899 skb_frag_ref(skb, i); 1900 } 1901 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); 1902 1903 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 1904 gfp_t gfp_mask) 1905 { 1906 if (skb_zcopy(orig)) { 1907 if (skb_zcopy(nskb)) { 1908 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 1909 if (!gfp_mask) { 1910 WARN_ON_ONCE(1); 1911 return -ENOMEM; 1912 } 1913 if (skb_uarg(nskb) == skb_uarg(orig)) 1914 return 0; 1915 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 1916 return -EIO; 1917 } 1918 skb_zcopy_set(nskb, skb_uarg(orig), NULL); 1919 } 1920 return 0; 1921 } 1922 1923 /** 1924 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 1925 * @skb: the skb to modify 1926 * @gfp_mask: allocation priority 1927 * 1928 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 1929 * It will copy all frags into kernel and drop the reference 1930 * to userspace pages. 1931 * 1932 * If this function is called from an interrupt gfp_mask() must be 1933 * %GFP_ATOMIC. 1934 * 1935 * Returns 0 on success or a negative error code on failure 1936 * to allocate kernel memory to copy to. 1937 */ 1938 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1939 { 1940 int num_frags = skb_shinfo(skb)->nr_frags; 1941 struct page *page, *head = NULL; 1942 int i, order, psize, new_frags; 1943 u32 d_off; 1944 1945 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1946 return -EINVAL; 1947 1948 if (!num_frags) 1949 goto release; 1950 1951 /* We might have to allocate high order pages, so compute what minimum 1952 * page order is needed. 
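	 * For example (assuming 4K pages and the default MAX_SKB_FRAGS of 17):
	 * an 80K zerocopy payload does not fit in 17 order-0 pages (68K), so
	 * order becomes 1 and the copy below uses ten 8K compound pages.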
1953 */ 1954 order = 0; 1955 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1956 order++; 1957 psize = (PAGE_SIZE << order); 1958 1959 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1960 for (i = 0; i < new_frags; i++) { 1961 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1962 if (!page) { 1963 while (head) { 1964 struct page *next = (struct page *)page_private(head); 1965 put_page(head); 1966 head = next; 1967 } 1968 return -ENOMEM; 1969 } 1970 set_page_private(page, (unsigned long)head); 1971 head = page; 1972 } 1973 1974 page = head; 1975 d_off = 0; 1976 for (i = 0; i < num_frags; i++) { 1977 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1978 u32 p_off, p_len, copied; 1979 struct page *p; 1980 u8 *vaddr; 1981 1982 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1983 p, p_off, p_len, copied) { 1984 u32 copy, done = 0; 1985 vaddr = kmap_atomic(p); 1986 1987 while (done < p_len) { 1988 if (d_off == psize) { 1989 d_off = 0; 1990 page = (struct page *)page_private(page); 1991 } 1992 copy = min_t(u32, psize - d_off, p_len - done); 1993 memcpy(page_address(page) + d_off, 1994 vaddr + p_off + done, copy); 1995 done += copy; 1996 d_off += copy; 1997 } 1998 kunmap_atomic(vaddr); 1999 } 2000 } 2001 2002 /* skb frags release userspace buffers */ 2003 for (i = 0; i < num_frags; i++) 2004 skb_frag_unref(skb, i); 2005 2006 /* skb frags point to kernel buffers */ 2007 for (i = 0; i < new_frags - 1; i++) { 2008 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 2009 head = (struct page *)page_private(head); 2010 } 2011 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 2012 d_off); 2013 skb_shinfo(skb)->nr_frags = new_frags; 2014 2015 release: 2016 skb_zcopy_clear(skb, false); 2017 return 0; 2018 } 2019 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 2020 2021 /** 2022 * skb_clone - duplicate an sk_buff 2023 * @skb: buffer to clone 2024 * @gfp_mask: allocation priority 2025 * 2026 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 2027 * copies share the same packet data but not structure. The new 2028 * buffer has a reference count of 1. If the allocation fails the 2029 * function returns %NULL otherwise the new buffer is returned. 2030 * 2031 * If this function is called from an interrupt gfp_mask() must be 2032 * %GFP_ATOMIC. 
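 *
 * A minimal usage sketch (illustrative only, not taken from a real
 * caller); the clone shares the payload with @skb, so it must be
 * uncloned (e.g. via pskb_expand_head()) before its data is written::
 *
 *	nskb = skb_clone(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;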
2033 */ 2034 2035 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2036 { 2037 struct sk_buff_fclones *fclones = container_of(skb, 2038 struct sk_buff_fclones, 2039 skb1); 2040 struct sk_buff *n; 2041 2042 if (skb_orphan_frags(skb, gfp_mask)) 2043 return NULL; 2044 2045 if (skb->fclone == SKB_FCLONE_ORIG && 2046 refcount_read(&fclones->fclone_ref) == 1) { 2047 n = &fclones->skb2; 2048 refcount_set(&fclones->fclone_ref, 2); 2049 n->fclone = SKB_FCLONE_CLONE; 2050 } else { 2051 if (skb_pfmemalloc(skb)) 2052 gfp_mask |= __GFP_MEMALLOC; 2053 2054 n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); 2055 if (!n) 2056 return NULL; 2057 2058 n->fclone = SKB_FCLONE_UNAVAILABLE; 2059 } 2060 2061 return __skb_clone(n, skb); 2062 } 2063 EXPORT_SYMBOL(skb_clone); 2064 2065 void skb_headers_offset_update(struct sk_buff *skb, int off) 2066 { 2067 /* Only adjust this if it actually is csum_start rather than csum */ 2068 if (skb->ip_summed == CHECKSUM_PARTIAL) 2069 skb->csum_start += off; 2070 /* {transport,network,mac}_header and tail are relative to skb->head */ 2071 skb->transport_header += off; 2072 skb->network_header += off; 2073 if (skb_mac_header_was_set(skb)) 2074 skb->mac_header += off; 2075 skb->inner_transport_header += off; 2076 skb->inner_network_header += off; 2077 skb->inner_mac_header += off; 2078 } 2079 EXPORT_SYMBOL(skb_headers_offset_update); 2080 2081 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2082 { 2083 __copy_skb_header(new, old); 2084 2085 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2086 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2087 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2088 } 2089 EXPORT_SYMBOL(skb_copy_header); 2090 2091 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2092 { 2093 if (skb_pfmemalloc(skb)) 2094 return SKB_ALLOC_RX; 2095 return 0; 2096 } 2097 2098 /** 2099 * skb_copy - create private copy of an sk_buff 2100 * @skb: buffer to copy 2101 * @gfp_mask: allocation priority 2102 * 2103 * Make a copy of both an &sk_buff and its data. This is used when the 2104 * caller wishes to modify the data and needs a private copy of the 2105 * data to alter. Returns %NULL on failure or the pointer to the buffer 2106 * on success. The returned buffer has a reference count of 1. 2107 * 2108 * As by-product this function converts non-linear &sk_buff to linear 2109 * one, so that &sk_buff becomes completely private and caller is allowed 2110 * to modify all the data of returned buffer. This means that this 2111 * function is not recommended for use in circumstances when only 2112 * header is going to be modified. Use pskb_copy() instead. 2113 */ 2114 2115 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2116 { 2117 struct sk_buff *n; 2118 unsigned int size; 2119 int headerlen; 2120 2121 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2122 return NULL; 2123 2124 headerlen = skb_headroom(skb); 2125 size = skb_end_offset(skb) + skb->data_len; 2126 n = __alloc_skb(size, gfp_mask, 2127 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2128 if (!n) 2129 return NULL; 2130 2131 /* Set the data pointer */ 2132 skb_reserve(n, headerlen); 2133 /* Set the tail pointer and length */ 2134 skb_put(n, skb->len); 2135 2136 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2137 2138 skb_copy_header(n, skb); 2139 return n; 2140 } 2141 EXPORT_SYMBOL(skb_copy); 2142 2143 /** 2144 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2145 * @skb: buffer to copy 2146 * @headroom: headroom of new skb 2147 * @gfp_mask: allocation priority 2148 * @fclone: if true allocate the copy of the skb from the fclone 2149 * cache instead of the head cache; it is recommended to set this 2150 * to true for the cases where the copy will likely be cloned 2151 * 2152 * Make a copy of both an &sk_buff and part of its data, located 2153 * in header. Fragmented data remain shared. This is used when 2154 * the caller wishes to modify only header of &sk_buff and needs 2155 * private copy of the header to alter. Returns %NULL on failure 2156 * or the pointer to the buffer on success. 2157 * The returned buffer has a reference count of 1. 2158 */ 2159 2160 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2161 gfp_t gfp_mask, bool fclone) 2162 { 2163 unsigned int size = skb_headlen(skb) + headroom; 2164 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2165 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2166 2167 if (!n) 2168 goto out; 2169 2170 /* Set the data pointer */ 2171 skb_reserve(n, headroom); 2172 /* Set the tail pointer and length */ 2173 skb_put(n, skb_headlen(skb)); 2174 /* Copy the bytes */ 2175 skb_copy_from_linear_data(skb, n->data, n->len); 2176 2177 n->truesize += skb->data_len; 2178 n->data_len = skb->data_len; 2179 n->len = skb->len; 2180 2181 if (skb_shinfo(skb)->nr_frags) { 2182 int i; 2183 2184 if (skb_orphan_frags(skb, gfp_mask) || 2185 skb_zerocopy_clone(n, skb, gfp_mask)) { 2186 kfree_skb(n); 2187 n = NULL; 2188 goto out; 2189 } 2190 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2191 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2192 skb_frag_ref(skb, i); 2193 } 2194 skb_shinfo(n)->nr_frags = i; 2195 } 2196 2197 if (skb_has_frag_list(skb)) { 2198 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2199 skb_clone_fraglist(n); 2200 } 2201 2202 skb_copy_header(n, skb); 2203 out: 2204 return n; 2205 } 2206 EXPORT_SYMBOL(__pskb_copy_fclone); 2207 2208 /** 2209 * pskb_expand_head - reallocate header of &sk_buff 2210 * @skb: buffer to reallocate 2211 * @nhead: room to add at head 2212 * @ntail: room to add at tail 2213 * @gfp_mask: allocation priority 2214 * 2215 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2216 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2217 * reference count of 1. Returns zero in the case of success or error, 2218 * if expansion failed. In the last case, &sk_buff is not changed. 2219 * 2220 * All the pointers pointing into skb header may change and must be 2221 * reloaded after call to this function. 2222 */ 2223 2224 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2225 gfp_t gfp_mask) 2226 { 2227 unsigned int osize = skb_end_offset(skb); 2228 unsigned int size = osize + nhead + ntail; 2229 long off; 2230 u8 *data; 2231 int i; 2232 2233 BUG_ON(nhead < 0); 2234 2235 BUG_ON(skb_shared(skb)); 2236 2237 skb_zcopy_downgrade_managed(skb); 2238 2239 if (skb_pfmemalloc(skb)) 2240 gfp_mask |= __GFP_MEMALLOC; 2241 2242 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2243 if (!data) 2244 goto nodata; 2245 size = SKB_WITH_OVERHEAD(size); 2246 2247 /* Copy only real data... and, alas, header. This should be 2248 * optimized for the cases when header is void. 
2249 */ 2250 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2251 2252 memcpy((struct skb_shared_info *)(data + size), 2253 skb_shinfo(skb), 2254 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2255 2256 /* 2257 * if shinfo is shared we must drop the old head gracefully, but if it 2258 * is not we can just drop the old head and let the existing refcount 2259 * be since all we did is relocate the values 2260 */ 2261 if (skb_cloned(skb)) { 2262 if (skb_orphan_frags(skb, gfp_mask)) 2263 goto nofrags; 2264 if (skb_zcopy(skb)) 2265 refcount_inc(&skb_uarg(skb)->refcnt); 2266 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2267 skb_frag_ref(skb, i); 2268 2269 if (skb_has_frag_list(skb)) 2270 skb_clone_fraglist(skb); 2271 2272 skb_release_data(skb, SKB_CONSUMED); 2273 } else { 2274 skb_free_head(skb); 2275 } 2276 off = (data + nhead) - skb->head; 2277 2278 skb->head = data; 2279 skb->head_frag = 0; 2280 skb->data += off; 2281 2282 skb_set_end_offset(skb, size); 2283 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2284 off = nhead; 2285 #endif 2286 skb->tail += off; 2287 skb_headers_offset_update(skb, nhead); 2288 skb->cloned = 0; 2289 skb->hdr_len = 0; 2290 skb->nohdr = 0; 2291 atomic_set(&skb_shinfo(skb)->dataref, 1); 2292 2293 skb_metadata_clear(skb); 2294 2295 /* It is not generally safe to change skb->truesize. 2296 * For the moment, we really care of rx path, or 2297 * when skb is orphaned (not attached to a socket). 2298 */ 2299 if (!skb->sk || skb->destructor == sock_edemux) 2300 skb->truesize += size - osize; 2301 2302 return 0; 2303 2304 nofrags: 2305 skb_kfree_head(data, size); 2306 nodata: 2307 return -ENOMEM; 2308 } 2309 EXPORT_SYMBOL(pskb_expand_head); 2310 2311 /* Make private copy of skb with writable head and some headroom */ 2312 2313 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2314 { 2315 struct sk_buff *skb2; 2316 int delta = headroom - skb_headroom(skb); 2317 2318 if (delta <= 0) 2319 skb2 = pskb_copy(skb, GFP_ATOMIC); 2320 else { 2321 skb2 = skb_clone(skb, GFP_ATOMIC); 2322 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2323 GFP_ATOMIC)) { 2324 kfree_skb(skb2); 2325 skb2 = NULL; 2326 } 2327 } 2328 return skb2; 2329 } 2330 EXPORT_SYMBOL(skb_realloc_headroom); 2331 2332 /* Note: We plan to rework this in linux-6.4 */ 2333 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2334 { 2335 unsigned int saved_end_offset, saved_truesize; 2336 struct skb_shared_info *shinfo; 2337 int res; 2338 2339 saved_end_offset = skb_end_offset(skb); 2340 saved_truesize = skb->truesize; 2341 2342 res = pskb_expand_head(skb, 0, 0, pri); 2343 if (res) 2344 return res; 2345 2346 skb->truesize = saved_truesize; 2347 2348 if (likely(skb_end_offset(skb) == saved_end_offset)) 2349 return 0; 2350 2351 /* We can not change skb->end if the original or new value 2352 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2353 */ 2354 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2355 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2356 /* We think this path should not be taken. 2357 * Add a temporary trace to warn us just in case. 2358 */ 2359 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2360 saved_end_offset, skb_end_offset(skb)); 2361 WARN_ON_ONCE(1); 2362 return 0; 2363 } 2364 2365 shinfo = skb_shinfo(skb); 2366 2367 /* We are about to change back skb->end, 2368 * we need to move skb_shinfo() to its new location. 
2369 */ 2370 memmove(skb->head + saved_end_offset, 2371 shinfo, 2372 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2373 2374 skb_set_end_offset(skb, saved_end_offset); 2375 2376 return 0; 2377 } 2378 2379 /** 2380 * skb_expand_head - reallocate header of &sk_buff 2381 * @skb: buffer to reallocate 2382 * @headroom: needed headroom 2383 * 2384 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2385 * if possible; copies skb->sk to new skb as needed 2386 * and frees original skb in case of failures. 2387 * 2388 * It expect increased headroom and generates warning otherwise. 2389 */ 2390 2391 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2392 { 2393 int delta = headroom - skb_headroom(skb); 2394 int osize = skb_end_offset(skb); 2395 struct sock *sk = skb->sk; 2396 2397 if (WARN_ONCE(delta <= 0, 2398 "%s is expecting an increase in the headroom", __func__)) 2399 return skb; 2400 2401 delta = SKB_DATA_ALIGN(delta); 2402 /* pskb_expand_head() might crash, if skb is shared. */ 2403 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2404 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2405 2406 if (unlikely(!nskb)) 2407 goto fail; 2408 2409 if (sk) 2410 skb_set_owner_w(nskb, sk); 2411 consume_skb(skb); 2412 skb = nskb; 2413 } 2414 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2415 goto fail; 2416 2417 if (sk && is_skb_wmem(skb)) { 2418 delta = skb_end_offset(skb) - osize; 2419 refcount_add(delta, &sk->sk_wmem_alloc); 2420 skb->truesize += delta; 2421 } 2422 return skb; 2423 2424 fail: 2425 kfree_skb(skb); 2426 return NULL; 2427 } 2428 EXPORT_SYMBOL(skb_expand_head); 2429 2430 /** 2431 * skb_copy_expand - copy and expand sk_buff 2432 * @skb: buffer to copy 2433 * @newheadroom: new free bytes at head 2434 * @newtailroom: new free bytes at tail 2435 * @gfp_mask: allocation priority 2436 * 2437 * Make a copy of both an &sk_buff and its data and while doing so 2438 * allocate additional space. 2439 * 2440 * This is used when the caller wishes to modify the data and needs a 2441 * private copy of the data to alter as well as more space for new fields. 2442 * Returns %NULL on failure or the pointer to the buffer 2443 * on success. The returned buffer has a reference count of 1. 2444 * 2445 * You must pass %GFP_ATOMIC as the allocation priority if this function 2446 * is called from an interrupt. 2447 */ 2448 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2449 int newheadroom, int newtailroom, 2450 gfp_t gfp_mask) 2451 { 2452 /* 2453 * Allocate the copy buffer 2454 */ 2455 int head_copy_len, head_copy_off; 2456 struct sk_buff *n; 2457 int oldheadroom; 2458 2459 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2460 return NULL; 2461 2462 oldheadroom = skb_headroom(skb); 2463 n = __alloc_skb(newheadroom + skb->len + newtailroom, 2464 gfp_mask, skb_alloc_rx_flag(skb), 2465 NUMA_NO_NODE); 2466 if (!n) 2467 return NULL; 2468 2469 skb_reserve(n, newheadroom); 2470 2471 /* Set the tail pointer and length */ 2472 skb_put(n, skb->len); 2473 2474 head_copy_len = oldheadroom; 2475 head_copy_off = 0; 2476 if (newheadroom <= head_copy_len) 2477 head_copy_len = newheadroom; 2478 else 2479 head_copy_off = newheadroom - head_copy_len; 2480 2481 /* Copy the linear header and data. 
*/ 2482 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2483 skb->len + head_copy_len)); 2484 2485 skb_copy_header(n, skb); 2486 2487 skb_headers_offset_update(n, newheadroom - oldheadroom); 2488 2489 return n; 2490 } 2491 EXPORT_SYMBOL(skb_copy_expand); 2492 2493 /** 2494 * __skb_pad - zero pad the tail of an skb 2495 * @skb: buffer to pad 2496 * @pad: space to pad 2497 * @free_on_error: free buffer on error 2498 * 2499 * Ensure that a buffer is followed by a padding area that is zero 2500 * filled. Used by network drivers which may DMA or transfer data 2501 * beyond the buffer end onto the wire. 2502 * 2503 * May return error in out of memory cases. The skb is freed on error 2504 * if @free_on_error is true. 2505 */ 2506 2507 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2508 { 2509 int err; 2510 int ntail; 2511 2512 /* If the skbuff is non linear tailroom is always zero.. */ 2513 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2514 memset(skb->data+skb->len, 0, pad); 2515 return 0; 2516 } 2517 2518 ntail = skb->data_len + pad - (skb->end - skb->tail); 2519 if (likely(skb_cloned(skb) || ntail > 0)) { 2520 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2521 if (unlikely(err)) 2522 goto free_skb; 2523 } 2524 2525 /* FIXME: The use of this function with non-linear skb's really needs 2526 * to be audited. 2527 */ 2528 err = skb_linearize(skb); 2529 if (unlikely(err)) 2530 goto free_skb; 2531 2532 memset(skb->data + skb->len, 0, pad); 2533 return 0; 2534 2535 free_skb: 2536 if (free_on_error) 2537 kfree_skb(skb); 2538 return err; 2539 } 2540 EXPORT_SYMBOL(__skb_pad); 2541 2542 /** 2543 * pskb_put - add data to the tail of a potentially fragmented buffer 2544 * @skb: start of the buffer to use 2545 * @tail: tail fragment of the buffer to use 2546 * @len: amount of data to add 2547 * 2548 * This function extends the used data area of the potentially 2549 * fragmented buffer. @tail must be the last fragment of @skb -- or 2550 * @skb itself. If this would exceed the total buffer size the kernel 2551 * will panic. A pointer to the first byte of the extra data is 2552 * returned. 2553 */ 2554 2555 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2556 { 2557 if (tail != skb) { 2558 skb->data_len += len; 2559 skb->len += len; 2560 } 2561 return skb_put(tail, len); 2562 } 2563 EXPORT_SYMBOL_GPL(pskb_put); 2564 2565 /** 2566 * skb_put - add data to a buffer 2567 * @skb: buffer to use 2568 * @len: amount of data to add 2569 * 2570 * This function extends the used data area of the buffer. If this would 2571 * exceed the total buffer size the kernel will panic. A pointer to the 2572 * first byte of the extra data is returned. 2573 */ 2574 void *skb_put(struct sk_buff *skb, unsigned int len) 2575 { 2576 void *tmp = skb_tail_pointer(skb); 2577 SKB_LINEAR_ASSERT(skb); 2578 skb->tail += len; 2579 skb->len += len; 2580 if (unlikely(skb->tail > skb->end)) 2581 skb_over_panic(skb, len, __builtin_return_address(0)); 2582 return tmp; 2583 } 2584 EXPORT_SYMBOL(skb_put); 2585 2586 /** 2587 * skb_push - add data to the start of a buffer 2588 * @skb: buffer to use 2589 * @len: amount of data to add 2590 * 2591 * This function extends the used data area of the buffer at the buffer 2592 * start. If this would exceed the total buffer headroom the kernel will 2593 * panic. A pointer to the first byte of the extra data is returned. 
2594 */ 2595 void *skb_push(struct sk_buff *skb, unsigned int len) 2596 { 2597 skb->data -= len; 2598 skb->len += len; 2599 if (unlikely(skb->data < skb->head)) 2600 skb_under_panic(skb, len, __builtin_return_address(0)); 2601 return skb->data; 2602 } 2603 EXPORT_SYMBOL(skb_push); 2604 2605 /** 2606 * skb_pull - remove data from the start of a buffer 2607 * @skb: buffer to use 2608 * @len: amount of data to remove 2609 * 2610 * This function removes data from the start of a buffer, returning 2611 * the memory to the headroom. A pointer to the next data in the buffer 2612 * is returned. Once the data has been pulled future pushes will overwrite 2613 * the old data. 2614 */ 2615 void *skb_pull(struct sk_buff *skb, unsigned int len) 2616 { 2617 return skb_pull_inline(skb, len); 2618 } 2619 EXPORT_SYMBOL(skb_pull); 2620 2621 /** 2622 * skb_pull_data - remove data from the start of a buffer returning its 2623 * original position. 2624 * @skb: buffer to use 2625 * @len: amount of data to remove 2626 * 2627 * This function removes data from the start of a buffer, returning 2628 * the memory to the headroom. A pointer to the original data in the buffer 2629 * is returned after checking if there is enough data to pull. Once the 2630 * data has been pulled future pushes will overwrite the old data. 2631 */ 2632 void *skb_pull_data(struct sk_buff *skb, size_t len) 2633 { 2634 void *data = skb->data; 2635 2636 if (skb->len < len) 2637 return NULL; 2638 2639 skb_pull(skb, len); 2640 2641 return data; 2642 } 2643 EXPORT_SYMBOL(skb_pull_data); 2644 2645 /** 2646 * skb_trim - remove end from a buffer 2647 * @skb: buffer to alter 2648 * @len: new length 2649 * 2650 * Cut the length of a buffer down by removing data from the tail. If 2651 * the buffer is already under the length specified it is not modified. 2652 * The skb must be linear. 2653 */ 2654 void skb_trim(struct sk_buff *skb, unsigned int len) 2655 { 2656 if (skb->len > len) 2657 __skb_trim(skb, len); 2658 } 2659 EXPORT_SYMBOL(skb_trim); 2660 2661 /* Trims skb to length len. It can change skb pointers. 
2662 */ 2663 2664 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2665 { 2666 struct sk_buff **fragp; 2667 struct sk_buff *frag; 2668 int offset = skb_headlen(skb); 2669 int nfrags = skb_shinfo(skb)->nr_frags; 2670 int i; 2671 int err; 2672 2673 if (skb_cloned(skb) && 2674 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2675 return err; 2676 2677 i = 0; 2678 if (offset >= len) 2679 goto drop_pages; 2680 2681 for (; i < nfrags; i++) { 2682 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2683 2684 if (end < len) { 2685 offset = end; 2686 continue; 2687 } 2688 2689 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2690 2691 drop_pages: 2692 skb_shinfo(skb)->nr_frags = i; 2693 2694 for (; i < nfrags; i++) 2695 skb_frag_unref(skb, i); 2696 2697 if (skb_has_frag_list(skb)) 2698 skb_drop_fraglist(skb); 2699 goto done; 2700 } 2701 2702 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2703 fragp = &frag->next) { 2704 int end = offset + frag->len; 2705 2706 if (skb_shared(frag)) { 2707 struct sk_buff *nfrag; 2708 2709 nfrag = skb_clone(frag, GFP_ATOMIC); 2710 if (unlikely(!nfrag)) 2711 return -ENOMEM; 2712 2713 nfrag->next = frag->next; 2714 consume_skb(frag); 2715 frag = nfrag; 2716 *fragp = frag; 2717 } 2718 2719 if (end < len) { 2720 offset = end; 2721 continue; 2722 } 2723 2724 if (end > len && 2725 unlikely((err = pskb_trim(frag, len - offset)))) 2726 return err; 2727 2728 if (frag->next) 2729 skb_drop_list(&frag->next); 2730 break; 2731 } 2732 2733 done: 2734 if (len > skb_headlen(skb)) { 2735 skb->data_len -= skb->len - len; 2736 skb->len = len; 2737 } else { 2738 skb->len = len; 2739 skb->data_len = 0; 2740 skb_set_tail_pointer(skb, len); 2741 } 2742 2743 if (!skb->sk || skb->destructor == sock_edemux) 2744 skb_condense(skb); 2745 return 0; 2746 } 2747 EXPORT_SYMBOL(___pskb_trim); 2748 2749 /* Note : use pskb_trim_rcsum() instead of calling this directly 2750 */ 2751 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2752 { 2753 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2754 int delta = skb->len - len; 2755 2756 skb->csum = csum_block_sub(skb->csum, 2757 skb_checksum(skb, len, delta, 0), 2758 len); 2759 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2760 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2761 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2762 2763 if (offset + sizeof(__sum16) > hdlen) 2764 return -EINVAL; 2765 } 2766 return __pskb_trim(skb, len); 2767 } 2768 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2769 2770 /** 2771 * __pskb_pull_tail - advance tail of skb header 2772 * @skb: buffer to reallocate 2773 * @delta: number of bytes to advance tail 2774 * 2775 * The function makes a sense only on a fragmented &sk_buff, 2776 * it expands header moving its tail forward and copying necessary 2777 * data from fragmented part. 2778 * 2779 * &sk_buff MUST have reference count of 1. 2780 * 2781 * Returns %NULL (and &sk_buff does not change) if pull failed 2782 * or value of new tail of skb in the case of success. 2783 * 2784 * All the pointers pointing into skb header may change and must be 2785 * reloaded after call to this function. 2786 */ 2787 2788 /* Moves tail of skb head forward, copying data from fragmented part, 2789 * when it is necessary. 2790 * 1. It may fail due to malloc failure. 2791 * 2. It may change skb pointers. 2792 * 2793 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
2794 */ 2795 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2796 { 2797 /* If skb has not enough free space at tail, get new one 2798 * plus 128 bytes for future expansions. If we have enough 2799 * room at tail, reallocate without expansion only if skb is cloned. 2800 */ 2801 int i, k, eat = (skb->tail + delta) - skb->end; 2802 2803 if (eat > 0 || skb_cloned(skb)) { 2804 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2805 GFP_ATOMIC)) 2806 return NULL; 2807 } 2808 2809 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2810 skb_tail_pointer(skb), delta)); 2811 2812 /* Optimization: no fragments, no reasons to preestimate 2813 * size of pulled pages. Superb. 2814 */ 2815 if (!skb_has_frag_list(skb)) 2816 goto pull_pages; 2817 2818 /* Estimate size of pulled pages. */ 2819 eat = delta; 2820 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2821 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2822 2823 if (size >= eat) 2824 goto pull_pages; 2825 eat -= size; 2826 } 2827 2828 /* If we need update frag list, we are in troubles. 2829 * Certainly, it is possible to add an offset to skb data, 2830 * but taking into account that pulling is expected to 2831 * be very rare operation, it is worth to fight against 2832 * further bloating skb head and crucify ourselves here instead. 2833 * Pure masohism, indeed. 8)8) 2834 */ 2835 if (eat) { 2836 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2837 struct sk_buff *clone = NULL; 2838 struct sk_buff *insp = NULL; 2839 2840 do { 2841 if (list->len <= eat) { 2842 /* Eaten as whole. */ 2843 eat -= list->len; 2844 list = list->next; 2845 insp = list; 2846 } else { 2847 /* Eaten partially. */ 2848 if (skb_is_gso(skb) && !list->head_frag && 2849 skb_headlen(list)) 2850 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2851 2852 if (skb_shared(list)) { 2853 /* Sucks! We need to fork list. :-( */ 2854 clone = skb_clone(list, GFP_ATOMIC); 2855 if (!clone) 2856 return NULL; 2857 insp = list->next; 2858 list = clone; 2859 } else { 2860 /* This may be pulled without 2861 * problems. */ 2862 insp = list; 2863 } 2864 if (!pskb_pull(list, eat)) { 2865 kfree_skb(clone); 2866 return NULL; 2867 } 2868 break; 2869 } 2870 } while (eat); 2871 2872 /* Free pulled out fragments. */ 2873 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2874 skb_shinfo(skb)->frag_list = list->next; 2875 consume_skb(list); 2876 } 2877 /* And insert new clone at head. */ 2878 if (clone) { 2879 clone->next = list; 2880 skb_shinfo(skb)->frag_list = clone; 2881 } 2882 } 2883 /* Success! Now we may commit changes to skb data. 
*/ 2884 2885 pull_pages: 2886 eat = delta; 2887 k = 0; 2888 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2889 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2890 2891 if (size <= eat) { 2892 skb_frag_unref(skb, i); 2893 eat -= size; 2894 } else { 2895 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2896 2897 *frag = skb_shinfo(skb)->frags[i]; 2898 if (eat) { 2899 skb_frag_off_add(frag, eat); 2900 skb_frag_size_sub(frag, eat); 2901 if (!i) 2902 goto end; 2903 eat = 0; 2904 } 2905 k++; 2906 } 2907 } 2908 skb_shinfo(skb)->nr_frags = k; 2909 2910 end: 2911 skb->tail += delta; 2912 skb->data_len -= delta; 2913 2914 if (!skb->data_len) 2915 skb_zcopy_clear(skb, false); 2916 2917 return skb_tail_pointer(skb); 2918 } 2919 EXPORT_SYMBOL(__pskb_pull_tail); 2920 2921 /** 2922 * skb_copy_bits - copy bits from skb to kernel buffer 2923 * @skb: source skb 2924 * @offset: offset in source 2925 * @to: destination buffer 2926 * @len: number of bytes to copy 2927 * 2928 * Copy the specified number of bytes from the source skb to the 2929 * destination buffer. 2930 * 2931 * CAUTION ! : 2932 * If its prototype is ever changed, 2933 * check arch/{*}/net/{*}.S files, 2934 * since it is called from BPF assembly code. 2935 */ 2936 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2937 { 2938 int start = skb_headlen(skb); 2939 struct sk_buff *frag_iter; 2940 int i, copy; 2941 2942 if (offset > (int)skb->len - len) 2943 goto fault; 2944 2945 /* Copy header. */ 2946 if ((copy = start - offset) > 0) { 2947 if (copy > len) 2948 copy = len; 2949 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2950 if ((len -= copy) == 0) 2951 return 0; 2952 offset += copy; 2953 to += copy; 2954 } 2955 2956 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2957 int end; 2958 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2959 2960 WARN_ON(start > offset + len); 2961 2962 end = start + skb_frag_size(f); 2963 if ((copy = end - offset) > 0) { 2964 u32 p_off, p_len, copied; 2965 struct page *p; 2966 u8 *vaddr; 2967 2968 if (copy > len) 2969 copy = len; 2970 2971 skb_frag_foreach_page(f, 2972 skb_frag_off(f) + offset - start, 2973 copy, p, p_off, p_len, copied) { 2974 vaddr = kmap_atomic(p); 2975 memcpy(to + copied, vaddr + p_off, p_len); 2976 kunmap_atomic(vaddr); 2977 } 2978 2979 if ((len -= copy) == 0) 2980 return 0; 2981 offset += copy; 2982 to += copy; 2983 } 2984 start = end; 2985 } 2986 2987 skb_walk_frags(skb, frag_iter) { 2988 int end; 2989 2990 WARN_ON(start > offset + len); 2991 2992 end = start + frag_iter->len; 2993 if ((copy = end - offset) > 0) { 2994 if (copy > len) 2995 copy = len; 2996 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2997 goto fault; 2998 if ((len -= copy) == 0) 2999 return 0; 3000 offset += copy; 3001 to += copy; 3002 } 3003 start = end; 3004 } 3005 3006 if (!len) 3007 return 0; 3008 3009 fault: 3010 return -EFAULT; 3011 } 3012 EXPORT_SYMBOL(skb_copy_bits); 3013 3014 /* 3015 * Callback from splice_to_pipe(), if we need to release some pages 3016 * at the end of the spd in case we error'ed out in filling the pipe. 
3017 */ 3018 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3019 { 3020 put_page(spd->pages[i]); 3021 } 3022 3023 static struct page *linear_to_page(struct page *page, unsigned int *len, 3024 unsigned int *offset, 3025 struct sock *sk) 3026 { 3027 struct page_frag *pfrag = sk_page_frag(sk); 3028 3029 if (!sk_page_frag_refill(sk, pfrag)) 3030 return NULL; 3031 3032 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 3033 3034 memcpy(page_address(pfrag->page) + pfrag->offset, 3035 page_address(page) + *offset, *len); 3036 *offset = pfrag->offset; 3037 pfrag->offset += *len; 3038 3039 return pfrag->page; 3040 } 3041 3042 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 3043 struct page *page, 3044 unsigned int offset) 3045 { 3046 return spd->nr_pages && 3047 spd->pages[spd->nr_pages - 1] == page && 3048 (spd->partial[spd->nr_pages - 1].offset + 3049 spd->partial[spd->nr_pages - 1].len == offset); 3050 } 3051 3052 /* 3053 * Fill page/offset/length into spd, if it can hold more pages. 3054 */ 3055 static bool spd_fill_page(struct splice_pipe_desc *spd, 3056 struct pipe_inode_info *pipe, struct page *page, 3057 unsigned int *len, unsigned int offset, 3058 bool linear, 3059 struct sock *sk) 3060 { 3061 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3062 return true; 3063 3064 if (linear) { 3065 page = linear_to_page(page, len, &offset, sk); 3066 if (!page) 3067 return true; 3068 } 3069 if (spd_can_coalesce(spd, page, offset)) { 3070 spd->partial[spd->nr_pages - 1].len += *len; 3071 return false; 3072 } 3073 get_page(page); 3074 spd->pages[spd->nr_pages] = page; 3075 spd->partial[spd->nr_pages].len = *len; 3076 spd->partial[spd->nr_pages].offset = offset; 3077 spd->nr_pages++; 3078 3079 return false; 3080 } 3081 3082 static bool __splice_segment(struct page *page, unsigned int poff, 3083 unsigned int plen, unsigned int *off, 3084 unsigned int *len, 3085 struct splice_pipe_desc *spd, bool linear, 3086 struct sock *sk, 3087 struct pipe_inode_info *pipe) 3088 { 3089 if (!*len) 3090 return true; 3091 3092 /* skip this segment if already processed */ 3093 if (*off >= plen) { 3094 *off -= plen; 3095 return false; 3096 } 3097 3098 /* ignore any bits we already processed */ 3099 poff += *off; 3100 plen -= *off; 3101 *off = 0; 3102 3103 do { 3104 unsigned int flen = min(*len, plen); 3105 3106 if (spd_fill_page(spd, pipe, page, &flen, poff, 3107 linear, sk)) 3108 return true; 3109 poff += flen; 3110 plen -= flen; 3111 *len -= flen; 3112 } while (*len && plen); 3113 3114 return false; 3115 } 3116 3117 /* 3118 * Map linear and fragment data from the skb to spd. It reports true if the 3119 * pipe is full or if we already spliced the requested length. 3120 */ 3121 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3122 unsigned int *offset, unsigned int *len, 3123 struct splice_pipe_desc *spd, struct sock *sk) 3124 { 3125 int seg; 3126 struct sk_buff *iter; 3127 3128 /* map the linear part : 3129 * If skb->head_frag is set, this 'linear' part is backed by a 3130 * fragment, and if the head is not shared with any clones then 3131 * we can avoid a copy since we own the head portion of this page. 
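 * skb_head_is_locked() below is true when the head is not ours to hand
 * out (not a page frag, or shared with a clone); in that case
 * linear_to_page() copies the bytes into the socket's page frag first.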
3132 */ 3133 if (__splice_segment(virt_to_page(skb->data), 3134 (unsigned long) skb->data & (PAGE_SIZE - 1), 3135 skb_headlen(skb), 3136 offset, len, spd, 3137 skb_head_is_locked(skb), 3138 sk, pipe)) 3139 return true; 3140 3141 /* 3142 * then map the fragments 3143 */ 3144 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3145 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3146 3147 if (__splice_segment(skb_frag_page(f), 3148 skb_frag_off(f), skb_frag_size(f), 3149 offset, len, spd, false, sk, pipe)) 3150 return true; 3151 } 3152 3153 skb_walk_frags(skb, iter) { 3154 if (*offset >= iter->len) { 3155 *offset -= iter->len; 3156 continue; 3157 } 3158 /* __skb_splice_bits() only fails if the output has no room 3159 * left, so no point in going over the frag_list for the error 3160 * case. 3161 */ 3162 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3163 return true; 3164 } 3165 3166 return false; 3167 } 3168 3169 /* 3170 * Map data from the skb to a pipe. Should handle both the linear part, 3171 * the fragments, and the frag list. 3172 */ 3173 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3174 struct pipe_inode_info *pipe, unsigned int tlen, 3175 unsigned int flags) 3176 { 3177 struct partial_page partial[MAX_SKB_FRAGS]; 3178 struct page *pages[MAX_SKB_FRAGS]; 3179 struct splice_pipe_desc spd = { 3180 .pages = pages, 3181 .partial = partial, 3182 .nr_pages_max = MAX_SKB_FRAGS, 3183 .ops = &nosteal_pipe_buf_ops, 3184 .spd_release = sock_spd_release, 3185 }; 3186 int ret = 0; 3187 3188 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3189 3190 if (spd.nr_pages) 3191 ret = splice_to_pipe(pipe, &spd); 3192 3193 return ret; 3194 } 3195 EXPORT_SYMBOL_GPL(skb_splice_bits); 3196 3197 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3198 { 3199 struct socket *sock = sk->sk_socket; 3200 size_t size = msg_data_left(msg); 3201 3202 if (!sock) 3203 return -EINVAL; 3204 3205 if (!sock->ops->sendmsg_locked) 3206 return sock_no_sendmsg_locked(sk, msg, size); 3207 3208 return sock->ops->sendmsg_locked(sk, msg, size); 3209 } 3210 3211 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3212 { 3213 struct socket *sock = sk->sk_socket; 3214 3215 if (!sock) 3216 return -EINVAL; 3217 return sock_sendmsg(sock, msg); 3218 } 3219 3220 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3221 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3222 int len, sendmsg_func sendmsg) 3223 { 3224 unsigned int orig_len = len; 3225 struct sk_buff *head = skb; 3226 unsigned short fragidx; 3227 int slen, ret; 3228 3229 do_frag_list: 3230 3231 /* Deal with head data */ 3232 while (offset < skb_headlen(skb) && len) { 3233 struct kvec kv; 3234 struct msghdr msg; 3235 3236 slen = min_t(int, len, skb_headlen(skb) - offset); 3237 kv.iov_base = skb->data + offset; 3238 kv.iov_len = slen; 3239 memset(&msg, 0, sizeof(msg)); 3240 msg.msg_flags = MSG_DONTWAIT; 3241 3242 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3243 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3244 sendmsg_unlocked, sk, &msg); 3245 if (ret <= 0) 3246 goto error; 3247 3248 offset += ret; 3249 len -= ret; 3250 } 3251 3252 /* All the data was skb head? 
*/ 3253 if (!len) 3254 goto out; 3255 3256 /* Make offset relative to start of frags */ 3257 offset -= skb_headlen(skb); 3258 3259 /* Find where we are in frag list */ 3260 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3261 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3262 3263 if (offset < skb_frag_size(frag)) 3264 break; 3265 3266 offset -= skb_frag_size(frag); 3267 } 3268 3269 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3270 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3271 3272 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3273 3274 while (slen) { 3275 struct bio_vec bvec; 3276 struct msghdr msg = { 3277 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3278 }; 3279 3280 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3281 skb_frag_off(frag) + offset); 3282 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3283 slen); 3284 3285 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3286 sendmsg_unlocked, sk, &msg); 3287 if (ret <= 0) 3288 goto error; 3289 3290 len -= ret; 3291 offset += ret; 3292 slen -= ret; 3293 } 3294 3295 offset = 0; 3296 } 3297 3298 if (len) { 3299 /* Process any frag lists */ 3300 3301 if (skb == head) { 3302 if (skb_has_frag_list(skb)) { 3303 skb = skb_shinfo(skb)->frag_list; 3304 goto do_frag_list; 3305 } 3306 } else if (skb->next) { 3307 skb = skb->next; 3308 goto do_frag_list; 3309 } 3310 } 3311 3312 out: 3313 return orig_len - len; 3314 3315 error: 3316 return orig_len == len ? ret : orig_len - len; 3317 } 3318 3319 /* Send skb data on a socket. Socket must be locked. */ 3320 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3321 int len) 3322 { 3323 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3324 } 3325 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3326 3327 /* Send skb data on a socket. Socket must be unlocked. */ 3328 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3329 { 3330 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3331 } 3332 3333 /** 3334 * skb_store_bits - store bits from kernel buffer to skb 3335 * @skb: destination buffer 3336 * @offset: offset in destination 3337 * @from: source buffer 3338 * @len: number of bytes to copy 3339 * 3340 * Copy the specified number of bytes from the source buffer to the 3341 * destination skb. This function handles all the messy bits of 3342 * traversing fragment lists and such. 
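 *
 * Illustrative use, assuming the caller has already made the skb
 * writable (e.g. with skb_ensure_writable()) and that offset and the
 * new value are its own::
 *
 *	__be16 val = htons(new_id);
 *
 *	if (skb_store_bits(skb, offset, &val, sizeof(val)))
 *		return -EFAULT;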
3343 */ 3344 3345 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3346 { 3347 int start = skb_headlen(skb); 3348 struct sk_buff *frag_iter; 3349 int i, copy; 3350 3351 if (offset > (int)skb->len - len) 3352 goto fault; 3353 3354 if ((copy = start - offset) > 0) { 3355 if (copy > len) 3356 copy = len; 3357 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3358 if ((len -= copy) == 0) 3359 return 0; 3360 offset += copy; 3361 from += copy; 3362 } 3363 3364 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3365 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3366 int end; 3367 3368 WARN_ON(start > offset + len); 3369 3370 end = start + skb_frag_size(frag); 3371 if ((copy = end - offset) > 0) { 3372 u32 p_off, p_len, copied; 3373 struct page *p; 3374 u8 *vaddr; 3375 3376 if (copy > len) 3377 copy = len; 3378 3379 skb_frag_foreach_page(frag, 3380 skb_frag_off(frag) + offset - start, 3381 copy, p, p_off, p_len, copied) { 3382 vaddr = kmap_atomic(p); 3383 memcpy(vaddr + p_off, from + copied, p_len); 3384 kunmap_atomic(vaddr); 3385 } 3386 3387 if ((len -= copy) == 0) 3388 return 0; 3389 offset += copy; 3390 from += copy; 3391 } 3392 start = end; 3393 } 3394 3395 skb_walk_frags(skb, frag_iter) { 3396 int end; 3397 3398 WARN_ON(start > offset + len); 3399 3400 end = start + frag_iter->len; 3401 if ((copy = end - offset) > 0) { 3402 if (copy > len) 3403 copy = len; 3404 if (skb_store_bits(frag_iter, offset - start, 3405 from, copy)) 3406 goto fault; 3407 if ((len -= copy) == 0) 3408 return 0; 3409 offset += copy; 3410 from += copy; 3411 } 3412 start = end; 3413 } 3414 if (!len) 3415 return 0; 3416 3417 fault: 3418 return -EFAULT; 3419 } 3420 EXPORT_SYMBOL(skb_store_bits); 3421 3422 /* Checksum skb data. */ 3423 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3424 __wsum csum, const struct skb_checksum_ops *ops) 3425 { 3426 int start = skb_headlen(skb); 3427 int i, copy = start - offset; 3428 struct sk_buff *frag_iter; 3429 int pos = 0; 3430 3431 /* Checksum header. 
*/ 3432 if (copy > 0) { 3433 if (copy > len) 3434 copy = len; 3435 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3436 skb->data + offset, copy, csum); 3437 if ((len -= copy) == 0) 3438 return csum; 3439 offset += copy; 3440 pos = copy; 3441 } 3442 3443 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3444 int end; 3445 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3446 3447 WARN_ON(start > offset + len); 3448 3449 end = start + skb_frag_size(frag); 3450 if ((copy = end - offset) > 0) { 3451 u32 p_off, p_len, copied; 3452 struct page *p; 3453 __wsum csum2; 3454 u8 *vaddr; 3455 3456 if (copy > len) 3457 copy = len; 3458 3459 skb_frag_foreach_page(frag, 3460 skb_frag_off(frag) + offset - start, 3461 copy, p, p_off, p_len, copied) { 3462 vaddr = kmap_atomic(p); 3463 csum2 = INDIRECT_CALL_1(ops->update, 3464 csum_partial_ext, 3465 vaddr + p_off, p_len, 0); 3466 kunmap_atomic(vaddr); 3467 csum = INDIRECT_CALL_1(ops->combine, 3468 csum_block_add_ext, csum, 3469 csum2, pos, p_len); 3470 pos += p_len; 3471 } 3472 3473 if (!(len -= copy)) 3474 return csum; 3475 offset += copy; 3476 } 3477 start = end; 3478 } 3479 3480 skb_walk_frags(skb, frag_iter) { 3481 int end; 3482 3483 WARN_ON(start > offset + len); 3484 3485 end = start + frag_iter->len; 3486 if ((copy = end - offset) > 0) { 3487 __wsum csum2; 3488 if (copy > len) 3489 copy = len; 3490 csum2 = __skb_checksum(frag_iter, offset - start, 3491 copy, 0, ops); 3492 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3493 csum, csum2, pos, copy); 3494 if ((len -= copy) == 0) 3495 return csum; 3496 offset += copy; 3497 pos += copy; 3498 } 3499 start = end; 3500 } 3501 BUG_ON(len); 3502 3503 return csum; 3504 } 3505 EXPORT_SYMBOL(__skb_checksum); 3506 3507 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3508 int len, __wsum csum) 3509 { 3510 const struct skb_checksum_ops ops = { 3511 .update = csum_partial_ext, 3512 .combine = csum_block_add_ext, 3513 }; 3514 3515 return __skb_checksum(skb, offset, len, csum, &ops); 3516 } 3517 EXPORT_SYMBOL(skb_checksum); 3518 3519 /* Both of above in one bottle. */ 3520 3521 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3522 u8 *to, int len) 3523 { 3524 int start = skb_headlen(skb); 3525 int i, copy = start - offset; 3526 struct sk_buff *frag_iter; 3527 int pos = 0; 3528 __wsum csum = 0; 3529 3530 /* Copy header. 
*/ 3531 if (copy > 0) { 3532 if (copy > len) 3533 copy = len; 3534 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3535 copy); 3536 if ((len -= copy) == 0) 3537 return csum; 3538 offset += copy; 3539 to += copy; 3540 pos = copy; 3541 } 3542 3543 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3544 int end; 3545 3546 WARN_ON(start > offset + len); 3547 3548 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3549 if ((copy = end - offset) > 0) { 3550 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3551 u32 p_off, p_len, copied; 3552 struct page *p; 3553 __wsum csum2; 3554 u8 *vaddr; 3555 3556 if (copy > len) 3557 copy = len; 3558 3559 skb_frag_foreach_page(frag, 3560 skb_frag_off(frag) + offset - start, 3561 copy, p, p_off, p_len, copied) { 3562 vaddr = kmap_atomic(p); 3563 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3564 to + copied, 3565 p_len); 3566 kunmap_atomic(vaddr); 3567 csum = csum_block_add(csum, csum2, pos); 3568 pos += p_len; 3569 } 3570 3571 if (!(len -= copy)) 3572 return csum; 3573 offset += copy; 3574 to += copy; 3575 } 3576 start = end; 3577 } 3578 3579 skb_walk_frags(skb, frag_iter) { 3580 __wsum csum2; 3581 int end; 3582 3583 WARN_ON(start > offset + len); 3584 3585 end = start + frag_iter->len; 3586 if ((copy = end - offset) > 0) { 3587 if (copy > len) 3588 copy = len; 3589 csum2 = skb_copy_and_csum_bits(frag_iter, 3590 offset - start, 3591 to, copy); 3592 csum = csum_block_add(csum, csum2, pos); 3593 if ((len -= copy) == 0) 3594 return csum; 3595 offset += copy; 3596 to += copy; 3597 pos += copy; 3598 } 3599 start = end; 3600 } 3601 BUG_ON(len); 3602 return csum; 3603 } 3604 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3605 3606 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3607 { 3608 __sum16 sum; 3609 3610 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3611 /* See comments in __skb_checksum_complete(). */ 3612 if (likely(!sum)) { 3613 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3614 !skb->csum_complete_sw) 3615 netdev_rx_csum_fault(skb->dev, skb); 3616 } 3617 if (!skb_shared(skb)) 3618 skb->csum_valid = !sum; 3619 return sum; 3620 } 3621 EXPORT_SYMBOL(__skb_checksum_complete_head); 3622 3623 /* This function assumes skb->csum already holds pseudo header's checksum, 3624 * which has been changed from the hardware checksum, for example, by 3625 * __skb_checksum_validate_complete(). And, the original skb->csum must 3626 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3627 * 3628 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3629 * zero. The new checksum is stored back into skb->csum unless the skb is 3630 * shared. 3631 */ 3632 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3633 { 3634 __wsum csum; 3635 __sum16 sum; 3636 3637 csum = skb_checksum(skb, 0, skb->len, 0); 3638 3639 sum = csum_fold(csum_add(skb->csum, csum)); 3640 /* This check is inverted, because we already knew the hardware 3641 * checksum is invalid before calling this function. So, if the 3642 * re-computed checksum is valid instead, then we have a mismatch 3643 * between the original skb->csum and skb_checksum(). This means either 3644 * the original hardware checksum is incorrect or we screw up skb->csum 3645 * when moving skb->data around. 
3646 */ 3647 if (likely(!sum)) { 3648 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3649 !skb->csum_complete_sw) 3650 netdev_rx_csum_fault(skb->dev, skb); 3651 } 3652 3653 if (!skb_shared(skb)) { 3654 /* Save full packet checksum */ 3655 skb->csum = csum; 3656 skb->ip_summed = CHECKSUM_COMPLETE; 3657 skb->csum_complete_sw = 1; 3658 skb->csum_valid = !sum; 3659 } 3660 3661 return sum; 3662 } 3663 EXPORT_SYMBOL(__skb_checksum_complete); 3664 3665 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3666 { 3667 net_warn_ratelimited( 3668 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3669 __func__); 3670 return 0; 3671 } 3672 3673 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3674 int offset, int len) 3675 { 3676 net_warn_ratelimited( 3677 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3678 __func__); 3679 return 0; 3680 } 3681 3682 static const struct skb_checksum_ops default_crc32c_ops = { 3683 .update = warn_crc32c_csum_update, 3684 .combine = warn_crc32c_csum_combine, 3685 }; 3686 3687 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3688 &default_crc32c_ops; 3689 EXPORT_SYMBOL(crc32c_csum_stub); 3690 3691 /** 3692 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3693 * @from: source buffer 3694 * 3695 * Calculates the amount of linear headroom needed in the 'to' skb passed 3696 * into skb_zerocopy(). 3697 */ 3698 unsigned int 3699 skb_zerocopy_headlen(const struct sk_buff *from) 3700 { 3701 unsigned int hlen = 0; 3702 3703 if (!from->head_frag || 3704 skb_headlen(from) < L1_CACHE_BYTES || 3705 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3706 hlen = skb_headlen(from); 3707 if (!hlen) 3708 hlen = from->len; 3709 } 3710 3711 if (skb_has_frag_list(from)) 3712 hlen = from->len; 3713 3714 return hlen; 3715 } 3716 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3717 3718 /** 3719 * skb_zerocopy - Zero copy skb to skb 3720 * @to: destination buffer 3721 * @from: source buffer 3722 * @len: number of bytes to copy from source buffer 3723 * @hlen: size of linear headroom in destination buffer 3724 * 3725 * Copies up to `len` bytes from `from` to `to` by creating references 3726 * to the frags in the source buffer. 3727 * 3728 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3729 * headroom in the `to` buffer. 
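 * A typical (illustrative) calling pattern is therefore::
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (to)
 *		err = skb_zerocopy(to, from, len, hlen);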
3730 * 3731 * Return value: 3732 * 0: everything is OK 3733 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3734 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3735 */ 3736 int 3737 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3738 { 3739 int i, j = 0; 3740 int plen = 0; /* length of skb->head fragment */ 3741 int ret; 3742 struct page *page; 3743 unsigned int offset; 3744 3745 BUG_ON(!from->head_frag && !hlen); 3746 3747 /* dont bother with small payloads */ 3748 if (len <= skb_tailroom(to)) 3749 return skb_copy_bits(from, 0, skb_put(to, len), len); 3750 3751 if (hlen) { 3752 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3753 if (unlikely(ret)) 3754 return ret; 3755 len -= hlen; 3756 } else { 3757 plen = min_t(int, skb_headlen(from), len); 3758 if (plen) { 3759 page = virt_to_head_page(from->head); 3760 offset = from->data - (unsigned char *)page_address(page); 3761 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3762 offset, plen); 3763 get_page(page); 3764 j = 1; 3765 len -= plen; 3766 } 3767 } 3768 3769 skb_len_add(to, len + plen); 3770 3771 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3772 skb_tx_error(from); 3773 return -ENOMEM; 3774 } 3775 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3776 3777 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3778 int size; 3779 3780 if (!len) 3781 break; 3782 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3783 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3784 len); 3785 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3786 len -= size; 3787 skb_frag_ref(to, j); 3788 j++; 3789 } 3790 skb_shinfo(to)->nr_frags = j; 3791 3792 return 0; 3793 } 3794 EXPORT_SYMBOL_GPL(skb_zerocopy); 3795 3796 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3797 { 3798 __wsum csum; 3799 long csstart; 3800 3801 if (skb->ip_summed == CHECKSUM_PARTIAL) 3802 csstart = skb_checksum_start_offset(skb); 3803 else 3804 csstart = skb_headlen(skb); 3805 3806 BUG_ON(csstart > skb_headlen(skb)); 3807 3808 skb_copy_from_linear_data(skb, to, csstart); 3809 3810 csum = 0; 3811 if (csstart != skb->len) 3812 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3813 skb->len - csstart); 3814 3815 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3816 long csstuff = csstart + skb->csum_offset; 3817 3818 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3819 } 3820 } 3821 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3822 3823 /** 3824 * skb_dequeue - remove from the head of the queue 3825 * @list: list to dequeue from 3826 * 3827 * Remove the head of the list. The list lock is taken so the function 3828 * may be used safely with other locking list functions. The head item is 3829 * returned or %NULL if the list is empty. 3830 */ 3831 3832 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3833 { 3834 unsigned long flags; 3835 struct sk_buff *result; 3836 3837 spin_lock_irqsave(&list->lock, flags); 3838 result = __skb_dequeue(list); 3839 spin_unlock_irqrestore(&list->lock, flags); 3840 return result; 3841 } 3842 EXPORT_SYMBOL(skb_dequeue); 3843 3844 /** 3845 * skb_dequeue_tail - remove from the tail of the queue 3846 * @list: list to dequeue from 3847 * 3848 * Remove the tail of the list. The list lock is taken so the function 3849 * may be used safely with other locking list functions. The tail item is 3850 * returned or %NULL if the list is empty. 
3851 */ 3852 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3853 { 3854 unsigned long flags; 3855 struct sk_buff *result; 3856 3857 spin_lock_irqsave(&list->lock, flags); 3858 result = __skb_dequeue_tail(list); 3859 spin_unlock_irqrestore(&list->lock, flags); 3860 return result; 3861 } 3862 EXPORT_SYMBOL(skb_dequeue_tail); 3863 3864 /** 3865 * skb_queue_purge_reason - empty a list 3866 * @list: list to empty 3867 * @reason: drop reason 3868 * 3869 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3870 * the list and one reference dropped. This function takes the list 3871 * lock and is atomic with respect to other list locking functions. 3872 */ 3873 void skb_queue_purge_reason(struct sk_buff_head *list, 3874 enum skb_drop_reason reason) 3875 { 3876 struct sk_buff_head tmp; 3877 unsigned long flags; 3878 3879 if (skb_queue_empty_lockless(list)) 3880 return; 3881 3882 __skb_queue_head_init(&tmp); 3883 3884 spin_lock_irqsave(&list->lock, flags); 3885 skb_queue_splice_init(list, &tmp); 3886 spin_unlock_irqrestore(&list->lock, flags); 3887 3888 __skb_queue_purge_reason(&tmp, reason); 3889 } 3890 EXPORT_SYMBOL(skb_queue_purge_reason); 3891 3892 /** 3893 * skb_rbtree_purge - empty a skb rbtree 3894 * @root: root of the rbtree to empty 3895 * Return value: the sum of truesizes of all purged skbs. 3896 * 3897 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3898 * the list and one reference dropped. This function does not take 3899 * any lock. Synchronization should be handled by the caller (e.g., TCP 3900 * out-of-order queue is protected by the socket lock). 3901 */ 3902 unsigned int skb_rbtree_purge(struct rb_root *root) 3903 { 3904 struct rb_node *p = rb_first(root); 3905 unsigned int sum = 0; 3906 3907 while (p) { 3908 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3909 3910 p = rb_next(p); 3911 rb_erase(&skb->rbnode, root); 3912 sum += skb->truesize; 3913 kfree_skb(skb); 3914 } 3915 return sum; 3916 } 3917 3918 void skb_errqueue_purge(struct sk_buff_head *list) 3919 { 3920 struct sk_buff *skb, *next; 3921 struct sk_buff_head kill; 3922 unsigned long flags; 3923 3924 __skb_queue_head_init(&kill); 3925 3926 spin_lock_irqsave(&list->lock, flags); 3927 skb_queue_walk_safe(list, skb, next) { 3928 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3929 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3930 continue; 3931 __skb_unlink(skb, list); 3932 __skb_queue_tail(&kill, skb); 3933 } 3934 spin_unlock_irqrestore(&list->lock, flags); 3935 __skb_queue_purge(&kill); 3936 } 3937 EXPORT_SYMBOL(skb_errqueue_purge); 3938 3939 /** 3940 * skb_queue_head - queue a buffer at the list head 3941 * @list: list to use 3942 * @newsk: buffer to queue 3943 * 3944 * Queue a buffer at the start of the list. This function takes the 3945 * list lock and can be used safely with other locking &sk_buff functions 3946 * safely. 3947 * 3948 * A buffer cannot be placed on two lists at the same time. 3949 */ 3950 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3951 { 3952 unsigned long flags; 3953 3954 spin_lock_irqsave(&list->lock, flags); 3955 __skb_queue_head(list, newsk); 3956 spin_unlock_irqrestore(&list->lock, flags); 3957 } 3958 EXPORT_SYMBOL(skb_queue_head); 3959 3960 /** 3961 * skb_queue_tail - queue a buffer at the list tail 3962 * @list: list to use 3963 * @newsk: buffer to queue 3964 * 3965 * Queue a buffer at the tail of the list. 
This function takes the 3966 * list lock and can be used safely with other locking &sk_buff functions 3967 * safely. 3968 * 3969 * A buffer cannot be placed on two lists at the same time. 3970 */ 3971 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3972 { 3973 unsigned long flags; 3974 3975 spin_lock_irqsave(&list->lock, flags); 3976 __skb_queue_tail(list, newsk); 3977 spin_unlock_irqrestore(&list->lock, flags); 3978 } 3979 EXPORT_SYMBOL(skb_queue_tail); 3980 3981 /** 3982 * skb_unlink - remove a buffer from a list 3983 * @skb: buffer to remove 3984 * @list: list to use 3985 * 3986 * Remove a packet from a list. The list locks are taken and this 3987 * function is atomic with respect to other list locked calls 3988 * 3989 * You must know what list the SKB is on. 3990 */ 3991 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3992 { 3993 unsigned long flags; 3994 3995 spin_lock_irqsave(&list->lock, flags); 3996 __skb_unlink(skb, list); 3997 spin_unlock_irqrestore(&list->lock, flags); 3998 } 3999 EXPORT_SYMBOL(skb_unlink); 4000 4001 /** 4002 * skb_append - append a buffer 4003 * @old: buffer to insert after 4004 * @newsk: buffer to insert 4005 * @list: list to use 4006 * 4007 * Place a packet after a given packet in a list. The list locks are taken 4008 * and this function is atomic with respect to other list locked calls. 4009 * A buffer cannot be placed on two lists at the same time. 4010 */ 4011 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 4012 { 4013 unsigned long flags; 4014 4015 spin_lock_irqsave(&list->lock, flags); 4016 __skb_queue_after(list, old, newsk); 4017 spin_unlock_irqrestore(&list->lock, flags); 4018 } 4019 EXPORT_SYMBOL(skb_append); 4020 4021 static inline void skb_split_inside_header(struct sk_buff *skb, 4022 struct sk_buff* skb1, 4023 const u32 len, const int pos) 4024 { 4025 int i; 4026 4027 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 4028 pos - len); 4029 /* And move data appendix as is. */ 4030 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 4031 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 4032 4033 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 4034 skb_shinfo(skb)->nr_frags = 0; 4035 skb1->data_len = skb->data_len; 4036 skb1->len += skb1->data_len; 4037 skb->data_len = 0; 4038 skb->len = len; 4039 skb_set_tail_pointer(skb, len); 4040 } 4041 4042 static inline void skb_split_no_header(struct sk_buff *skb, 4043 struct sk_buff* skb1, 4044 const u32 len, int pos) 4045 { 4046 int i, k = 0; 4047 const int nfrags = skb_shinfo(skb)->nr_frags; 4048 4049 skb_shinfo(skb)->nr_frags = 0; 4050 skb1->len = skb1->data_len = skb->len - len; 4051 skb->len = len; 4052 skb->data_len = len - pos; 4053 4054 for (i = 0; i < nfrags; i++) { 4055 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4056 4057 if (pos + size > len) { 4058 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 4059 4060 if (pos < len) { 4061 /* Split frag. 4062 * We have two variants in this case: 4063 * 1. Move all the frag to the second 4064 * part, if it is possible. F.e. 4065 * this approach is mandatory for TUX, 4066 * where splitting is expensive. 4067 * 2. Split is accurately. We make this. 
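 * The code below implements variant 2: the frag page gets an
 * extra reference, skb keeps the first len - pos bytes of
 * this frag and skb1 gets the remainder.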
4068 */ 4069 skb_frag_ref(skb, i); 4070 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 4071 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 4072 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 4073 skb_shinfo(skb)->nr_frags++; 4074 } 4075 k++; 4076 } else 4077 skb_shinfo(skb)->nr_frags++; 4078 pos += size; 4079 } 4080 skb_shinfo(skb1)->nr_frags = k; 4081 } 4082 4083 /** 4084 * skb_split - Split fragmented skb to two parts at length len. 4085 * @skb: the buffer to split 4086 * @skb1: the buffer to receive the second part 4087 * @len: new length for skb 4088 */ 4089 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 4090 { 4091 int pos = skb_headlen(skb); 4092 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 4093 4094 skb_zcopy_downgrade_managed(skb); 4095 4096 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 4097 skb_zerocopy_clone(skb1, skb, 0); 4098 if (len < pos) /* Split line is inside header. */ 4099 skb_split_inside_header(skb, skb1, len, pos); 4100 else /* Second chunk has no header, nothing to copy. */ 4101 skb_split_no_header(skb, skb1, len, pos); 4102 } 4103 EXPORT_SYMBOL(skb_split); 4104 4105 /* Shifting from/to a cloned skb is a no-go. 4106 * 4107 * Caller cannot keep skb_shinfo related pointers past calling here! 4108 */ 4109 static int skb_prepare_for_shift(struct sk_buff *skb) 4110 { 4111 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 4112 } 4113 4114 /** 4115 * skb_shift - Shifts paged data partially from skb to another 4116 * @tgt: buffer into which tail data gets added 4117 * @skb: buffer from which the paged data comes from 4118 * @shiftlen: shift up to this many bytes 4119 * 4120 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4121 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4122 * It's up to caller to free skb if everything was shifted. 4123 * 4124 * If @tgt runs out of frags, the whole operation is aborted. 4125 * 4126 * Skb cannot include anything else but paged data while tgt is allowed 4127 * to have non-paged data as well. 4128 * 4129 * TODO: full sized shift could be optimized but that would need 4130 * specialized skb free'er to handle frags without up-to-date nr_frags. 4131 */ 4132 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4133 { 4134 int from, to, merge, todo; 4135 skb_frag_t *fragfrom, *fragto; 4136 4137 BUG_ON(shiftlen > skb->len); 4138 4139 if (skb_headlen(skb)) 4140 return 0; 4141 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4142 return 0; 4143 4144 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); 4145 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); 4146 4147 todo = shiftlen; 4148 from = 0; 4149 to = skb_shinfo(tgt)->nr_frags; 4150 fragfrom = &skb_shinfo(skb)->frags[from]; 4151 4152 /* Actual merge is delayed until the point when we know we can 4153 * commit all, so that we don't have to undo partial changes 4154 */ 4155 if (!to || 4156 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4157 skb_frag_off(fragfrom))) { 4158 merge = -1; 4159 } else { 4160 merge = to - 1; 4161 4162 todo -= skb_frag_size(fragfrom); 4163 if (todo < 0) { 4164 if (skb_prepare_for_shift(skb) || 4165 skb_prepare_for_shift(tgt)) 4166 return 0; 4167 4168 /* All previous frag pointers might be stale! 
*/ 4169 fragfrom = &skb_shinfo(skb)->frags[from]; 4170 fragto = &skb_shinfo(tgt)->frags[merge]; 4171 4172 skb_frag_size_add(fragto, shiftlen); 4173 skb_frag_size_sub(fragfrom, shiftlen); 4174 skb_frag_off_add(fragfrom, shiftlen); 4175 4176 goto onlymerged; 4177 } 4178 4179 from++; 4180 } 4181 4182 /* Skip full, not-fitting skb to avoid expensive operations */ 4183 if ((shiftlen == skb->len) && 4184 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4185 return 0; 4186 4187 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4188 return 0; 4189 4190 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4191 if (to == MAX_SKB_FRAGS) 4192 return 0; 4193 4194 fragfrom = &skb_shinfo(skb)->frags[from]; 4195 fragto = &skb_shinfo(tgt)->frags[to]; 4196 4197 if (todo >= skb_frag_size(fragfrom)) { 4198 *fragto = *fragfrom; 4199 todo -= skb_frag_size(fragfrom); 4200 from++; 4201 to++; 4202 4203 } else { 4204 __skb_frag_ref(fragfrom); 4205 skb_frag_page_copy(fragto, fragfrom); 4206 skb_frag_off_copy(fragto, fragfrom); 4207 skb_frag_size_set(fragto, todo); 4208 4209 skb_frag_off_add(fragfrom, todo); 4210 skb_frag_size_sub(fragfrom, todo); 4211 todo = 0; 4212 4213 to++; 4214 break; 4215 } 4216 } 4217 4218 /* Ready to "commit" this state change to tgt */ 4219 skb_shinfo(tgt)->nr_frags = to; 4220 4221 if (merge >= 0) { 4222 fragfrom = &skb_shinfo(skb)->frags[0]; 4223 fragto = &skb_shinfo(tgt)->frags[merge]; 4224 4225 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4226 __skb_frag_unref(fragfrom, skb->pp_recycle); 4227 } 4228 4229 /* Reposition in the original skb */ 4230 to = 0; 4231 while (from < skb_shinfo(skb)->nr_frags) 4232 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4233 skb_shinfo(skb)->nr_frags = to; 4234 4235 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4236 4237 onlymerged: 4238 /* Most likely the tgt won't ever need its checksum anymore, skb on 4239 * the other hand might need it if it needs to be resent 4240 */ 4241 tgt->ip_summed = CHECKSUM_PARTIAL; 4242 skb->ip_summed = CHECKSUM_PARTIAL; 4243 4244 skb_len_add(skb, -shiftlen); 4245 skb_len_add(tgt, shiftlen); 4246 4247 return shiftlen; 4248 } 4249 4250 /** 4251 * skb_prepare_seq_read - Prepare a sequential read of skb data 4252 * @skb: the buffer to read 4253 * @from: lower offset of data to be read 4254 * @to: upper offset of data to be read 4255 * @st: state variable 4256 * 4257 * Initializes the specified state variable. Must be called before 4258 * invoking skb_seq_read() for the first time. 4259 */ 4260 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4261 unsigned int to, struct skb_seq_state *st) 4262 { 4263 st->lower_offset = from; 4264 st->upper_offset = to; 4265 st->root_skb = st->cur_skb = skb; 4266 st->frag_idx = st->stepped_offset = 0; 4267 st->frag_data = NULL; 4268 st->frag_off = 0; 4269 } 4270 EXPORT_SYMBOL(skb_prepare_seq_read); 4271 4272 /** 4273 * skb_seq_read - Sequentially read skb data 4274 * @consumed: number of bytes consumed by the caller so far 4275 * @data: destination pointer for data to be returned 4276 * @st: state variable 4277 * 4278 * Reads a block of skb data at @consumed relative to the 4279 * lower offset specified to skb_prepare_seq_read(). Assigns 4280 * the head of the data block to @data and returns the length 4281 * of the block or 0 if the end of the skb data or the upper 4282 * offset has been reached. 4283 * 4284 * The caller is not required to consume all of the data 4285 * returned, i.e. 
@consumed is typically set to the number 4286 * of bytes already consumed and the next call to 4287 * skb_seq_read() will return the remaining part of the block. 4288 * 4289 * Note 1: The size of each block of data returned can be arbitrary, 4290 * this limitation is the cost for zerocopy sequential 4291 * reads of potentially non linear data. 4292 * 4293 * Note 2: Fragment lists within fragments are not implemented 4294 * at the moment, state->root_skb could be replaced with 4295 * a stack for this purpose. 4296 */ 4297 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4298 struct skb_seq_state *st) 4299 { 4300 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4301 skb_frag_t *frag; 4302 4303 if (unlikely(abs_offset >= st->upper_offset)) { 4304 if (st->frag_data) { 4305 kunmap_atomic(st->frag_data); 4306 st->frag_data = NULL; 4307 } 4308 return 0; 4309 } 4310 4311 next_skb: 4312 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4313 4314 if (abs_offset < block_limit && !st->frag_data) { 4315 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4316 return block_limit - abs_offset; 4317 } 4318 4319 if (st->frag_idx == 0 && !st->frag_data) 4320 st->stepped_offset += skb_headlen(st->cur_skb); 4321 4322 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4323 unsigned int pg_idx, pg_off, pg_sz; 4324 4325 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4326 4327 pg_idx = 0; 4328 pg_off = skb_frag_off(frag); 4329 pg_sz = skb_frag_size(frag); 4330 4331 if (skb_frag_must_loop(skb_frag_page(frag))) { 4332 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4333 pg_off = offset_in_page(pg_off + st->frag_off); 4334 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4335 PAGE_SIZE - pg_off); 4336 } 4337 4338 block_limit = pg_sz + st->stepped_offset; 4339 if (abs_offset < block_limit) { 4340 if (!st->frag_data) 4341 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4342 4343 *data = (u8 *)st->frag_data + pg_off + 4344 (abs_offset - st->stepped_offset); 4345 4346 return block_limit - abs_offset; 4347 } 4348 4349 if (st->frag_data) { 4350 kunmap_atomic(st->frag_data); 4351 st->frag_data = NULL; 4352 } 4353 4354 st->stepped_offset += pg_sz; 4355 st->frag_off += pg_sz; 4356 if (st->frag_off == skb_frag_size(frag)) { 4357 st->frag_off = 0; 4358 st->frag_idx++; 4359 } 4360 } 4361 4362 if (st->frag_data) { 4363 kunmap_atomic(st->frag_data); 4364 st->frag_data = NULL; 4365 } 4366 4367 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4368 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4369 st->frag_idx = 0; 4370 goto next_skb; 4371 } else if (st->cur_skb->next) { 4372 st->cur_skb = st->cur_skb->next; 4373 st->frag_idx = 0; 4374 goto next_skb; 4375 } 4376 4377 return 0; 4378 } 4379 EXPORT_SYMBOL(skb_seq_read); 4380 4381 /** 4382 * skb_abort_seq_read - Abort a sequential read of skb data 4383 * @st: state variable 4384 * 4385 * Must be called if skb_seq_read() was not called until it 4386 * returned 0. 
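 *
 * A minimal usage sketch of the whole sequence (illustrative only;
 * want_more() is a hypothetical predicate, not part of this API):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (!want_more(data, len)) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}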
4387 */ 4388 void skb_abort_seq_read(struct skb_seq_state *st) 4389 { 4390 if (st->frag_data) 4391 kunmap_atomic(st->frag_data); 4392 } 4393 EXPORT_SYMBOL(skb_abort_seq_read); 4394 4395 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4396 4397 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4398 struct ts_config *conf, 4399 struct ts_state *state) 4400 { 4401 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4402 } 4403 4404 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4405 { 4406 skb_abort_seq_read(TS_SKB_CB(state)); 4407 } 4408 4409 /** 4410 * skb_find_text - Find a text pattern in skb data 4411 * @skb: the buffer to look in 4412 * @from: search offset 4413 * @to: search limit 4414 * @config: textsearch configuration 4415 * 4416 * Finds a pattern in the skb data according to the specified 4417 * textsearch configuration. Use textsearch_next() to retrieve 4418 * subsequent occurrences of the pattern. Returns the offset 4419 * to the first occurrence or UINT_MAX if no match was found. 4420 */ 4421 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4422 unsigned int to, struct ts_config *config) 4423 { 4424 unsigned int patlen = config->ops->get_pattern_len(config); 4425 struct ts_state state; 4426 unsigned int ret; 4427 4428 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4429 4430 config->get_next_block = skb_ts_get_next_block; 4431 config->finish = skb_ts_finish; 4432 4433 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4434 4435 ret = textsearch_find(config, &state); 4436 return (ret + patlen <= to - from ? ret : UINT_MAX); 4437 } 4438 EXPORT_SYMBOL(skb_find_text); 4439 4440 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4441 int offset, size_t size, size_t max_frags) 4442 { 4443 int i = skb_shinfo(skb)->nr_frags; 4444 4445 if (skb_can_coalesce(skb, i, page, offset)) { 4446 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4447 } else if (i < max_frags) { 4448 skb_zcopy_downgrade_managed(skb); 4449 get_page(page); 4450 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4451 } else { 4452 return -EMSGSIZE; 4453 } 4454 4455 return 0; 4456 } 4457 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4458 4459 /** 4460 * skb_pull_rcsum - pull skb and update receive checksum 4461 * @skb: buffer to update 4462 * @len: length of data pulled 4463 * 4464 * This function performs an skb_pull on the packet and updates 4465 * the CHECKSUM_COMPLETE checksum. It should be used on 4466 * receive path processing instead of skb_pull unless you know 4467 * that the checksum difference is zero (e.g., a valid IP header) 4468 * or you are setting ip_summed to CHECKSUM_NONE. 
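 *
 * A hedged usage sketch, mirroring how skb_vlan_untag() further down in
 * this file strips a VLAN header while keeping a CHECKSUM_COMPLETE value
 * consistent:
 *
 *	vhdr = (struct vlan_hdr *)skb->data;
 *	__vlan_hwaccel_put_tag(skb, skb->protocol, ntohs(vhdr->h_vlan_TCI));
 *	skb_pull_rcsum(skb, VLAN_HLEN);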
4469 */ 4470 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4471 { 4472 unsigned char *data = skb->data; 4473 4474 BUG_ON(len > skb->len); 4475 __skb_pull(skb, len); 4476 skb_postpull_rcsum(skb, data, len); 4477 return skb->data; 4478 } 4479 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4480 4481 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4482 { 4483 skb_frag_t head_frag; 4484 struct page *page; 4485 4486 page = virt_to_head_page(frag_skb->head); 4487 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4488 (unsigned char *)page_address(page), 4489 skb_headlen(frag_skb)); 4490 return head_frag; 4491 } 4492 4493 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4494 netdev_features_t features, 4495 unsigned int offset) 4496 { 4497 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4498 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4499 unsigned int delta_truesize = 0; 4500 unsigned int delta_len = 0; 4501 struct sk_buff *tail = NULL; 4502 struct sk_buff *nskb, *tmp; 4503 int len_diff, err; 4504 4505 skb_push(skb, -skb_network_offset(skb) + offset); 4506 4507 /* Ensure the head is writeable before touching the shared info */ 4508 err = skb_unclone(skb, GFP_ATOMIC); 4509 if (err) 4510 goto err_linearize; 4511 4512 skb_shinfo(skb)->frag_list = NULL; 4513 4514 while (list_skb) { 4515 nskb = list_skb; 4516 list_skb = list_skb->next; 4517 4518 err = 0; 4519 delta_truesize += nskb->truesize; 4520 if (skb_shared(nskb)) { 4521 tmp = skb_clone(nskb, GFP_ATOMIC); 4522 if (tmp) { 4523 consume_skb(nskb); 4524 nskb = tmp; 4525 err = skb_unclone(nskb, GFP_ATOMIC); 4526 } else { 4527 err = -ENOMEM; 4528 } 4529 } 4530 4531 if (!tail) 4532 skb->next = nskb; 4533 else 4534 tail->next = nskb; 4535 4536 if (unlikely(err)) { 4537 nskb->next = list_skb; 4538 goto err_linearize; 4539 } 4540 4541 tail = nskb; 4542 4543 delta_len += nskb->len; 4544 4545 skb_push(nskb, -skb_network_offset(nskb) + offset); 4546 4547 skb_release_head_state(nskb); 4548 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4549 __copy_skb_header(nskb, skb); 4550 4551 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4552 nskb->transport_header += len_diff; 4553 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4554 nskb->data - tnl_hlen, 4555 offset + tnl_hlen); 4556 4557 if (skb_needs_linearize(nskb, features) && 4558 __skb_linearize(nskb)) 4559 goto err_linearize; 4560 } 4561 4562 skb->truesize = skb->truesize - delta_truesize; 4563 skb->data_len = skb->data_len - delta_len; 4564 skb->len = skb->len - delta_len; 4565 4566 skb_gso_reset(skb); 4567 4568 skb->prev = tail; 4569 4570 if (skb_needs_linearize(skb, features) && 4571 __skb_linearize(skb)) 4572 goto err_linearize; 4573 4574 skb_get(skb); 4575 4576 return skb; 4577 4578 err_linearize: 4579 kfree_skb_list(skb->next); 4580 skb->next = NULL; 4581 return ERR_PTR(-ENOMEM); 4582 } 4583 EXPORT_SYMBOL_GPL(skb_segment_list); 4584 4585 /** 4586 * skb_segment - Perform protocol segmentation on skb. 4587 * @head_skb: buffer to segment 4588 * @features: features for the output path (see dev->features) 4589 * 4590 * This function performs segmentation on the given skb. It returns 4591 * a pointer to the first in a list of new skbs for the segments. 4592 * In case of error it returns ERR_PTR(err). 
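 *
 * A minimal caller sketch (illustrative; real users reach this through
 * the GSO layer and simply propagate the ERR_PTR on failure):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;
 *
 * On success the original skb remains owned by the caller and is normally
 * consumed once the new segment list has been handed on.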
4593 */ 4594 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4595 netdev_features_t features) 4596 { 4597 struct sk_buff *segs = NULL; 4598 struct sk_buff *tail = NULL; 4599 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4600 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4601 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4602 unsigned int offset = doffset; 4603 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4604 unsigned int partial_segs = 0; 4605 unsigned int headroom; 4606 unsigned int len = head_skb->len; 4607 struct sk_buff *frag_skb; 4608 skb_frag_t *frag; 4609 __be16 proto; 4610 bool csum, sg; 4611 int err = -ENOMEM; 4612 int i = 0; 4613 int nfrags, pos; 4614 4615 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4616 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4617 struct sk_buff *check_skb; 4618 4619 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4620 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4621 /* gso_size is untrusted, and we have a frag_list with 4622 * a linear non head_frag item. 4623 * 4624 * If head_skb's headlen does not fit requested gso_size, 4625 * it means that the frag_list members do NOT terminate 4626 * on exact gso_size boundaries. Hence we cannot perform 4627 * skb_frag_t page sharing. Therefore we must fallback to 4628 * copying the frag_list skbs; we do so by disabling SG. 4629 */ 4630 features &= ~NETIF_F_SG; 4631 break; 4632 } 4633 } 4634 } 4635 4636 __skb_push(head_skb, doffset); 4637 proto = skb_network_protocol(head_skb, NULL); 4638 if (unlikely(!proto)) 4639 return ERR_PTR(-EINVAL); 4640 4641 sg = !!(features & NETIF_F_SG); 4642 csum = !!can_checksum_protocol(features, proto); 4643 4644 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4645 if (!(features & NETIF_F_GSO_PARTIAL)) { 4646 struct sk_buff *iter; 4647 unsigned int frag_len; 4648 4649 if (!list_skb || 4650 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4651 goto normal; 4652 4653 /* If we get here then all the required 4654 * GSO features except frag_list are supported. 4655 * Try to split the SKB to multiple GSO SKBs 4656 * with no frag_list. 4657 * Currently we can do that only when the buffers don't 4658 * have a linear part and all the buffers except 4659 * the last are of the same length. 4660 */ 4661 frag_len = list_skb->len; 4662 skb_walk_frags(head_skb, iter) { 4663 if (frag_len != iter->len && iter->next) 4664 goto normal; 4665 if (skb_headlen(iter) && !iter->head_frag) 4666 goto normal; 4667 4668 len -= iter->len; 4669 } 4670 4671 if (len != frag_len) 4672 goto normal; 4673 } 4674 4675 /* GSO partial only requires that we trim off any excess that 4676 * doesn't fit into an MSS sized block, so take care of that 4677 * now. 4678 * Cap len to not accidentally hit GSO_BY_FRAGS. 
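 * For concreteness (illustrative numbers only): with len = 65000 and
 * mss = 1448, partial_segs below becomes 44 and the working mss grows
 * to 44 * 1448 = 63712; the 1288 byte remainder ends up in a shorter
 * final segment that is patched up near the end of this function.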
4679 */ 4680 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4681 if (partial_segs > 1) 4682 mss *= partial_segs; 4683 else 4684 partial_segs = 0; 4685 } 4686 4687 normal: 4688 headroom = skb_headroom(head_skb); 4689 pos = skb_headlen(head_skb); 4690 4691 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4692 return ERR_PTR(-ENOMEM); 4693 4694 nfrags = skb_shinfo(head_skb)->nr_frags; 4695 frag = skb_shinfo(head_skb)->frags; 4696 frag_skb = head_skb; 4697 4698 do { 4699 struct sk_buff *nskb; 4700 skb_frag_t *nskb_frag; 4701 int hsize; 4702 int size; 4703 4704 if (unlikely(mss == GSO_BY_FRAGS)) { 4705 len = list_skb->len; 4706 } else { 4707 len = head_skb->len - offset; 4708 if (len > mss) 4709 len = mss; 4710 } 4711 4712 hsize = skb_headlen(head_skb) - offset; 4713 4714 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4715 (skb_headlen(list_skb) == len || sg)) { 4716 BUG_ON(skb_headlen(list_skb) > len); 4717 4718 nskb = skb_clone(list_skb, GFP_ATOMIC); 4719 if (unlikely(!nskb)) 4720 goto err; 4721 4722 i = 0; 4723 nfrags = skb_shinfo(list_skb)->nr_frags; 4724 frag = skb_shinfo(list_skb)->frags; 4725 frag_skb = list_skb; 4726 pos += skb_headlen(list_skb); 4727 4728 while (pos < offset + len) { 4729 BUG_ON(i >= nfrags); 4730 4731 size = skb_frag_size(frag); 4732 if (pos + size > offset + len) 4733 break; 4734 4735 i++; 4736 pos += size; 4737 frag++; 4738 } 4739 4740 list_skb = list_skb->next; 4741 4742 if (unlikely(pskb_trim(nskb, len))) { 4743 kfree_skb(nskb); 4744 goto err; 4745 } 4746 4747 hsize = skb_end_offset(nskb); 4748 if (skb_cow_head(nskb, doffset + headroom)) { 4749 kfree_skb(nskb); 4750 goto err; 4751 } 4752 4753 nskb->truesize += skb_end_offset(nskb) - hsize; 4754 skb_release_head_state(nskb); 4755 __skb_push(nskb, doffset); 4756 } else { 4757 if (hsize < 0) 4758 hsize = 0; 4759 if (hsize > len || !sg) 4760 hsize = len; 4761 4762 nskb = __alloc_skb(hsize + doffset + headroom, 4763 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4764 NUMA_NO_NODE); 4765 4766 if (unlikely(!nskb)) 4767 goto err; 4768 4769 skb_reserve(nskb, headroom); 4770 __skb_put(nskb, doffset); 4771 } 4772 4773 if (segs) 4774 tail->next = nskb; 4775 else 4776 segs = nskb; 4777 tail = nskb; 4778 4779 __copy_skb_header(nskb, head_skb); 4780 4781 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4782 skb_reset_mac_len(nskb); 4783 4784 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4785 nskb->data - tnl_hlen, 4786 doffset + tnl_hlen); 4787 4788 if (nskb->len == len + doffset) 4789 goto perform_csum_check; 4790 4791 if (!sg) { 4792 if (!csum) { 4793 if (!nskb->remcsum_offload) 4794 nskb->ip_summed = CHECKSUM_NONE; 4795 SKB_GSO_CB(nskb)->csum = 4796 skb_copy_and_csum_bits(head_skb, offset, 4797 skb_put(nskb, 4798 len), 4799 len); 4800 SKB_GSO_CB(nskb)->csum_start = 4801 skb_headroom(nskb) + doffset; 4802 } else { 4803 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4804 goto err; 4805 } 4806 continue; 4807 } 4808 4809 nskb_frag = skb_shinfo(nskb)->frags; 4810 4811 skb_copy_from_linear_data_offset(head_skb, offset, 4812 skb_put(nskb, hsize), hsize); 4813 4814 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4815 SKBFL_SHARED_FRAG; 4816 4817 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4818 goto err; 4819 4820 while (pos < offset + len) { 4821 if (i >= nfrags) { 4822 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4823 skb_zerocopy_clone(nskb, list_skb, 4824 GFP_ATOMIC)) 4825 goto err; 4826 4827 i = 0; 4828 nfrags = skb_shinfo(list_skb)->nr_frags; 4829 frag = 
skb_shinfo(list_skb)->frags; 4830 frag_skb = list_skb; 4831 if (!skb_headlen(list_skb)) { 4832 BUG_ON(!nfrags); 4833 } else { 4834 BUG_ON(!list_skb->head_frag); 4835 4836 /* to make room for head_frag. */ 4837 i--; 4838 frag--; 4839 } 4840 4841 list_skb = list_skb->next; 4842 } 4843 4844 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4845 MAX_SKB_FRAGS)) { 4846 net_warn_ratelimited( 4847 "skb_segment: too many frags: %u %u\n", 4848 pos, mss); 4849 err = -EINVAL; 4850 goto err; 4851 } 4852 4853 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4854 __skb_frag_ref(nskb_frag); 4855 size = skb_frag_size(nskb_frag); 4856 4857 if (pos < offset) { 4858 skb_frag_off_add(nskb_frag, offset - pos); 4859 skb_frag_size_sub(nskb_frag, offset - pos); 4860 } 4861 4862 skb_shinfo(nskb)->nr_frags++; 4863 4864 if (pos + size <= offset + len) { 4865 i++; 4866 frag++; 4867 pos += size; 4868 } else { 4869 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4870 goto skip_fraglist; 4871 } 4872 4873 nskb_frag++; 4874 } 4875 4876 skip_fraglist: 4877 nskb->data_len = len - hsize; 4878 nskb->len += nskb->data_len; 4879 nskb->truesize += nskb->data_len; 4880 4881 perform_csum_check: 4882 if (!csum) { 4883 if (skb_has_shared_frag(nskb) && 4884 __skb_linearize(nskb)) 4885 goto err; 4886 4887 if (!nskb->remcsum_offload) 4888 nskb->ip_summed = CHECKSUM_NONE; 4889 SKB_GSO_CB(nskb)->csum = 4890 skb_checksum(nskb, doffset, 4891 nskb->len - doffset, 0); 4892 SKB_GSO_CB(nskb)->csum_start = 4893 skb_headroom(nskb) + doffset; 4894 } 4895 } while ((offset += len) < head_skb->len); 4896 4897 /* Some callers want to get the end of the list. 4898 * Put it in segs->prev to avoid walking the list. 4899 * (see validate_xmit_skb_list() for example) 4900 */ 4901 segs->prev = tail; 4902 4903 if (partial_segs) { 4904 struct sk_buff *iter; 4905 int type = skb_shinfo(head_skb)->gso_type; 4906 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4907 4908 /* Update type to add partial and then remove dodgy if set */ 4909 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4910 type &= ~SKB_GSO_DODGY; 4911 4912 /* Update GSO info and prepare to start updating headers on 4913 * our way back down the stack of protocols. 4914 */ 4915 for (iter = segs; iter; iter = iter->next) { 4916 skb_shinfo(iter)->gso_size = gso_size; 4917 skb_shinfo(iter)->gso_segs = partial_segs; 4918 skb_shinfo(iter)->gso_type = type; 4919 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4920 } 4921 4922 if (tail->len - doffset <= gso_size) 4923 skb_shinfo(tail)->gso_size = 0; 4924 else if (tail != segs) 4925 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4926 } 4927 4928 /* Following permits correct backpressure, for protocols 4929 * using skb_set_owner_w(). 4930 * Idea is to tranfert ownership from head_skb to last segment. 
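 * In other words: when head_skb was charged to a socket with
 * skb_set_owner_w() (destructor == sock_wfree), the truesize charge,
 * destructor and socket pointer are swapped onto the last segment below,
 * so sk_wmem_alloc is only released once that final segment is freed.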
4931 */ 4932 if (head_skb->destructor == sock_wfree) { 4933 swap(tail->truesize, head_skb->truesize); 4934 swap(tail->destructor, head_skb->destructor); 4935 swap(tail->sk, head_skb->sk); 4936 } 4937 return segs; 4938 4939 err: 4940 kfree_skb_list(segs); 4941 return ERR_PTR(err); 4942 } 4943 EXPORT_SYMBOL_GPL(skb_segment); 4944 4945 #ifdef CONFIG_SKB_EXTENSIONS 4946 #define SKB_EXT_ALIGN_VALUE 8 4947 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4948 4949 static const u8 skb_ext_type_len[] = { 4950 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4951 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4952 #endif 4953 #ifdef CONFIG_XFRM 4954 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4955 #endif 4956 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4957 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4958 #endif 4959 #if IS_ENABLED(CONFIG_MPTCP) 4960 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4961 #endif 4962 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4963 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 4964 #endif 4965 }; 4966 4967 static __always_inline unsigned int skb_ext_total_length(void) 4968 { 4969 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 4970 int i; 4971 4972 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 4973 l += skb_ext_type_len[i]; 4974 4975 return l; 4976 } 4977 4978 static void skb_extensions_init(void) 4979 { 4980 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4981 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4982 BUILD_BUG_ON(skb_ext_total_length() > 255); 4983 #endif 4984 4985 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4986 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4987 0, 4988 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4989 NULL); 4990 } 4991 #else 4992 static void skb_extensions_init(void) {} 4993 #endif 4994 4995 /* The SKB kmem_cache slab is critical for network performance. Never 4996 * merge/alias the slab with similar sized objects. This avoids fragmentation 4997 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 4998 */ 4999 #ifndef CONFIG_SLUB_TINY 5000 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 5001 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 5002 #define FLAG_SKB_NO_MERGE 0 5003 #endif 5004 5005 void __init skb_init(void) 5006 { 5007 net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 5008 sizeof(struct sk_buff), 5009 0, 5010 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 5011 FLAG_SKB_NO_MERGE, 5012 offsetof(struct sk_buff, cb), 5013 sizeof_field(struct sk_buff, cb), 5014 NULL); 5015 net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 5016 sizeof(struct sk_buff_fclones), 5017 0, 5018 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5019 NULL); 5020 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 5021 * struct skb_shared_info is located at the end of skb->head, 5022 * and should not be copied to/from user. 
5023 */ 5024 net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 5025 SKB_SMALL_HEAD_CACHE_SIZE, 5026 0, 5027 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 5028 0, 5029 SKB_SMALL_HEAD_HEADROOM, 5030 NULL); 5031 skb_extensions_init(); 5032 } 5033 5034 static int 5035 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 5036 unsigned int recursion_level) 5037 { 5038 int start = skb_headlen(skb); 5039 int i, copy = start - offset; 5040 struct sk_buff *frag_iter; 5041 int elt = 0; 5042 5043 if (unlikely(recursion_level >= 24)) 5044 return -EMSGSIZE; 5045 5046 if (copy > 0) { 5047 if (copy > len) 5048 copy = len; 5049 sg_set_buf(sg, skb->data + offset, copy); 5050 elt++; 5051 if ((len -= copy) == 0) 5052 return elt; 5053 offset += copy; 5054 } 5055 5056 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5057 int end; 5058 5059 WARN_ON(start > offset + len); 5060 5061 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 5062 if ((copy = end - offset) > 0) { 5063 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5064 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5065 return -EMSGSIZE; 5066 5067 if (copy > len) 5068 copy = len; 5069 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 5070 skb_frag_off(frag) + offset - start); 5071 elt++; 5072 if (!(len -= copy)) 5073 return elt; 5074 offset += copy; 5075 } 5076 start = end; 5077 } 5078 5079 skb_walk_frags(skb, frag_iter) { 5080 int end, ret; 5081 5082 WARN_ON(start > offset + len); 5083 5084 end = start + frag_iter->len; 5085 if ((copy = end - offset) > 0) { 5086 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5087 return -EMSGSIZE; 5088 5089 if (copy > len) 5090 copy = len; 5091 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 5092 copy, recursion_level + 1); 5093 if (unlikely(ret < 0)) 5094 return ret; 5095 elt += ret; 5096 if ((len -= copy) == 0) 5097 return elt; 5098 offset += copy; 5099 } 5100 start = end; 5101 } 5102 BUG_ON(len); 5103 return elt; 5104 } 5105 5106 /** 5107 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 5108 * @skb: Socket buffer containing the buffers to be mapped 5109 * @sg: The scatter-gather list to map into 5110 * @offset: The offset into the buffer's contents to start mapping 5111 * @len: Length of buffer space to be mapped 5112 * 5113 * Fill the specified scatter-gather list with mappings/pointers into a 5114 * region of the buffer space attached to a socket buffer. Returns either 5115 * the number of scatterlist items used, or -EMSGSIZE if the contents 5116 * could not fit. 5117 */ 5118 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5119 { 5120 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5121 5122 if (nsg <= 0) 5123 return nsg; 5124 5125 sg_mark_end(&sg[nsg - 1]); 5126 5127 return nsg; 5128 } 5129 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5130 5131 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 5132 * sglist without mark the sg which contain last skb data as the end. 5133 * So the caller can mannipulate sg list as will when padding new data after 5134 * the first call without calling sg_unmark_end to expend sg list. 5135 * 5136 * Scenario to use skb_to_sgvec_nomark: 5137 * 1. sg_init_table 5138 * 2. skb_to_sgvec_nomark(payload1) 5139 * 3. skb_to_sgvec_nomark(payload2) 5140 * 5141 * This is equivalent to: 5142 * 1. sg_init_table 5143 * 2. skb_to_sgvec(payload1) 5144 * 3. sg_unmark_end 5145 * 4. 
skb_to_sgvec(payload2) 5146 * 5147 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 5148 * is more preferable. 5149 */ 5150 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5151 int offset, int len) 5152 { 5153 return __skb_to_sgvec(skb, sg, offset, len, 0); 5154 } 5155 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5156 5157 5158 5159 /** 5160 * skb_cow_data - Check that a socket buffer's data buffers are writable 5161 * @skb: The socket buffer to check. 5162 * @tailbits: Amount of trailing space to be added 5163 * @trailer: Returned pointer to the skb where the @tailbits space begins 5164 * 5165 * Make sure that the data buffers attached to a socket buffer are 5166 * writable. If they are not, private copies are made of the data buffers 5167 * and the socket buffer is set to use these instead. 5168 * 5169 * If @tailbits is given, make sure that there is space to write @tailbits 5170 * bytes of data beyond current end of socket buffer. @trailer will be 5171 * set to point to the skb in which this space begins. 5172 * 5173 * The number of scatterlist elements required to completely map the 5174 * COW'd and extended socket buffer will be returned. 5175 */ 5176 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5177 { 5178 int copyflag; 5179 int elt; 5180 struct sk_buff *skb1, **skb_p; 5181 5182 /* If skb is cloned or its head is paged, reallocate 5183 * head pulling out all the pages (pages are considered not writable 5184 * at the moment even if they are anonymous). 5185 */ 5186 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5187 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5188 return -ENOMEM; 5189 5190 /* Easy case. Most of packets will go this way. */ 5191 if (!skb_has_frag_list(skb)) { 5192 /* A little of trouble, not enough of space for trailer. 5193 * This should not happen, when stack is tuned to generate 5194 * good frames. OK, on miss we reallocate and reserve even more 5195 * space, 128 bytes is fair. */ 5196 5197 if (skb_tailroom(skb) < tailbits && 5198 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5199 return -ENOMEM; 5200 5201 /* Voila! */ 5202 *trailer = skb; 5203 return 1; 5204 } 5205 5206 /* Misery. We are in troubles, going to mincer fragments... */ 5207 5208 elt = 1; 5209 skb_p = &skb_shinfo(skb)->frag_list; 5210 copyflag = 0; 5211 5212 while ((skb1 = *skb_p) != NULL) { 5213 int ntail = 0; 5214 5215 /* The fragment is partially pulled by someone, 5216 * this can happen on input. Copy it and everything 5217 * after it. */ 5218 5219 if (skb_shared(skb1)) 5220 copyflag = 1; 5221 5222 /* If the skb is the last, worry about trailer. */ 5223 5224 if (skb1->next == NULL && tailbits) { 5225 if (skb_shinfo(skb1)->nr_frags || 5226 skb_has_frag_list(skb1) || 5227 skb_tailroom(skb1) < tailbits) 5228 ntail = tailbits + 128; 5229 } 5230 5231 if (copyflag || 5232 skb_cloned(skb1) || 5233 ntail || 5234 skb_shinfo(skb1)->nr_frags || 5235 skb_has_frag_list(skb1)) { 5236 struct sk_buff *skb2; 5237 5238 /* Fuck, we are miserable poor guys... */ 5239 if (ntail == 0) 5240 skb2 = skb_copy(skb1, GFP_ATOMIC); 5241 else 5242 skb2 = skb_copy_expand(skb1, 5243 skb_headroom(skb1), 5244 ntail, 5245 GFP_ATOMIC); 5246 if (unlikely(skb2 == NULL)) 5247 return -ENOMEM; 5248 5249 if (skb1->sk) 5250 skb_set_owner_w(skb2, skb1->sk); 5251 5252 /* Looking around. Are we still alive? 
5253 * OK, link new skb, drop old one */ 5254 5255 skb2->next = skb1->next; 5256 *skb_p = skb2; 5257 kfree_skb(skb1); 5258 skb1 = skb2; 5259 } 5260 elt++; 5261 *trailer = skb1; 5262 skb_p = &skb1->next; 5263 } 5264 5265 return elt; 5266 } 5267 EXPORT_SYMBOL_GPL(skb_cow_data); 5268 5269 static void sock_rmem_free(struct sk_buff *skb) 5270 { 5271 struct sock *sk = skb->sk; 5272 5273 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5274 } 5275 5276 static void skb_set_err_queue(struct sk_buff *skb) 5277 { 5278 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5279 * So, it is safe to (mis)use it to mark skbs on the error queue. 5280 */ 5281 skb->pkt_type = PACKET_OUTGOING; 5282 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5283 } 5284 5285 /* 5286 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5287 */ 5288 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5289 { 5290 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5291 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5292 return -ENOMEM; 5293 5294 skb_orphan(skb); 5295 skb->sk = sk; 5296 skb->destructor = sock_rmem_free; 5297 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5298 skb_set_err_queue(skb); 5299 5300 /* before exiting rcu section, make sure dst is refcounted */ 5301 skb_dst_force(skb); 5302 5303 skb_queue_tail(&sk->sk_error_queue, skb); 5304 if (!sock_flag(sk, SOCK_DEAD)) 5305 sk_error_report(sk); 5306 return 0; 5307 } 5308 EXPORT_SYMBOL(sock_queue_err_skb); 5309 5310 static bool is_icmp_err_skb(const struct sk_buff *skb) 5311 { 5312 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5313 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5314 } 5315 5316 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5317 { 5318 struct sk_buff_head *q = &sk->sk_error_queue; 5319 struct sk_buff *skb, *skb_next = NULL; 5320 bool icmp_next = false; 5321 unsigned long flags; 5322 5323 if (skb_queue_empty_lockless(q)) 5324 return NULL; 5325 5326 spin_lock_irqsave(&q->lock, flags); 5327 skb = __skb_dequeue(q); 5328 if (skb && (skb_next = skb_peek(q))) { 5329 icmp_next = is_icmp_err_skb(skb_next); 5330 if (icmp_next) 5331 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5332 } 5333 spin_unlock_irqrestore(&q->lock, flags); 5334 5335 if (is_icmp_err_skb(skb) && !icmp_next) 5336 sk->sk_err = 0; 5337 5338 if (skb_next) 5339 sk_error_report(sk); 5340 5341 return skb; 5342 } 5343 EXPORT_SYMBOL(sock_dequeue_err_skb); 5344 5345 /** 5346 * skb_clone_sk - create clone of skb, and take reference to socket 5347 * @skb: the skb to clone 5348 * 5349 * This function creates a clone of a buffer that holds a reference on 5350 * sk_refcnt. Buffers created via this function are meant to be 5351 * returned using sock_queue_err_skb, or free via kfree_skb. 5352 * 5353 * When passing buffers allocated with this function to sock_queue_err_skb 5354 * it is necessary to wrap the call with sock_hold/sock_put in order to 5355 * prevent the socket from being released prior to being enqueued on 5356 * the sk_error_queue. 
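 *
 * A hedged sketch of that pattern (hypothetical caller):
 *
 *	clone = skb_clone_sk(skb);
 *	if (clone) {
 *		struct sock *sk = clone->sk;
 *
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}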
5357 */ 5358 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5359 { 5360 struct sock *sk = skb->sk; 5361 struct sk_buff *clone; 5362 5363 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5364 return NULL; 5365 5366 clone = skb_clone(skb, GFP_ATOMIC); 5367 if (!clone) { 5368 sock_put(sk); 5369 return NULL; 5370 } 5371 5372 clone->sk = sk; 5373 clone->destructor = sock_efree; 5374 5375 return clone; 5376 } 5377 EXPORT_SYMBOL(skb_clone_sk); 5378 5379 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5380 struct sock *sk, 5381 int tstype, 5382 bool opt_stats) 5383 { 5384 struct sock_exterr_skb *serr; 5385 int err; 5386 5387 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5388 5389 serr = SKB_EXT_ERR(skb); 5390 memset(serr, 0, sizeof(*serr)); 5391 serr->ee.ee_errno = ENOMSG; 5392 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5393 serr->ee.ee_info = tstype; 5394 serr->opt_stats = opt_stats; 5395 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5396 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5397 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5398 if (sk_is_tcp(sk)) 5399 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5400 } 5401 5402 err = sock_queue_err_skb(sk, skb); 5403 5404 if (err) 5405 kfree_skb(skb); 5406 } 5407 5408 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5409 { 5410 bool ret; 5411 5412 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) 5413 return true; 5414 5415 read_lock_bh(&sk->sk_callback_lock); 5416 ret = sk->sk_socket && sk->sk_socket->file && 5417 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5418 read_unlock_bh(&sk->sk_callback_lock); 5419 return ret; 5420 } 5421 5422 void skb_complete_tx_timestamp(struct sk_buff *skb, 5423 struct skb_shared_hwtstamps *hwtstamps) 5424 { 5425 struct sock *sk = skb->sk; 5426 5427 if (!skb_may_tx_timestamp(sk, false)) 5428 goto err; 5429 5430 /* Take a reference to prevent skb_orphan() from freeing the socket, 5431 * but only if the socket refcount is not zero. 
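 * For buffers created with skb_clone_sk(), the orphaning happens inside
 * sock_queue_err_skb(): it runs the clone's sock_efree destructor and so
 * drops the reference skb_clone_sk() took; the extra reference taken
 * below is what keeps sk usable until the final sock_put().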
5432 */ 5433 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5434 *skb_hwtstamps(skb) = *hwtstamps; 5435 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5436 sock_put(sk); 5437 return; 5438 } 5439 5440 err: 5441 kfree_skb(skb); 5442 } 5443 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5444 5445 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5446 const struct sk_buff *ack_skb, 5447 struct skb_shared_hwtstamps *hwtstamps, 5448 struct sock *sk, int tstype) 5449 { 5450 struct sk_buff *skb; 5451 bool tsonly, opt_stats = false; 5452 u32 tsflags; 5453 5454 if (!sk) 5455 return; 5456 5457 tsflags = READ_ONCE(sk->sk_tsflags); 5458 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5459 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5460 return; 5461 5462 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5463 if (!skb_may_tx_timestamp(sk, tsonly)) 5464 return; 5465 5466 if (tsonly) { 5467 #ifdef CONFIG_INET 5468 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5469 sk_is_tcp(sk)) { 5470 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5471 ack_skb); 5472 opt_stats = true; 5473 } else 5474 #endif 5475 skb = alloc_skb(0, GFP_ATOMIC); 5476 } else { 5477 skb = skb_clone(orig_skb, GFP_ATOMIC); 5478 5479 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5480 kfree_skb(skb); 5481 return; 5482 } 5483 } 5484 if (!skb) 5485 return; 5486 5487 if (tsonly) { 5488 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5489 SKBTX_ANY_TSTAMP; 5490 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5491 } 5492 5493 if (hwtstamps) 5494 *skb_hwtstamps(skb) = *hwtstamps; 5495 else 5496 __net_timestamp(skb); 5497 5498 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5499 } 5500 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5501 5502 void skb_tstamp_tx(struct sk_buff *orig_skb, 5503 struct skb_shared_hwtstamps *hwtstamps) 5504 { 5505 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5506 SCM_TSTAMP_SND); 5507 } 5508 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5509 5510 #ifdef CONFIG_WIRELESS 5511 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5512 { 5513 struct sock *sk = skb->sk; 5514 struct sock_exterr_skb *serr; 5515 int err = 1; 5516 5517 skb->wifi_acked_valid = 1; 5518 skb->wifi_acked = acked; 5519 5520 serr = SKB_EXT_ERR(skb); 5521 memset(serr, 0, sizeof(*serr)); 5522 serr->ee.ee_errno = ENOMSG; 5523 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5524 5525 /* Take a reference to prevent skb_orphan() from freeing the socket, 5526 * but only if the socket refcount is not zero. 5527 */ 5528 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5529 err = sock_queue_err_skb(sk, skb); 5530 sock_put(sk); 5531 } 5532 if (err) 5533 kfree_skb(skb); 5534 } 5535 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5536 #endif /* CONFIG_WIRELESS */ 5537 5538 /** 5539 * skb_partial_csum_set - set up and verify partial csum values for packet 5540 * @skb: the skb to set 5541 * @start: the number of bytes after skb->data to start checksumming. 5542 * @off: the offset from start to place the checksum. 5543 * 5544 * For untrusted partially-checksummed packets, we need to make sure the values 5545 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5546 * 5547 * This function checks and sets those values and skb->ip_summed: if this 5548 * returns false you should drop the packet. 
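 *
 * A hedged usage sketch (hypothetical caller validating a csum_start and
 * csum_offset pair taken from an untrusted descriptor):
 *
 *	if (!skb_partial_csum_set(skb, start, off)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}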
5549 */ 5550 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5551 { 5552 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5553 u32 csum_start = skb_headroom(skb) + (u32)start; 5554 5555 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5556 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5557 start, off, skb_headroom(skb), skb_headlen(skb)); 5558 return false; 5559 } 5560 skb->ip_summed = CHECKSUM_PARTIAL; 5561 skb->csum_start = csum_start; 5562 skb->csum_offset = off; 5563 skb->transport_header = csum_start; 5564 return true; 5565 } 5566 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5567 5568 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5569 unsigned int max) 5570 { 5571 if (skb_headlen(skb) >= len) 5572 return 0; 5573 5574 /* If we need to pullup then pullup to the max, so we 5575 * won't need to do it again. 5576 */ 5577 if (max > skb->len) 5578 max = skb->len; 5579 5580 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5581 return -ENOMEM; 5582 5583 if (skb_headlen(skb) < len) 5584 return -EPROTO; 5585 5586 return 0; 5587 } 5588 5589 #define MAX_TCP_HDR_LEN (15 * 4) 5590 5591 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5592 typeof(IPPROTO_IP) proto, 5593 unsigned int off) 5594 { 5595 int err; 5596 5597 switch (proto) { 5598 case IPPROTO_TCP: 5599 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5600 off + MAX_TCP_HDR_LEN); 5601 if (!err && !skb_partial_csum_set(skb, off, 5602 offsetof(struct tcphdr, 5603 check))) 5604 err = -EPROTO; 5605 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5606 5607 case IPPROTO_UDP: 5608 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5609 off + sizeof(struct udphdr)); 5610 if (!err && !skb_partial_csum_set(skb, off, 5611 offsetof(struct udphdr, 5612 check))) 5613 err = -EPROTO; 5614 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5615 } 5616 5617 return ERR_PTR(-EPROTO); 5618 } 5619 5620 /* This value should be large enough to cover a tagged ethernet header plus 5621 * maximally sized IP and TCP or UDP headers. 5622 */ 5623 #define MAX_IP_HDR_LEN 128 5624 5625 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5626 { 5627 unsigned int off; 5628 bool fragment; 5629 __sum16 *csum; 5630 int err; 5631 5632 fragment = false; 5633 5634 err = skb_maybe_pull_tail(skb, 5635 sizeof(struct iphdr), 5636 MAX_IP_HDR_LEN); 5637 if (err < 0) 5638 goto out; 5639 5640 if (ip_is_fragment(ip_hdr(skb))) 5641 fragment = true; 5642 5643 off = ip_hdrlen(skb); 5644 5645 err = -EPROTO; 5646 5647 if (fragment) 5648 goto out; 5649 5650 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5651 if (IS_ERR(csum)) 5652 return PTR_ERR(csum); 5653 5654 if (recalculate) 5655 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5656 ip_hdr(skb)->daddr, 5657 skb->len - off, 5658 ip_hdr(skb)->protocol, 0); 5659 err = 0; 5660 5661 out: 5662 return err; 5663 } 5664 5665 /* This value should be large enough to cover a tagged ethernet header plus 5666 * an IPv6 header, all options, and a maximal TCP or UDP header. 
5667 */ 5668 #define MAX_IPV6_HDR_LEN 256 5669 5670 #define OPT_HDR(type, skb, off) \ 5671 (type *)(skb_network_header(skb) + (off)) 5672 5673 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5674 { 5675 int err; 5676 u8 nexthdr; 5677 unsigned int off; 5678 unsigned int len; 5679 bool fragment; 5680 bool done; 5681 __sum16 *csum; 5682 5683 fragment = false; 5684 done = false; 5685 5686 off = sizeof(struct ipv6hdr); 5687 5688 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5689 if (err < 0) 5690 goto out; 5691 5692 nexthdr = ipv6_hdr(skb)->nexthdr; 5693 5694 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5695 while (off <= len && !done) { 5696 switch (nexthdr) { 5697 case IPPROTO_DSTOPTS: 5698 case IPPROTO_HOPOPTS: 5699 case IPPROTO_ROUTING: { 5700 struct ipv6_opt_hdr *hp; 5701 5702 err = skb_maybe_pull_tail(skb, 5703 off + 5704 sizeof(struct ipv6_opt_hdr), 5705 MAX_IPV6_HDR_LEN); 5706 if (err < 0) 5707 goto out; 5708 5709 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5710 nexthdr = hp->nexthdr; 5711 off += ipv6_optlen(hp); 5712 break; 5713 } 5714 case IPPROTO_AH: { 5715 struct ip_auth_hdr *hp; 5716 5717 err = skb_maybe_pull_tail(skb, 5718 off + 5719 sizeof(struct ip_auth_hdr), 5720 MAX_IPV6_HDR_LEN); 5721 if (err < 0) 5722 goto out; 5723 5724 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5725 nexthdr = hp->nexthdr; 5726 off += ipv6_authlen(hp); 5727 break; 5728 } 5729 case IPPROTO_FRAGMENT: { 5730 struct frag_hdr *hp; 5731 5732 err = skb_maybe_pull_tail(skb, 5733 off + 5734 sizeof(struct frag_hdr), 5735 MAX_IPV6_HDR_LEN); 5736 if (err < 0) 5737 goto out; 5738 5739 hp = OPT_HDR(struct frag_hdr, skb, off); 5740 5741 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5742 fragment = true; 5743 5744 nexthdr = hp->nexthdr; 5745 off += sizeof(struct frag_hdr); 5746 break; 5747 } 5748 default: 5749 done = true; 5750 break; 5751 } 5752 } 5753 5754 err = -EPROTO; 5755 5756 if (!done || fragment) 5757 goto out; 5758 5759 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5760 if (IS_ERR(csum)) 5761 return PTR_ERR(csum); 5762 5763 if (recalculate) 5764 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5765 &ipv6_hdr(skb)->daddr, 5766 skb->len - off, nexthdr, 0); 5767 err = 0; 5768 5769 out: 5770 return err; 5771 } 5772 5773 /** 5774 * skb_checksum_setup - set up partial checksum offset 5775 * @skb: the skb to set up 5776 * @recalculate: if true the pseudo-header checksum will be recalculated 5777 */ 5778 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5779 { 5780 int err; 5781 5782 switch (skb->protocol) { 5783 case htons(ETH_P_IP): 5784 err = skb_checksum_setup_ipv4(skb, recalculate); 5785 break; 5786 5787 case htons(ETH_P_IPV6): 5788 err = skb_checksum_setup_ipv6(skb, recalculate); 5789 break; 5790 5791 default: 5792 err = -EPROTO; 5793 break; 5794 } 5795 5796 return err; 5797 } 5798 EXPORT_SYMBOL(skb_checksum_setup); 5799 5800 /** 5801 * skb_checksum_maybe_trim - maybe trims the given skb 5802 * @skb: the skb to check 5803 * @transport_len: the data length beyond the network header 5804 * 5805 * Checks whether the given skb has data beyond the given transport length. 5806 * If so, returns a cloned skb trimmed to this transport length. 5807 * Otherwise returns the provided skb. Returns NULL in error cases 5808 * (e.g. transport_len exceeds skb length or out-of-memory). 5809 * 5810 * Caller needs to set the skb transport header and free any returned skb if it 5811 * differs from the provided skb. 
5812 */ 5813 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5814 unsigned int transport_len) 5815 { 5816 struct sk_buff *skb_chk; 5817 unsigned int len = skb_transport_offset(skb) + transport_len; 5818 int ret; 5819 5820 if (skb->len < len) 5821 return NULL; 5822 else if (skb->len == len) 5823 return skb; 5824 5825 skb_chk = skb_clone(skb, GFP_ATOMIC); 5826 if (!skb_chk) 5827 return NULL; 5828 5829 ret = pskb_trim_rcsum(skb_chk, len); 5830 if (ret) { 5831 kfree_skb(skb_chk); 5832 return NULL; 5833 } 5834 5835 return skb_chk; 5836 } 5837 5838 /** 5839 * skb_checksum_trimmed - validate checksum of an skb 5840 * @skb: the skb to check 5841 * @transport_len: the data length beyond the network header 5842 * @skb_chkf: checksum function to use 5843 * 5844 * Applies the given checksum function skb_chkf to the provided skb. 5845 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5846 * 5847 * If the skb has data beyond the given transport length, then a 5848 * trimmed & cloned skb is checked and returned. 5849 * 5850 * Caller needs to set the skb transport header and free any returned skb if it 5851 * differs from the provided skb. 5852 */ 5853 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5854 unsigned int transport_len, 5855 __sum16(*skb_chkf)(struct sk_buff *skb)) 5856 { 5857 struct sk_buff *skb_chk; 5858 unsigned int offset = skb_transport_offset(skb); 5859 __sum16 ret; 5860 5861 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5862 if (!skb_chk) 5863 goto err; 5864 5865 if (!pskb_may_pull(skb_chk, offset)) 5866 goto err; 5867 5868 skb_pull_rcsum(skb_chk, offset); 5869 ret = skb_chkf(skb_chk); 5870 skb_push_rcsum(skb_chk, offset); 5871 5872 if (ret) 5873 goto err; 5874 5875 return skb_chk; 5876 5877 err: 5878 if (skb_chk && skb_chk != skb) 5879 kfree_skb(skb_chk); 5880 5881 return NULL; 5882 5883 } 5884 EXPORT_SYMBOL(skb_checksum_trimmed); 5885 5886 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5887 { 5888 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5889 skb->dev->name); 5890 } 5891 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5892 5893 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5894 { 5895 if (head_stolen) { 5896 skb_release_head_state(skb); 5897 kmem_cache_free(net_hotdata.skbuff_cache, skb); 5898 } else { 5899 __kfree_skb(skb); 5900 } 5901 } 5902 EXPORT_SYMBOL(kfree_skb_partial); 5903 5904 /** 5905 * skb_try_coalesce - try to merge skb to prior one 5906 * @to: prior buffer 5907 * @from: buffer to add 5908 * @fragstolen: pointer to boolean 5909 * @delta_truesize: how much more was allocated than was requested 5910 */ 5911 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5912 bool *fragstolen, int *delta_truesize) 5913 { 5914 struct skb_shared_info *to_shinfo, *from_shinfo; 5915 int i, delta, len = from->len; 5916 5917 *fragstolen = false; 5918 5919 if (skb_cloned(to)) 5920 return false; 5921 5922 /* In general, avoid mixing page_pool and non-page_pool allocated 5923 * pages within the same SKB. In theory we could take full 5924 * references if @from is cloned and !@to->pp_recycle but its 5925 * tricky (due to potential race with the clone disappearing) and 5926 * rare, so not worth dealing with. 
5927 */ 5928 if (to->pp_recycle != from->pp_recycle) 5929 return false; 5930 5931 if (len <= skb_tailroom(to)) { 5932 if (len) 5933 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5934 *delta_truesize = 0; 5935 return true; 5936 } 5937 5938 to_shinfo = skb_shinfo(to); 5939 from_shinfo = skb_shinfo(from); 5940 if (to_shinfo->frag_list || from_shinfo->frag_list) 5941 return false; 5942 if (skb_zcopy(to) || skb_zcopy(from)) 5943 return false; 5944 5945 if (skb_headlen(from) != 0) { 5946 struct page *page; 5947 unsigned int offset; 5948 5949 if (to_shinfo->nr_frags + 5950 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5951 return false; 5952 5953 if (skb_head_is_locked(from)) 5954 return false; 5955 5956 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5957 5958 page = virt_to_head_page(from->head); 5959 offset = from->data - (unsigned char *)page_address(page); 5960 5961 skb_fill_page_desc(to, to_shinfo->nr_frags, 5962 page, offset, skb_headlen(from)); 5963 *fragstolen = true; 5964 } else { 5965 if (to_shinfo->nr_frags + 5966 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5967 return false; 5968 5969 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5970 } 5971 5972 WARN_ON_ONCE(delta < len); 5973 5974 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5975 from_shinfo->frags, 5976 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5977 to_shinfo->nr_frags += from_shinfo->nr_frags; 5978 5979 if (!skb_cloned(from)) 5980 from_shinfo->nr_frags = 0; 5981 5982 /* if the skb is not cloned this does nothing 5983 * since we set nr_frags to 0. 5984 */ 5985 if (skb_pp_frag_ref(from)) { 5986 for (i = 0; i < from_shinfo->nr_frags; i++) 5987 __skb_frag_ref(&from_shinfo->frags[i]); 5988 } 5989 5990 to->truesize += delta; 5991 to->len += len; 5992 to->data_len += len; 5993 5994 *delta_truesize = delta; 5995 return true; 5996 } 5997 EXPORT_SYMBOL(skb_try_coalesce); 5998 5999 /** 6000 * skb_scrub_packet - scrub an skb 6001 * 6002 * @skb: buffer to clean 6003 * @xnet: packet is crossing netns 6004 * 6005 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 6006 * into/from a tunnel. Some information have to be cleared during these 6007 * operations. 6008 * skb_scrub_packet can also be used to clean a skb before injecting it in 6009 * another namespace (@xnet == true). We have to clear all information in the 6010 * skb that could impact namespace isolation. 
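 *
 * A hedged usage sketch (tunnel receive path; tunnel_net and dev stand in
 * for the caller's own context):
 *
 *	skb_scrub_packet(skb, !net_eq(tunnel_net, dev_net(dev)));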
6011 */ 6012 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 6013 { 6014 skb->pkt_type = PACKET_HOST; 6015 skb->skb_iif = 0; 6016 skb->ignore_df = 0; 6017 skb_dst_drop(skb); 6018 skb_ext_reset(skb); 6019 nf_reset_ct(skb); 6020 nf_reset_trace(skb); 6021 6022 #ifdef CONFIG_NET_SWITCHDEV 6023 skb->offload_fwd_mark = 0; 6024 skb->offload_l3_fwd_mark = 0; 6025 #endif 6026 6027 if (!xnet) 6028 return; 6029 6030 ipvs_reset(skb); 6031 skb->mark = 0; 6032 skb_clear_tstamp(skb); 6033 } 6034 EXPORT_SYMBOL_GPL(skb_scrub_packet); 6035 6036 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 6037 { 6038 int mac_len, meta_len; 6039 void *meta; 6040 6041 if (skb_cow(skb, skb_headroom(skb)) < 0) { 6042 kfree_skb(skb); 6043 return NULL; 6044 } 6045 6046 mac_len = skb->data - skb_mac_header(skb); 6047 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 6048 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 6049 mac_len - VLAN_HLEN - ETH_TLEN); 6050 } 6051 6052 meta_len = skb_metadata_len(skb); 6053 if (meta_len) { 6054 meta = skb_metadata_end(skb) - meta_len; 6055 memmove(meta + VLAN_HLEN, meta, meta_len); 6056 } 6057 6058 skb->mac_header += VLAN_HLEN; 6059 return skb; 6060 } 6061 6062 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6063 { 6064 struct vlan_hdr *vhdr; 6065 u16 vlan_tci; 6066 6067 if (unlikely(skb_vlan_tag_present(skb))) { 6068 /* vlan_tci is already set-up so leave this for another time */ 6069 return skb; 6070 } 6071 6072 skb = skb_share_check(skb, GFP_ATOMIC); 6073 if (unlikely(!skb)) 6074 goto err_free; 6075 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6076 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6077 goto err_free; 6078 6079 vhdr = (struct vlan_hdr *)skb->data; 6080 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6081 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6082 6083 skb_pull_rcsum(skb, VLAN_HLEN); 6084 vlan_set_encap_proto(skb, vhdr); 6085 6086 skb = skb_reorder_vlan_header(skb); 6087 if (unlikely(!skb)) 6088 goto err_free; 6089 6090 skb_reset_network_header(skb); 6091 if (!skb_transport_header_was_set(skb)) 6092 skb_reset_transport_header(skb); 6093 skb_reset_mac_len(skb); 6094 6095 return skb; 6096 6097 err_free: 6098 kfree_skb(skb); 6099 return NULL; 6100 } 6101 EXPORT_SYMBOL(skb_vlan_untag); 6102 6103 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6104 { 6105 if (!pskb_may_pull(skb, write_len)) 6106 return -ENOMEM; 6107 6108 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6109 return 0; 6110 6111 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6112 } 6113 EXPORT_SYMBOL(skb_ensure_writable); 6114 6115 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6116 { 6117 int needed_headroom = dev->needed_headroom; 6118 int needed_tailroom = dev->needed_tailroom; 6119 6120 /* For tail taggers, we need to pad short frames ourselves, to ensure 6121 * that the tail tag does not fail at its role of being at the end of 6122 * the packet, once the conduit interface pads the frame. Account for 6123 * that pad length here, and pad later. 6124 */ 6125 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6126 needed_tailroom += ETH_ZLEN - skb->len; 6127 /* skb_headroom() returns unsigned int... 
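 * so compute the shortfall as a signed value and clamp negative results
 * to zero below, rather than letting the subtraction wrap into a huge
 * bogus expansion request.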
*/ 6128 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); 6129 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); 6130 6131 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) 6132 /* No reallocation needed, yay! */ 6133 return 0; 6134 6135 return pskb_expand_head(skb, needed_headroom, needed_tailroom, 6136 GFP_ATOMIC); 6137 } 6138 EXPORT_SYMBOL(skb_ensure_writable_head_tail); 6139 6140 /* remove VLAN header from packet and update csum accordingly. 6141 * expects a non skb_vlan_tag_present skb with a vlan tag payload 6142 */ 6143 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 6144 { 6145 int offset = skb->data - skb_mac_header(skb); 6146 int err; 6147 6148 if (WARN_ONCE(offset, 6149 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 6150 offset)) { 6151 return -EINVAL; 6152 } 6153 6154 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 6155 if (unlikely(err)) 6156 return err; 6157 6158 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6159 6160 vlan_remove_tag(skb, vlan_tci); 6161 6162 skb->mac_header += VLAN_HLEN; 6163 6164 if (skb_network_offset(skb) < ETH_HLEN) 6165 skb_set_network_header(skb, ETH_HLEN); 6166 6167 skb_reset_mac_len(skb); 6168 6169 return err; 6170 } 6171 EXPORT_SYMBOL(__skb_vlan_pop); 6172 6173 /* Pop a vlan tag either from hwaccel or from payload. 6174 * Expects skb->data at mac header. 6175 */ 6176 int skb_vlan_pop(struct sk_buff *skb) 6177 { 6178 u16 vlan_tci; 6179 __be16 vlan_proto; 6180 int err; 6181 6182 if (likely(skb_vlan_tag_present(skb))) { 6183 __vlan_hwaccel_clear_tag(skb); 6184 } else { 6185 if (unlikely(!eth_type_vlan(skb->protocol))) 6186 return 0; 6187 6188 err = __skb_vlan_pop(skb, &vlan_tci); 6189 if (err) 6190 return err; 6191 } 6192 /* move next vlan tag to hw accel tag */ 6193 if (likely(!eth_type_vlan(skb->protocol))) 6194 return 0; 6195 6196 vlan_proto = skb->protocol; 6197 err = __skb_vlan_pop(skb, &vlan_tci); 6198 if (unlikely(err)) 6199 return err; 6200 6201 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6202 return 0; 6203 } 6204 EXPORT_SYMBOL(skb_vlan_pop); 6205 6206 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 6207 * Expects skb->data at mac header. 6208 */ 6209 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 6210 { 6211 if (skb_vlan_tag_present(skb)) { 6212 int offset = skb->data - skb_mac_header(skb); 6213 int err; 6214 6215 if (WARN_ONCE(offset, 6216 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 6217 offset)) { 6218 return -EINVAL; 6219 } 6220 6221 err = __vlan_insert_tag(skb, skb->vlan_proto, 6222 skb_vlan_tag_get(skb)); 6223 if (err) 6224 return err; 6225 6226 skb->protocol = skb->vlan_proto; 6227 skb->mac_len += VLAN_HLEN; 6228 6229 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6230 } 6231 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6232 return 0; 6233 } 6234 EXPORT_SYMBOL(skb_vlan_push); 6235 6236 /** 6237 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 6238 * 6239 * @skb: Socket buffer to modify 6240 * 6241 * Drop the Ethernet header of @skb. 6242 * 6243 * Expects that skb->data points to the mac header and that no VLAN tags are 6244 * present. 6245 * 6246 * Returns 0 on success, -errno otherwise. 
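 *
 * Illustrative sketch of a caller handing the payload to an L3-only device
 * (hypothetical "l3_dev" and "drop" label):
 *
 *	if (skb_eth_pop(skb))
 *		goto drop;
 *	skb->dev = l3_dev;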
6247 */ 6248 int skb_eth_pop(struct sk_buff *skb) 6249 { 6250 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6251 skb_network_offset(skb) < ETH_HLEN) 6252 return -EPROTO; 6253 6254 skb_pull_rcsum(skb, ETH_HLEN); 6255 skb_reset_mac_header(skb); 6256 skb_reset_mac_len(skb); 6257 6258 return 0; 6259 } 6260 EXPORT_SYMBOL(skb_eth_pop); 6261 6262 /** 6263 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6264 * 6265 * @skb: Socket buffer to modify 6266 * @dst: Destination MAC address of the new header 6267 * @src: Source MAC address of the new header 6268 * 6269 * Prepend @skb with a new Ethernet header. 6270 * 6271 * Expects that skb->data points to the mac header, which must be empty. 6272 * 6273 * Returns 0 on success, -errno otherwise. 6274 */ 6275 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6276 const unsigned char *src) 6277 { 6278 struct ethhdr *eth; 6279 int err; 6280 6281 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6282 return -EPROTO; 6283 6284 err = skb_cow_head(skb, sizeof(*eth)); 6285 if (err < 0) 6286 return err; 6287 6288 skb_push(skb, sizeof(*eth)); 6289 skb_reset_mac_header(skb); 6290 skb_reset_mac_len(skb); 6291 6292 eth = eth_hdr(skb); 6293 ether_addr_copy(eth->h_dest, dst); 6294 ether_addr_copy(eth->h_source, src); 6295 eth->h_proto = skb->protocol; 6296 6297 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6298 6299 return 0; 6300 } 6301 EXPORT_SYMBOL(skb_eth_push); 6302 6303 /* Update the ethertype of hdr and the skb csum value if required. */ 6304 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6305 __be16 ethertype) 6306 { 6307 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6308 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6309 6310 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6311 } 6312 6313 hdr->h_proto = ethertype; 6314 } 6315 6316 /** 6317 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6318 * the packet 6319 * 6320 * @skb: buffer 6321 * @mpls_lse: MPLS label stack entry to push 6322 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6323 * @mac_len: length of the MAC header 6324 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6325 * ethernet 6326 * 6327 * Expects skb->data at mac header. 6328 * 6329 * Returns 0 on success, -errno otherwise. 6330 */ 6331 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6332 int mac_len, bool ethernet) 6333 { 6334 struct mpls_shim_hdr *lse; 6335 int err; 6336 6337 if (unlikely(!eth_p_mpls(mpls_proto))) 6338 return -EINVAL; 6339 6340 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
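 * Pushing an MPLS header on top of an already-encapsulated skb would
 * require segmentation to handle two levels of encapsulation at once,
 * which it cannot, so such skbs are rejected below.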
*/ 6341 if (skb->encapsulation) 6342 return -EINVAL; 6343 6344 err = skb_cow_head(skb, MPLS_HLEN); 6345 if (unlikely(err)) 6346 return err; 6347 6348 if (!skb->inner_protocol) { 6349 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6350 skb_set_inner_protocol(skb, skb->protocol); 6351 } 6352 6353 skb_push(skb, MPLS_HLEN); 6354 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6355 mac_len); 6356 skb_reset_mac_header(skb); 6357 skb_set_network_header(skb, mac_len); 6358 skb_reset_mac_len(skb); 6359 6360 lse = mpls_hdr(skb); 6361 lse->label_stack_entry = mpls_lse; 6362 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6363 6364 if (ethernet && mac_len >= ETH_HLEN) 6365 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6366 skb->protocol = mpls_proto; 6367 6368 return 0; 6369 } 6370 EXPORT_SYMBOL_GPL(skb_mpls_push); 6371 6372 /** 6373 * skb_mpls_pop() - pop the outermost MPLS header 6374 * 6375 * @skb: buffer 6376 * @next_proto: ethertype of header after popped MPLS header 6377 * @mac_len: length of the MAC header 6378 * @ethernet: flag to indicate if the packet is ethernet 6379 * 6380 * Expects skb->data at mac header. 6381 * 6382 * Returns 0 on success, -errno otherwise. 6383 */ 6384 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6385 bool ethernet) 6386 { 6387 int err; 6388 6389 if (unlikely(!eth_p_mpls(skb->protocol))) 6390 return 0; 6391 6392 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6393 if (unlikely(err)) 6394 return err; 6395 6396 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6397 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6398 mac_len); 6399 6400 __skb_pull(skb, MPLS_HLEN); 6401 skb_reset_mac_header(skb); 6402 skb_set_network_header(skb, mac_len); 6403 6404 if (ethernet && mac_len >= ETH_HLEN) { 6405 struct ethhdr *hdr; 6406 6407 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6408 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6409 skb_mod_eth_type(skb, hdr, next_proto); 6410 } 6411 skb->protocol = next_proto; 6412 6413 return 0; 6414 } 6415 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6416 6417 /** 6418 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6419 * 6420 * @skb: buffer 6421 * @mpls_lse: new MPLS label stack entry to update to 6422 * 6423 * Expects skb->data at mac header. 6424 * 6425 * Returns 0 on success, -errno otherwise. 6426 */ 6427 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6428 { 6429 int err; 6430 6431 if (unlikely(!eth_p_mpls(skb->protocol))) 6432 return -EINVAL; 6433 6434 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6435 if (unlikely(err)) 6436 return err; 6437 6438 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6439 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6440 6441 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6442 } 6443 6444 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6445 6446 return 0; 6447 } 6448 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6449 6450 /** 6451 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6452 * 6453 * @skb: buffer 6454 * 6455 * Expects skb->data at mac header. 6456 * 6457 * Returns 0 on success, -errno otherwise. 
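 *
 * Note that -EINVAL is also returned when the decrement would bring the TTL
 * to zero, so a forwarding caller can simply drop on any error
 * (illustrative sketch, "drop" label hypothetical):
 *
 *	if (skb_mpls_dec_ttl(skb))
 *		goto drop;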
6458 */ 6459 int skb_mpls_dec_ttl(struct sk_buff *skb) 6460 { 6461 u32 lse; 6462 u8 ttl; 6463 6464 if (unlikely(!eth_p_mpls(skb->protocol))) 6465 return -EINVAL; 6466 6467 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6468 return -ENOMEM; 6469 6470 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6471 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6472 if (!--ttl) 6473 return -EINVAL; 6474 6475 lse &= ~MPLS_LS_TTL_MASK; 6476 lse |= ttl << MPLS_LS_TTL_SHIFT; 6477 6478 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6479 } 6480 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6481 6482 /** 6483 * alloc_skb_with_frags - allocate skb with page frags 6484 * 6485 * @header_len: size of linear part 6486 * @data_len: needed length in frags 6487 * @order: max page order desired. 6488 * @errcode: pointer to error code if any 6489 * @gfp_mask: allocation mask 6490 * 6491 * This can be used to allocate a paged skb, given a maximal order for frags. 6492 */ 6493 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6494 unsigned long data_len, 6495 int order, 6496 int *errcode, 6497 gfp_t gfp_mask) 6498 { 6499 unsigned long chunk; 6500 struct sk_buff *skb; 6501 struct page *page; 6502 int nr_frags = 0; 6503 6504 *errcode = -EMSGSIZE; 6505 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6506 return NULL; 6507 6508 *errcode = -ENOBUFS; 6509 skb = alloc_skb(header_len, gfp_mask); 6510 if (!skb) 6511 return NULL; 6512 6513 while (data_len) { 6514 if (nr_frags == MAX_SKB_FRAGS - 1) 6515 goto failure; 6516 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6517 order--; 6518 6519 if (order) { 6520 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6521 __GFP_COMP | 6522 __GFP_NOWARN, 6523 order); 6524 if (!page) { 6525 order--; 6526 continue; 6527 } 6528 } else { 6529 page = alloc_page(gfp_mask); 6530 if (!page) 6531 goto failure; 6532 } 6533 chunk = min_t(unsigned long, data_len, 6534 PAGE_SIZE << order); 6535 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6536 nr_frags++; 6537 skb->truesize += (PAGE_SIZE << order); 6538 data_len -= chunk; 6539 } 6540 return skb; 6541 6542 failure: 6543 kfree_skb(skb); 6544 return NULL; 6545 } 6546 EXPORT_SYMBOL(alloc_skb_with_frags); 6547 6548 /* carve out the first off bytes from skb when off < headlen */ 6549 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6550 const int headlen, gfp_t gfp_mask) 6551 { 6552 int i; 6553 unsigned int size = skb_end_offset(skb); 6554 int new_hlen = headlen - off; 6555 u8 *data; 6556 6557 if (skb_pfmemalloc(skb)) 6558 gfp_mask |= __GFP_MEMALLOC; 6559 6560 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6561 if (!data) 6562 return -ENOMEM; 6563 size = SKB_WITH_OVERHEAD(size); 6564 6565 /* Copy real data, and all frags */ 6566 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6567 skb->len -= off; 6568 6569 memcpy((struct skb_shared_info *)(data + size), 6570 skb_shinfo(skb), 6571 offsetof(struct skb_shared_info, 6572 frags[skb_shinfo(skb)->nr_frags])); 6573 if (skb_cloned(skb)) { 6574 /* drop the old head gracefully */ 6575 if (skb_orphan_frags(skb, gfp_mask)) { 6576 skb_kfree_head(data, size); 6577 return -ENOMEM; 6578 } 6579 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6580 skb_frag_ref(skb, i); 6581 if (skb_has_frag_list(skb)) 6582 skb_clone_fraglist(skb); 6583 skb_release_data(skb, SKB_CONSUMED); 6584 } else { 6585 /* we can reuse existing recount- all we did was 6586 * relocate values 6587 */ 6588 skb_free_head(skb); 6589 } 6590 
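	/* Rewire the skb around the new, smaller head: the bytes that used
	 * to live at offset 'off' now start at data[0], and the buffer is
	 * known to be unshared, so reset the clone/offset bookkeeping and
	 * start dataref over at 1.
	 */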
6591 skb->head = data; 6592 skb->data = data; 6593 skb->head_frag = 0; 6594 skb_set_end_offset(skb, size); 6595 skb_set_tail_pointer(skb, skb_headlen(skb)); 6596 skb_headers_offset_update(skb, 0); 6597 skb->cloned = 0; 6598 skb->hdr_len = 0; 6599 skb->nohdr = 0; 6600 atomic_set(&skb_shinfo(skb)->dataref, 1); 6601 6602 return 0; 6603 } 6604 6605 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6606 6607 /* carve out the first eat bytes from skb's frag_list. May recurse into 6608 * pskb_carve() 6609 */ 6610 static int pskb_carve_frag_list(struct sk_buff *skb, 6611 struct skb_shared_info *shinfo, int eat, 6612 gfp_t gfp_mask) 6613 { 6614 struct sk_buff *list = shinfo->frag_list; 6615 struct sk_buff *clone = NULL; 6616 struct sk_buff *insp = NULL; 6617 6618 do { 6619 if (!list) { 6620 pr_err("Not enough bytes to eat. Want %d\n", eat); 6621 return -EFAULT; 6622 } 6623 if (list->len <= eat) { 6624 /* Eaten as whole. */ 6625 eat -= list->len; 6626 list = list->next; 6627 insp = list; 6628 } else { 6629 /* Eaten partially. */ 6630 if (skb_shared(list)) { 6631 clone = skb_clone(list, gfp_mask); 6632 if (!clone) 6633 return -ENOMEM; 6634 insp = list->next; 6635 list = clone; 6636 } else { 6637 /* This may be pulled without problems. */ 6638 insp = list; 6639 } 6640 if (pskb_carve(list, eat, gfp_mask) < 0) { 6641 kfree_skb(clone); 6642 return -ENOMEM; 6643 } 6644 break; 6645 } 6646 } while (eat); 6647 6648 /* Free pulled out fragments. */ 6649 while ((list = shinfo->frag_list) != insp) { 6650 shinfo->frag_list = list->next; 6651 consume_skb(list); 6652 } 6653 /* And insert new clone at head. */ 6654 if (clone) { 6655 clone->next = list; 6656 shinfo->frag_list = clone; 6657 } 6658 return 0; 6659 } 6660 6661 /* carve off first len bytes from skb. Split line (off) is in the 6662 * non-linear part of skb 6663 */ 6664 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6665 int pos, gfp_t gfp_mask) 6666 { 6667 int i, k = 0; 6668 unsigned int size = skb_end_offset(skb); 6669 u8 *data; 6670 const int nfrags = skb_shinfo(skb)->nr_frags; 6671 struct skb_shared_info *shinfo; 6672 6673 if (skb_pfmemalloc(skb)) 6674 gfp_mask |= __GFP_MEMALLOC; 6675 6676 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6677 if (!data) 6678 return -ENOMEM; 6679 size = SKB_WITH_OVERHEAD(size); 6680 6681 memcpy((struct skb_shared_info *)(data + size), 6682 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6683 if (skb_orphan_frags(skb, gfp_mask)) { 6684 skb_kfree_head(data, size); 6685 return -ENOMEM; 6686 } 6687 shinfo = (struct skb_shared_info *)(data + size); 6688 for (i = 0; i < nfrags; i++) { 6689 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6690 6691 if (pos + fsize > off) { 6692 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6693 6694 if (pos < off) { 6695 /* Split frag. 6696 * We have two variants in this case: 6697 * 1. Move all the frag to the second 6698 * part, if it is possible. F.e. 6699 * this approach is mandatory for TUX, 6700 * where splitting is expensive. 6701 * 2. Split is accurately. We make this. 
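				 *    Here the first kept frag simply has its
				 *    offset advanced and its size reduced by
				 *    the carved amount (off - pos).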
6702 */ 6703 skb_frag_off_add(&shinfo->frags[0], off - pos); 6704 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6705 } 6706 skb_frag_ref(skb, i); 6707 k++; 6708 } 6709 pos += fsize; 6710 } 6711 shinfo->nr_frags = k; 6712 if (skb_has_frag_list(skb)) 6713 skb_clone_fraglist(skb); 6714 6715 /* split line is in frag list */ 6716 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6717 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6718 if (skb_has_frag_list(skb)) 6719 kfree_skb_list(skb_shinfo(skb)->frag_list); 6720 skb_kfree_head(data, size); 6721 return -ENOMEM; 6722 } 6723 skb_release_data(skb, SKB_CONSUMED); 6724 6725 skb->head = data; 6726 skb->head_frag = 0; 6727 skb->data = data; 6728 skb_set_end_offset(skb, size); 6729 skb_reset_tail_pointer(skb); 6730 skb_headers_offset_update(skb, 0); 6731 skb->cloned = 0; 6732 skb->hdr_len = 0; 6733 skb->nohdr = 0; 6734 skb->len -= off; 6735 skb->data_len = skb->len; 6736 atomic_set(&skb_shinfo(skb)->dataref, 1); 6737 return 0; 6738 } 6739 6740 /* remove len bytes from the beginning of the skb */ 6741 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6742 { 6743 int headlen = skb_headlen(skb); 6744 6745 if (len < headlen) 6746 return pskb_carve_inside_header(skb, len, headlen, gfp); 6747 else 6748 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6749 } 6750 6751 /* Extract to_copy bytes starting at off from skb, and return this in 6752 * a new skb 6753 */ 6754 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6755 int to_copy, gfp_t gfp) 6756 { 6757 struct sk_buff *clone = skb_clone(skb, gfp); 6758 6759 if (!clone) 6760 return NULL; 6761 6762 if (pskb_carve(clone, off, gfp) < 0 || 6763 pskb_trim(clone, to_copy)) { 6764 kfree_skb(clone); 6765 return NULL; 6766 } 6767 return clone; 6768 } 6769 EXPORT_SYMBOL(pskb_extract); 6770 6771 /** 6772 * skb_condense - try to get rid of fragments/frag_list if possible 6773 * @skb: buffer 6774 * 6775 * Can be used to save memory before skb is added to a busy queue. 6776 * If packet has bytes in frags and enough tail room in skb->head, 6777 * pull all of them, so that we can free the frags right now and adjust 6778 * truesize. 6779 * Notes: 6780 * We do not reallocate skb->head thus can not fail. 6781 * Caller must re-evaluate skb->truesize if needed. 6782 */ 6783 void skb_condense(struct sk_buff *skb) 6784 { 6785 if (skb->data_len) { 6786 if (skb->data_len > skb->end - skb->tail || 6787 skb_cloned(skb)) 6788 return; 6789 6790 /* Nice, we can free page frag(s) right now */ 6791 __pskb_pull_tail(skb, skb->data_len); 6792 } 6793 /* At this point, skb->truesize might be over estimated, 6794 * because skb had a fragment, and fragments do not tell 6795 * their truesize. 6796 * When we pulled its content into skb->head, fragment 6797 * was freed, but __pskb_pull_tail() could not possibly 6798 * adjust skb->truesize, not knowing the frag truesize. 6799 */ 6800 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6801 } 6802 EXPORT_SYMBOL(skb_condense); 6803 6804 #ifdef CONFIG_SKB_EXTENSIONS 6805 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6806 { 6807 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6808 } 6809 6810 /** 6811 * __skb_ext_alloc - allocate a new skb extensions storage 6812 * 6813 * @flags: See kmalloc(). 6814 * 6815 * Returns the newly allocated pointer. The pointer can later attached to a 6816 * skb via __skb_ext_set(). 6817 * Note: caller must handle the skb_ext as an opaque data. 
6818 */ 6819 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6820 { 6821 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6822 6823 if (new) { 6824 memset(new->offset, 0, sizeof(new->offset)); 6825 refcount_set(&new->refcnt, 1); 6826 } 6827 6828 return new; 6829 } 6830 6831 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6832 unsigned int old_active) 6833 { 6834 struct skb_ext *new; 6835 6836 if (refcount_read(&old->refcnt) == 1) 6837 return old; 6838 6839 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6840 if (!new) 6841 return NULL; 6842 6843 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6844 refcount_set(&new->refcnt, 1); 6845 6846 #ifdef CONFIG_XFRM 6847 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6848 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6849 unsigned int i; 6850 6851 for (i = 0; i < sp->len; i++) 6852 xfrm_state_hold(sp->xvec[i]); 6853 } 6854 #endif 6855 #ifdef CONFIG_MCTP_FLOWS 6856 if (old_active & (1 << SKB_EXT_MCTP)) { 6857 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6858 6859 if (flow->key) 6860 refcount_inc(&flow->key->refs); 6861 } 6862 #endif 6863 __skb_ext_put(old); 6864 return new; 6865 } 6866 6867 /** 6868 * __skb_ext_set - attach the specified extension storage to this skb 6869 * @skb: buffer 6870 * @id: extension id 6871 * @ext: extension storage previously allocated via __skb_ext_alloc() 6872 * 6873 * Existing extensions, if any, are cleared. 6874 * 6875 * Returns the pointer to the extension. 6876 */ 6877 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6878 struct skb_ext *ext) 6879 { 6880 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6881 6882 skb_ext_put(skb); 6883 newlen = newoff + skb_ext_type_len[id]; 6884 ext->chunks = newlen; 6885 ext->offset[id] = newoff; 6886 skb->extensions = ext; 6887 skb->active_extensions = 1 << id; 6888 return skb_ext_get_ptr(ext, id); 6889 } 6890 6891 /** 6892 * skb_ext_add - allocate space for given extension, COW if needed 6893 * @skb: buffer 6894 * @id: extension to allocate space for 6895 * 6896 * Allocates enough space for the given extension. 6897 * If the extension is already present, a pointer to that extension 6898 * is returned. 6899 * 6900 * If the skb was cloned, COW applies and the returned memory can be 6901 * modified without changing the extension space of clones buffers. 6902 * 6903 * Returns pointer to the extension or NULL on allocation failure. 
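 *
 * Illustrative sketch (hypothetical caller; TC_SKB_EXT requires
 * CONFIG_NET_TC_SKB_EXT and "chain_index" is made up):
 *
 *	struct tc_skb_ext *tc_ext = skb_ext_add(skb, TC_SKB_EXT);
 *
 *	if (!tc_ext)
 *		return -ENOMEM;
 *	tc_ext->chain = chain_index;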
6904 */ 6905 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6906 { 6907 struct skb_ext *new, *old = NULL; 6908 unsigned int newlen, newoff; 6909 6910 if (skb->active_extensions) { 6911 old = skb->extensions; 6912 6913 new = skb_ext_maybe_cow(old, skb->active_extensions); 6914 if (!new) 6915 return NULL; 6916 6917 if (__skb_ext_exist(new, id)) 6918 goto set_active; 6919 6920 newoff = new->chunks; 6921 } else { 6922 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6923 6924 new = __skb_ext_alloc(GFP_ATOMIC); 6925 if (!new) 6926 return NULL; 6927 } 6928 6929 newlen = newoff + skb_ext_type_len[id]; 6930 new->chunks = newlen; 6931 new->offset[id] = newoff; 6932 set_active: 6933 skb->slow_gro = 1; 6934 skb->extensions = new; 6935 skb->active_extensions |= 1 << id; 6936 return skb_ext_get_ptr(new, id); 6937 } 6938 EXPORT_SYMBOL(skb_ext_add); 6939 6940 #ifdef CONFIG_XFRM 6941 static void skb_ext_put_sp(struct sec_path *sp) 6942 { 6943 unsigned int i; 6944 6945 for (i = 0; i < sp->len; i++) 6946 xfrm_state_put(sp->xvec[i]); 6947 } 6948 #endif 6949 6950 #ifdef CONFIG_MCTP_FLOWS 6951 static void skb_ext_put_mctp(struct mctp_flow *flow) 6952 { 6953 if (flow->key) 6954 mctp_key_unref(flow->key); 6955 } 6956 #endif 6957 6958 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6959 { 6960 struct skb_ext *ext = skb->extensions; 6961 6962 skb->active_extensions &= ~(1 << id); 6963 if (skb->active_extensions == 0) { 6964 skb->extensions = NULL; 6965 __skb_ext_put(ext); 6966 #ifdef CONFIG_XFRM 6967 } else if (id == SKB_EXT_SEC_PATH && 6968 refcount_read(&ext->refcnt) == 1) { 6969 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6970 6971 skb_ext_put_sp(sp); 6972 sp->len = 0; 6973 #endif 6974 } 6975 } 6976 EXPORT_SYMBOL(__skb_ext_del); 6977 6978 void __skb_ext_put(struct skb_ext *ext) 6979 { 6980 /* If this is last clone, nothing can increment 6981 * it after check passes. Avoids one atomic op. 6982 */ 6983 if (refcount_read(&ext->refcnt) == 1) 6984 goto free_now; 6985 6986 if (!refcount_dec_and_test(&ext->refcnt)) 6987 return; 6988 free_now: 6989 #ifdef CONFIG_XFRM 6990 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6991 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6992 #endif 6993 #ifdef CONFIG_MCTP_FLOWS 6994 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 6995 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 6996 #endif 6997 6998 kmem_cache_free(skbuff_ext_cache, ext); 6999 } 7000 EXPORT_SYMBOL(__skb_ext_put); 7001 #endif /* CONFIG_SKB_EXTENSIONS */ 7002 7003 static void kfree_skb_napi_cache(struct sk_buff *skb) 7004 { 7005 /* if SKB is a clone, don't handle this case */ 7006 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 7007 __kfree_skb(skb); 7008 return; 7009 } 7010 7011 local_bh_disable(); 7012 __napi_kfree_skb(skb, SKB_CONSUMED); 7013 local_bh_enable(); 7014 } 7015 7016 /** 7017 * skb_attempt_defer_free - queue skb for remote freeing 7018 * @skb: buffer 7019 * 7020 * Put @skb in a per-cpu list, using the cpu which 7021 * allocated the skb/pages to reduce false sharing 7022 * and memory zone spinlock contention. 
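 *
 * Intended for receive-path consumers (e.g. a recvmsg() fast path) that
 * often free skbs on a CPU other than the one that allocated them; when
 * the skb cannot be deferred it is simply freed on the local CPU.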
7023 */ 7024 void skb_attempt_defer_free(struct sk_buff *skb) 7025 { 7026 int cpu = skb->alloc_cpu; 7027 struct softnet_data *sd; 7028 unsigned int defer_max; 7029 bool kick; 7030 7031 if (cpu == raw_smp_processor_id() || 7032 WARN_ON_ONCE(cpu >= nr_cpu_ids) || 7033 !cpu_online(cpu)) { 7034 nodefer: kfree_skb_napi_cache(skb); 7035 return; 7036 } 7037 7038 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 7039 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 7040 7041 sd = &per_cpu(softnet_data, cpu); 7042 defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max); 7043 if (READ_ONCE(sd->defer_count) >= defer_max) 7044 goto nodefer; 7045 7046 spin_lock_bh(&sd->defer_lock); 7047 /* Send an IPI every time queue reaches half capacity. */ 7048 kick = sd->defer_count == (defer_max >> 1); 7049 /* Paired with the READ_ONCE() few lines above */ 7050 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 7051 7052 skb->next = sd->defer_list; 7053 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 7054 WRITE_ONCE(sd->defer_list, skb); 7055 spin_unlock_bh(&sd->defer_lock); 7056 7057 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 7058 * if we are unlucky enough (this seems very unlikely). 7059 */ 7060 if (unlikely(kick)) 7061 kick_defer_list_purge(sd, cpu); 7062 } 7063 7064 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 7065 size_t offset, size_t len) 7066 { 7067 const char *kaddr; 7068 __wsum csum; 7069 7070 kaddr = kmap_local_page(page); 7071 csum = csum_partial(kaddr + offset, len, 0); 7072 kunmap_local(kaddr); 7073 skb->csum = csum_block_add(skb->csum, csum, skb->len); 7074 } 7075 7076 /** 7077 * skb_splice_from_iter - Splice (or copy) pages to skbuff 7078 * @skb: The buffer to add pages to 7079 * @iter: Iterator representing the pages to be added 7080 * @maxsize: Maximum amount of pages to be added 7081 * @gfp: Allocation flags 7082 * 7083 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 7084 * extracts pages from an iterator and adds them to the socket buffer if 7085 * possible, copying them to fragments if not possible (such as if they're slab 7086 * pages). 7087 * 7088 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 7089 * insufficient space in the buffer to transfer anything. 
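 *
 * Illustrative sendmsg()-style sketch (hypothetical caller; "copy" is the
 * amount to take from @iter in this round):
 *
 *	if (msg->msg_flags & MSG_SPLICE_PAGES) {
 *		ret = skb_splice_from_iter(skb, &msg->msg_iter, copy,
 *					   sk->sk_allocation);
 *		if (ret < 0)
 *			goto error;
 *		copy = ret;
 *	}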
7090 */ 7091 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 7092 ssize_t maxsize, gfp_t gfp) 7093 { 7094 size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags); 7095 struct page *pages[8], **ppages = pages; 7096 ssize_t spliced = 0, ret = 0; 7097 unsigned int i; 7098 7099 while (iter->count > 0) { 7100 ssize_t space, nr, len; 7101 size_t off; 7102 7103 ret = -EMSGSIZE; 7104 space = frag_limit - skb_shinfo(skb)->nr_frags; 7105 if (space < 0) 7106 break; 7107 7108 /* We might be able to coalesce without increasing nr_frags */ 7109 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 7110 7111 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 7112 if (len <= 0) { 7113 ret = len ?: -EIO; 7114 break; 7115 } 7116 7117 i = 0; 7118 do { 7119 struct page *page = pages[i++]; 7120 size_t part = min_t(size_t, PAGE_SIZE - off, len); 7121 7122 ret = -EIO; 7123 if (WARN_ON_ONCE(!sendpage_ok(page))) 7124 goto out; 7125 7126 ret = skb_append_pagefrags(skb, page, off, part, 7127 frag_limit); 7128 if (ret < 0) { 7129 iov_iter_revert(iter, len); 7130 goto out; 7131 } 7132 7133 if (skb->ip_summed == CHECKSUM_NONE) 7134 skb_splice_csum_page(skb, page, off, part); 7135 7136 off = 0; 7137 spliced += part; 7138 maxsize -= part; 7139 len -= part; 7140 } while (len > 0); 7141 7142 if (maxsize <= 0) 7143 break; 7144 } 7145 7146 out: 7147 skb_len_add(skb, spliced); 7148 return spliced ?: ret; 7149 } 7150 EXPORT_SYMBOL(skb_splice_from_iter); 7151 7152 static __always_inline 7153 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7154 size_t len, void *to, void *priv2) 7155 { 7156 __wsum *csum = priv2; 7157 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7158 7159 *csum = csum_block_add(*csum, next, progress); 7160 return 0; 7161 } 7162 7163 static __always_inline 7164 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7165 size_t len, void *to, void *priv2) 7166 { 7167 __wsum next, *csum = priv2; 7168 7169 next = csum_and_copy_from_user(iter_from, to + progress, len); 7170 *csum = csum_block_add(*csum, next, progress); 7171 return next ? 0 : len; 7172 } 7173 7174 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7175 __wsum *csum, struct iov_iter *i) 7176 { 7177 size_t copied; 7178 7179 if (WARN_ON_ONCE(!i->data_source)) 7180 return false; 7181 copied = iterate_and_advance2(i, bytes, addr, csum, 7182 copy_from_user_iter_csum, 7183 memcpy_from_iter_csum); 7184 if (likely(copied == bytes)) 7185 return true; 7186 iov_iter_revert(i, copied); 7187 return false; 7188 } 7189 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7190
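/*
 * Illustrative sketch (hypothetical caller) of filling an skb from a message
 * iterator while folding the copied bytes into the checksum, in the style of
 * a datagram getfrag helper ("to", "len" and "odd" are the destination,
 * length and running byte offset of this chunk):
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 *		return -EFAULT;
 *	skb->csum = csum_block_add(skb->csum, csum, odd);
 */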