// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define SKB_SMALL_HEAD_SIZE	SKB_HEAD_ALIGN(MAX_TCP_HEADER)

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE				\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM					\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)

int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long as
 * the netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
199 */ 200 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, 201 const char msg[]) 202 { 203 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n", 204 msg, addr, skb->len, sz, skb->head, skb->data, 205 (unsigned long)skb->tail, (unsigned long)skb->end, 206 skb->dev ? skb->dev->name : "<NULL>"); 207 BUG(); 208 } 209 210 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) 211 { 212 skb_panic(skb, sz, addr, __func__); 213 } 214 215 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) 216 { 217 skb_panic(skb, sz, addr, __func__); 218 } 219 220 #define NAPI_SKB_CACHE_SIZE 64 221 #define NAPI_SKB_CACHE_BULK 16 222 #define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2) 223 224 #if PAGE_SIZE == SZ_4K 225 226 #define NAPI_HAS_SMALL_PAGE_FRAG 1 227 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc) 228 229 /* specialized page frag allocator using a single order 0 page 230 * and slicing it into 1K sized fragment. Constrained to systems 231 * with a very limited amount of 1K fragments fitting a single 232 * page - to avoid excessive truesize underestimation 233 */ 234 235 struct page_frag_1k { 236 void *va; 237 u16 offset; 238 bool pfmemalloc; 239 }; 240 241 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp) 242 { 243 struct page *page; 244 int offset; 245 246 offset = nc->offset - SZ_1K; 247 if (likely(offset >= 0)) 248 goto use_frag; 249 250 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 251 if (!page) 252 return NULL; 253 254 nc->va = page_address(page); 255 nc->pfmemalloc = page_is_pfmemalloc(page); 256 offset = PAGE_SIZE - SZ_1K; 257 page_ref_add(page, offset / SZ_1K); 258 259 use_frag: 260 nc->offset = offset; 261 return nc->va + offset; 262 } 263 #else 264 265 /* the small page is actually unused in this build; add dummy helpers 266 * to please the compiler and avoid later preprocessor's conditionals 267 */ 268 #define NAPI_HAS_SMALL_PAGE_FRAG 0 269 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false 270 271 struct page_frag_1k { 272 }; 273 274 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask) 275 { 276 return NULL; 277 } 278 279 #endif 280 281 struct napi_alloc_cache { 282 struct page_frag_cache page; 283 struct page_frag_1k page_small; 284 unsigned int skb_count; 285 void *skb_cache[NAPI_SKB_CACHE_SIZE]; 286 }; 287 288 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); 289 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); 290 291 /* Double check that napi_get_frags() allocates skbs with 292 * skb->head being backed by slab, not a page fragment. 293 * This is to make sure bug fixed in 3226b158e67c 294 * ("net: avoid 32 x truesize under-estimation for tiny skbs") 295 * does not accidentally come back. 
296 */ 297 void napi_get_frags_check(struct napi_struct *napi) 298 { 299 struct sk_buff *skb; 300 301 local_bh_disable(); 302 skb = napi_get_frags(napi); 303 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); 304 napi_free_frags(napi); 305 local_bh_enable(); 306 } 307 308 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 309 { 310 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 311 312 fragsz = SKB_DATA_ALIGN(fragsz); 313 314 return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 315 align_mask); 316 } 317 EXPORT_SYMBOL(__napi_alloc_frag_align); 318 319 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) 320 { 321 void *data; 322 323 fragsz = SKB_DATA_ALIGN(fragsz); 324 if (in_hardirq() || irqs_disabled()) { 325 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); 326 327 data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, 328 align_mask); 329 } else { 330 struct napi_alloc_cache *nc; 331 332 local_bh_disable(); 333 nc = this_cpu_ptr(&napi_alloc_cache); 334 data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, 335 align_mask); 336 local_bh_enable(); 337 } 338 return data; 339 } 340 EXPORT_SYMBOL(__netdev_alloc_frag_align); 341 342 static struct sk_buff *napi_skb_cache_get(void) 343 { 344 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 345 struct sk_buff *skb; 346 347 if (unlikely(!nc->skb_count)) { 348 nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, 349 GFP_ATOMIC, 350 NAPI_SKB_CACHE_BULK, 351 nc->skb_cache); 352 if (unlikely(!nc->skb_count)) 353 return NULL; 354 } 355 356 skb = nc->skb_cache[--nc->skb_count]; 357 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); 358 359 return skb; 360 } 361 362 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, 363 unsigned int size) 364 { 365 struct skb_shared_info *shinfo; 366 367 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 368 369 /* Assumes caller memset cleared SKB */ 370 skb->truesize = SKB_TRUESIZE(size); 371 refcount_set(&skb->users, 1); 372 skb->head = data; 373 skb->data = data; 374 skb_reset_tail_pointer(skb); 375 skb_set_end_offset(skb, size); 376 skb->mac_header = (typeof(skb->mac_header))~0U; 377 skb->transport_header = (typeof(skb->transport_header))~0U; 378 skb->alloc_cpu = raw_smp_processor_id(); 379 /* make sure we initialize shinfo sequentially */ 380 shinfo = skb_shinfo(skb); 381 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 382 atomic_set(&shinfo->dataref, 1); 383 384 skb_set_kcov_handle(skb, kcov_common_handle()); 385 } 386 387 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, 388 unsigned int *size) 389 { 390 void *resized; 391 392 /* Must find the allocation size (and grow it to match). */ 393 *size = ksize(data); 394 /* krealloc() will immediately return "data" when 395 * "ksize(data)" is requested: it is the existing upper 396 * bounds. As a result, GFP_ATOMIC will be ignored. Note 397 * that this "new" pointer needs to be passed back to the 398 * caller for use so the __alloc_size hinting will be 399 * tracked correctly. 400 */ 401 resized = krealloc(data, *size, GFP_ATOMIC); 402 WARN_ON_ONCE(resized != data); 403 return resized; 404 } 405 406 /* build_skb() variant which can operate on slab buffers. 407 * Note that this should be used sparingly as slab buffers 408 * cannot be combined efficiently by GRO! 
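 *
 * Example (sketch, hypothetical caller; "payload" and "len" stand for the
 * caller's own data). The kmalloc()'ed buffer becomes skb->head; on
 * failure slab_build_skb() does not free it, so the caller must:
 *
 *	buf = kmalloc(len, GFP_ATOMIC);
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, payload, len);
 *	skb = slab_build_skb(buf);
 *	if (!skb) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	skb_put(skb, len);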
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
520 * 521 * Returns a new &sk_buff on success, %NULL on allocation failure. 522 */ 523 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size) 524 { 525 struct sk_buff *skb; 526 527 skb = napi_skb_cache_get(); 528 if (unlikely(!skb)) 529 return NULL; 530 531 memset(skb, 0, offsetof(struct sk_buff, tail)); 532 __build_skb_around(skb, data, frag_size); 533 534 return skb; 535 } 536 537 /** 538 * napi_build_skb - build a network buffer 539 * @data: data buffer provided by caller 540 * @frag_size: size of data 541 * 542 * Version of __napi_build_skb() that takes care of skb->head_frag 543 * and skb->pfmemalloc when the data is a page or page fragment. 544 * 545 * Returns a new &sk_buff on success, %NULL on allocation failure. 546 */ 547 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) 548 { 549 struct sk_buff *skb = __napi_build_skb(data, frag_size); 550 551 if (likely(skb) && frag_size) { 552 skb->head_frag = 1; 553 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); 554 } 555 556 return skb; 557 } 558 EXPORT_SYMBOL(napi_build_skb); 559 560 /* 561 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 562 * the caller if emergency pfmemalloc reserves are being used. If it is and 563 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 564 * may be used. Otherwise, the packet data may be discarded until enough 565 * memory is free 566 */ 567 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, 568 bool *pfmemalloc) 569 { 570 bool ret_pfmemalloc = false; 571 size_t obj_size; 572 void *obj; 573 574 obj_size = SKB_HEAD_ALIGN(*size); 575 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && 576 !(flags & KMALLOC_NOT_NORMAL_BITS)) { 577 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, 578 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 579 node); 580 *size = SKB_SMALL_HEAD_CACHE_SIZE; 581 if (obj || !(gfp_pfmemalloc_allowed(flags))) 582 goto out; 583 /* Try again but now we are using pfmemalloc reserves */ 584 ret_pfmemalloc = true; 585 obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node); 586 goto out; 587 } 588 589 obj_size = kmalloc_size_roundup(obj_size); 590 /* The following cast might truncate high-order bits of obj_size, this 591 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway. 592 */ 593 *size = (unsigned int)obj_size; 594 595 /* 596 * Try a regular allocation, when that fails and we're not entitled 597 * to the reserves, fail. 598 */ 599 obj = kmalloc_node_track_caller(obj_size, 600 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 601 node); 602 if (obj || !(gfp_pfmemalloc_allowed(flags))) 603 goto out; 604 605 /* Try again but now we are using pfmemalloc reserves */ 606 ret_pfmemalloc = true; 607 obj = kmalloc_node_track_caller(obj_size, flags, node); 608 609 out: 610 if (pfmemalloc) 611 *pfmemalloc = ret_pfmemalloc; 612 613 return obj; 614 } 615 616 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 617 * 'private' fields and also do memory statistics to find all the 618 * [BEEP] leaks. 619 * 620 */ 621 622 /** 623 * __alloc_skb - allocate a network buffer 624 * @size: size to allocate 625 * @gfp_mask: allocation mask 626 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 627 * instead of head cache and allocate a cloned (child) skb. 
628 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 629 * allocations in case the data is required for writeback 630 * @node: numa node to allocate memory on 631 * 632 * Allocate a new &sk_buff. The returned buffer has no headroom and a 633 * tail room of at least size bytes. The object has a reference count 634 * of one. The return is the buffer. On a failure the return is %NULL. 635 * 636 * Buffers may only be allocated from interrupts using a @gfp_mask of 637 * %GFP_ATOMIC. 638 */ 639 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 640 int flags, int node) 641 { 642 struct kmem_cache *cache; 643 struct sk_buff *skb; 644 bool pfmemalloc; 645 u8 *data; 646 647 cache = (flags & SKB_ALLOC_FCLONE) 648 ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache; 649 650 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 651 gfp_mask |= __GFP_MEMALLOC; 652 653 /* Get the HEAD */ 654 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI && 655 likely(node == NUMA_NO_NODE || node == numa_mem_id())) 656 skb = napi_skb_cache_get(); 657 else 658 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); 659 if (unlikely(!skb)) 660 return NULL; 661 prefetchw(skb); 662 663 /* We do our best to align skb_shared_info on a separate cache 664 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 665 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 666 * Both skb->head and skb_shared_info are cache line aligned. 667 */ 668 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc); 669 if (unlikely(!data)) 670 goto nodata; 671 /* kmalloc_size_roundup() might give us more room than requested. 672 * Put skb_shared_info exactly at the end of allocated zone, 673 * to allow max possible filling before reallocation. 674 */ 675 prefetchw(data + SKB_WITH_OVERHEAD(size)); 676 677 /* 678 * Only clear those fields we need to clear, not those that we will 679 * actually initialise below. Hence, don't put any more fields after 680 * the tail pointer in struct sk_buff! 681 */ 682 memset(skb, 0, offsetof(struct sk_buff, tail)); 683 __build_skb_around(skb, data, size); 684 skb->pfmemalloc = pfmemalloc; 685 686 if (flags & SKB_ALLOC_FCLONE) { 687 struct sk_buff_fclones *fclones; 688 689 fclones = container_of(skb, struct sk_buff_fclones, skb1); 690 691 skb->fclone = SKB_FCLONE_ORIG; 692 refcount_set(&fclones->fclone_ref, 1); 693 } 694 695 return skb; 696 697 nodata: 698 kmem_cache_free(cache, skb); 699 return NULL; 700 } 701 EXPORT_SYMBOL(__alloc_skb); 702 703 /** 704 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 705 * @dev: network device to receive on 706 * @len: length to allocate 707 * @gfp_mask: get_free_pages mask, passed to alloc_skb 708 * 709 * Allocate a new &sk_buff and assign it a usage count of one. The 710 * buffer has NET_SKB_PAD headroom built in. Users should allocate 711 * the headroom they think they need without accounting for the 712 * built in space. The built in space is used for optimisations. 713 * 714 * %NULL is returned if there is no free memory. 715 */ 716 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 717 gfp_t gfp_mask) 718 { 719 struct page_frag_cache *nc; 720 struct sk_buff *skb; 721 bool pfmemalloc; 722 void *data; 723 724 len += NET_SKB_PAD; 725 726 /* If requested length is either too small or too big, 727 * we use kmalloc() for skb->head allocation. 
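	 * (Illustration, assuming 4K pages and the NET_SKB_PAD already added
	 * to len above: a 128 byte request stays under SKB_WITH_OVERHEAD(1024)
	 * and is served by __alloc_skb(); a 1500 byte frame falls in between
	 * and comes from the per-cpu page frag cache; a 9000 byte jumbo frame
	 * exceeds SKB_WITH_OVERHEAD(PAGE_SIZE) and goes back to __alloc_skb().)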
728 */ 729 if (len <= SKB_WITH_OVERHEAD(1024) || 730 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 731 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 732 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 733 if (!skb) 734 goto skb_fail; 735 goto skb_success; 736 } 737 738 len = SKB_HEAD_ALIGN(len); 739 740 if (sk_memalloc_socks()) 741 gfp_mask |= __GFP_MEMALLOC; 742 743 if (in_hardirq() || irqs_disabled()) { 744 nc = this_cpu_ptr(&netdev_alloc_cache); 745 data = page_frag_alloc(nc, len, gfp_mask); 746 pfmemalloc = nc->pfmemalloc; 747 } else { 748 local_bh_disable(); 749 nc = this_cpu_ptr(&napi_alloc_cache.page); 750 data = page_frag_alloc(nc, len, gfp_mask); 751 pfmemalloc = nc->pfmemalloc; 752 local_bh_enable(); 753 } 754 755 if (unlikely(!data)) 756 return NULL; 757 758 skb = __build_skb(data, len); 759 if (unlikely(!skb)) { 760 skb_free_frag(data); 761 return NULL; 762 } 763 764 if (pfmemalloc) 765 skb->pfmemalloc = 1; 766 skb->head_frag = 1; 767 768 skb_success: 769 skb_reserve(skb, NET_SKB_PAD); 770 skb->dev = dev; 771 772 skb_fail: 773 return skb; 774 } 775 EXPORT_SYMBOL(__netdev_alloc_skb); 776 777 /** 778 * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 779 * @napi: napi instance this buffer was allocated for 780 * @len: length to allocate 781 * 782 * Allocate a new sk_buff for use in NAPI receive. This buffer will 783 * attempt to allocate the head from a special reserved region used 784 * only for NAPI Rx allocation. By doing this we can save several 785 * CPU cycles by avoiding having to disable and re-enable IRQs. 786 * 787 * %NULL is returned if there is no free memory. 788 */ 789 struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) 790 { 791 gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN; 792 struct napi_alloc_cache *nc; 793 struct sk_buff *skb; 794 bool pfmemalloc; 795 void *data; 796 797 DEBUG_NET_WARN_ON_ONCE(!in_softirq()); 798 len += NET_SKB_PAD + NET_IP_ALIGN; 799 800 /* If requested length is either too small or too big, 801 * we use kmalloc() for skb->head allocation. 802 * When the small frag allocator is available, prefer it over kmalloc 803 * for small fragments 804 */ 805 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || 806 len > SKB_WITH_OVERHEAD(PAGE_SIZE) || 807 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 808 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, 809 NUMA_NO_NODE); 810 if (!skb) 811 goto skb_fail; 812 goto skb_success; 813 } 814 815 nc = this_cpu_ptr(&napi_alloc_cache); 816 817 if (sk_memalloc_socks()) 818 gfp_mask |= __GFP_MEMALLOC; 819 820 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) { 821 /* we are artificially inflating the allocation size, but 822 * that is not as bad as it may look like, as: 823 * - 'len' less than GRO_MAX_HEAD makes little sense 824 * - On most systems, larger 'len' values lead to fragment 825 * size above 512 bytes 826 * - kmalloc would use the kmalloc-1k slab for such values 827 * - Builds with smaller GRO_MAX_HEAD will very likely do 828 * little networking, as that implies no WiFi and no 829 * tunnels support, and 32 bits arches. 
830 */ 831 len = SZ_1K; 832 833 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); 834 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); 835 } else { 836 len = SKB_HEAD_ALIGN(len); 837 838 data = page_frag_alloc(&nc->page, len, gfp_mask); 839 pfmemalloc = nc->page.pfmemalloc; 840 } 841 842 if (unlikely(!data)) 843 return NULL; 844 845 skb = __napi_build_skb(data, len); 846 if (unlikely(!skb)) { 847 skb_free_frag(data); 848 return NULL; 849 } 850 851 if (pfmemalloc) 852 skb->pfmemalloc = 1; 853 skb->head_frag = 1; 854 855 skb_success: 856 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 857 skb->dev = napi->dev; 858 859 skb_fail: 860 return skb; 861 } 862 EXPORT_SYMBOL(napi_alloc_skb); 863 864 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, 865 int off, int size, unsigned int truesize) 866 { 867 DEBUG_NET_WARN_ON_ONCE(size > truesize); 868 869 skb_fill_netmem_desc(skb, i, netmem, off, size); 870 skb->len += size; 871 skb->data_len += size; 872 skb->truesize += truesize; 873 } 874 EXPORT_SYMBOL(skb_add_rx_frag_netmem); 875 876 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 877 unsigned int truesize) 878 { 879 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 880 881 DEBUG_NET_WARN_ON_ONCE(size > truesize); 882 883 skb_frag_size_add(frag, size); 884 skb->len += size; 885 skb->data_len += size; 886 skb->truesize += truesize; 887 } 888 EXPORT_SYMBOL(skb_coalesce_rx_frag); 889 890 static void skb_drop_list(struct sk_buff **listp) 891 { 892 kfree_skb_list(*listp); 893 *listp = NULL; 894 } 895 896 static inline void skb_drop_fraglist(struct sk_buff *skb) 897 { 898 skb_drop_list(&skb_shinfo(skb)->frag_list); 899 } 900 901 static void skb_clone_fraglist(struct sk_buff *skb) 902 { 903 struct sk_buff *list; 904 905 skb_walk_frags(skb, list) 906 skb_get(list); 907 } 908 909 static bool is_pp_page(struct page *page) 910 { 911 return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; 912 } 913 914 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, 915 unsigned int headroom) 916 { 917 #if IS_ENABLED(CONFIG_PAGE_POOL) 918 u32 size, truesize, len, max_head_size, off; 919 struct sk_buff *skb = *pskb, *nskb; 920 int err, i, head_off; 921 void *data; 922 923 /* XDP does not support fraglist so we need to linearize 924 * the skb. 
925 */ 926 if (skb_has_frag_list(skb)) 927 return -EOPNOTSUPP; 928 929 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom); 930 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) 931 return -ENOMEM; 932 933 size = min_t(u32, skb->len, max_head_size); 934 truesize = SKB_HEAD_ALIGN(size) + headroom; 935 data = page_pool_dev_alloc_va(pool, &truesize); 936 if (!data) 937 return -ENOMEM; 938 939 nskb = napi_build_skb(data, truesize); 940 if (!nskb) { 941 page_pool_free_va(pool, data, true); 942 return -ENOMEM; 943 } 944 945 skb_reserve(nskb, headroom); 946 skb_copy_header(nskb, skb); 947 skb_mark_for_recycle(nskb); 948 949 err = skb_copy_bits(skb, 0, nskb->data, size); 950 if (err) { 951 consume_skb(nskb); 952 return err; 953 } 954 skb_put(nskb, size); 955 956 head_off = skb_headroom(nskb) - skb_headroom(skb); 957 skb_headers_offset_update(nskb, head_off); 958 959 off = size; 960 len = skb->len - off; 961 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { 962 struct page *page; 963 u32 page_off; 964 965 size = min_t(u32, len, PAGE_SIZE); 966 truesize = size; 967 968 page = page_pool_dev_alloc(pool, &page_off, &truesize); 969 if (!page) { 970 consume_skb(nskb); 971 return -ENOMEM; 972 } 973 974 skb_add_rx_frag(nskb, i, page, page_off, size, truesize); 975 err = skb_copy_bits(skb, off, page_address(page) + page_off, 976 size); 977 if (err) { 978 consume_skb(nskb); 979 return err; 980 } 981 982 len -= size; 983 off += size; 984 } 985 986 consume_skb(skb); 987 *pskb = nskb; 988 989 return 0; 990 #else 991 return -EOPNOTSUPP; 992 #endif 993 } 994 EXPORT_SYMBOL(skb_pp_cow_data); 995 996 int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, 997 struct bpf_prog *prog) 998 { 999 if (!prog->aux->xdp_has_frags) 1000 return -EINVAL; 1001 1002 return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM); 1003 } 1004 EXPORT_SYMBOL(skb_cow_data_for_xdp); 1005 1006 #if IS_ENABLED(CONFIG_PAGE_POOL) 1007 bool napi_pp_put_page(struct page *page) 1008 { 1009 page = compound_head(page); 1010 1011 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 1012 * in order to preserve any existing bits, such as bit 0 for the 1013 * head page of compound page and bit 1 for pfmemalloc page, so 1014 * mask those bits for freeing side when doing below checking, 1015 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 1016 * to avoid recycling the pfmemalloc page. 1017 */ 1018 if (unlikely(!is_pp_page(page))) 1019 return false; 1020 1021 page_pool_put_full_page(page->pp, page, false); 1022 1023 return true; 1024 } 1025 EXPORT_SYMBOL(napi_pp_put_page); 1026 #endif 1027 1028 static bool skb_pp_recycle(struct sk_buff *skb, void *data) 1029 { 1030 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) 1031 return false; 1032 return napi_pp_put_page(virt_to_page(data)); 1033 } 1034 1035 /** 1036 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb 1037 * @skb: page pool aware skb 1038 * 1039 * Increase the fragment reference count (pp_ref_count) of a skb. This is 1040 * intended to gain fragment references only for page pool aware skbs, 1041 * i.e. when skb->pp_recycle is true, and not for fragments in a 1042 * non-pp-recycling skb. It has a fallback to increase references on normal 1043 * pages, as page pool aware skbs may also have normal page fragments. 
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct page *head_page;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
		if (likely(is_pp_page(head_page)))
			page_pool_ref_page(head_page);
		else
			page_ref_inc(head_page);
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		napi_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
1142 */ 1143 if (refcount_read(&fclones->fclone_ref) == 1) 1144 goto fastpath; 1145 break; 1146 1147 default: /* SKB_FCLONE_CLONE */ 1148 fclones = container_of(skb, struct sk_buff_fclones, skb2); 1149 break; 1150 } 1151 if (!refcount_dec_and_test(&fclones->fclone_ref)) 1152 return; 1153 fastpath: 1154 kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones); 1155 } 1156 1157 void skb_release_head_state(struct sk_buff *skb) 1158 { 1159 skb_dst_drop(skb); 1160 if (skb->destructor) { 1161 DEBUG_NET_WARN_ON_ONCE(in_hardirq()); 1162 skb->destructor(skb); 1163 } 1164 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 1165 nf_conntrack_put(skb_nfct(skb)); 1166 #endif 1167 skb_ext_put(skb); 1168 } 1169 1170 /* Free everything but the sk_buff shell. */ 1171 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) 1172 { 1173 skb_release_head_state(skb); 1174 if (likely(skb->head)) 1175 skb_release_data(skb, reason); 1176 } 1177 1178 /** 1179 * __kfree_skb - private function 1180 * @skb: buffer 1181 * 1182 * Free an sk_buff. Release anything attached to the buffer. 1183 * Clean the state. This is an internal helper function. Users should 1184 * always call kfree_skb 1185 */ 1186 1187 void __kfree_skb(struct sk_buff *skb) 1188 { 1189 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); 1190 kfree_skbmem(skb); 1191 } 1192 EXPORT_SYMBOL(__kfree_skb); 1193 1194 static __always_inline 1195 bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1196 { 1197 if (unlikely(!skb_unref(skb))) 1198 return false; 1199 1200 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET || 1201 u32_get_bits(reason, 1202 SKB_DROP_REASON_SUBSYS_MASK) >= 1203 SKB_DROP_REASON_SUBSYS_NUM); 1204 1205 if (reason == SKB_CONSUMED) 1206 trace_consume_skb(skb, __builtin_return_address(0)); 1207 else 1208 trace_kfree_skb(skb, __builtin_return_address(0), reason); 1209 return true; 1210 } 1211 1212 /** 1213 * kfree_skb_reason - free an sk_buff with special reason 1214 * @skb: buffer to free 1215 * @reason: reason why this skb is dropped 1216 * 1217 * Drop a reference to the buffer and free it if the usage count has 1218 * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' 1219 * tracepoint. 
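 *
 * Example (sketch, hypothetical receive path; struct my_proto_hdr is made
 * up, the reason value is one of the core reasons from DEFINE_DROP_REASON()):
 *
 *	if (unlikely(skb->len < sizeof(struct my_proto_hdr))) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
 *		return NET_RX_DROP;
 *	}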
1220 */ 1221 void __fix_address 1222 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) 1223 { 1224 if (__kfree_skb_reason(skb, reason)) 1225 __kfree_skb(skb); 1226 } 1227 EXPORT_SYMBOL(kfree_skb_reason); 1228 1229 #define KFREE_SKB_BULK_SIZE 16 1230 1231 struct skb_free_array { 1232 unsigned int skb_count; 1233 void *skb_array[KFREE_SKB_BULK_SIZE]; 1234 }; 1235 1236 static void kfree_skb_add_bulk(struct sk_buff *skb, 1237 struct skb_free_array *sa, 1238 enum skb_drop_reason reason) 1239 { 1240 /* if SKB is a clone, don't handle this case */ 1241 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { 1242 __kfree_skb(skb); 1243 return; 1244 } 1245 1246 skb_release_all(skb, reason); 1247 sa->skb_array[sa->skb_count++] = skb; 1248 1249 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { 1250 kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE, 1251 sa->skb_array); 1252 sa->skb_count = 0; 1253 } 1254 } 1255 1256 void __fix_address 1257 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) 1258 { 1259 struct skb_free_array sa; 1260 1261 sa.skb_count = 0; 1262 1263 while (segs) { 1264 struct sk_buff *next = segs->next; 1265 1266 if (__kfree_skb_reason(segs, reason)) { 1267 skb_poison_list(segs); 1268 kfree_skb_add_bulk(segs, &sa, reason); 1269 } 1270 1271 segs = next; 1272 } 1273 1274 if (sa.skb_count) 1275 kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array); 1276 } 1277 EXPORT_SYMBOL(kfree_skb_list_reason); 1278 1279 /* Dump skb information and contents. 1280 * 1281 * Must only be called from net_ratelimit()-ed paths. 1282 * 1283 * Dumps whole packets if full_pkt, only headers otherwise. 1284 */ 1285 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 1286 { 1287 struct skb_shared_info *sh = skb_shinfo(skb); 1288 struct net_device *dev = skb->dev; 1289 struct sock *sk = skb->sk; 1290 struct sk_buff *list_skb; 1291 bool has_mac, has_trans; 1292 int headroom, tailroom; 1293 int i, len, seg_len; 1294 1295 if (full_pkt) 1296 len = skb->len; 1297 else 1298 len = min_t(int, skb->len, MAX_HEADER + 128); 1299 1300 headroom = skb_headroom(skb); 1301 tailroom = skb_tailroom(skb); 1302 1303 has_mac = skb_mac_header_was_set(skb); 1304 has_trans = skb_transport_header_was_set(skb); 1305 1306 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 1307 "mac=(%d,%d) net=(%d,%d) trans=%d\n" 1308 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 1309 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 1310 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", 1311 level, skb->len, headroom, skb_headlen(skb), tailroom, 1312 has_mac ? skb->mac_header : -1, 1313 has_mac ? skb_mac_header_len(skb) : -1, 1314 skb->network_header, 1315 has_trans ? skb_network_header_len(skb) : -1, 1316 has_trans ? 
skb->transport_header : -1, 1317 sh->tx_flags, sh->nr_frags, 1318 sh->gso_size, sh->gso_type, sh->gso_segs, 1319 skb->csum, skb->ip_summed, skb->csum_complete_sw, 1320 skb->csum_valid, skb->csum_level, 1321 skb->hash, skb->sw_hash, skb->l4_hash, 1322 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); 1323 1324 if (dev) 1325 printk("%sdev name=%s feat=%pNF\n", 1326 level, dev->name, &dev->features); 1327 if (sk) 1328 printk("%ssk family=%hu type=%u proto=%u\n", 1329 level, sk->sk_family, sk->sk_type, sk->sk_protocol); 1330 1331 if (full_pkt && headroom) 1332 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, 1333 16, 1, skb->head, headroom, false); 1334 1335 seg_len = min_t(int, skb_headlen(skb), len); 1336 if (seg_len) 1337 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 1338 16, 1, skb->data, seg_len, false); 1339 len -= seg_len; 1340 1341 if (full_pkt && tailroom) 1342 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, 1343 16, 1, skb_tail_pointer(skb), tailroom, false); 1344 1345 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { 1346 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1347 u32 p_off, p_len, copied; 1348 struct page *p; 1349 u8 *vaddr; 1350 1351 skb_frag_foreach_page(frag, skb_frag_off(frag), 1352 skb_frag_size(frag), p, p_off, p_len, 1353 copied) { 1354 seg_len = min_t(int, p_len, len); 1355 vaddr = kmap_atomic(p); 1356 print_hex_dump(level, "skb frag: ", 1357 DUMP_PREFIX_OFFSET, 1358 16, 1, vaddr + p_off, seg_len, false); 1359 kunmap_atomic(vaddr); 1360 len -= seg_len; 1361 if (!len) 1362 break; 1363 } 1364 } 1365 1366 if (full_pkt && skb_has_frag_list(skb)) { 1367 printk("skb fraglist:\n"); 1368 skb_walk_frags(skb, list_skb) 1369 skb_dump(level, list_skb, true); 1370 } 1371 } 1372 EXPORT_SYMBOL(skb_dump); 1373 1374 /** 1375 * skb_tx_error - report an sk_buff xmit error 1376 * @skb: buffer that triggered an error 1377 * 1378 * Report xmit error if a device callback is tracking this skb. 1379 * skb must be freed afterwards. 
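 *
 * Example (sketch, hypothetical driver xmit error path; "dev" and "dma_addr"
 * are the driver's own state):
 *
 *	if (dma_mapping_error(dev, dma_addr)) {
 *		skb_tx_error(skb);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}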
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb) {
		skb_zcopy_downgrade_managed(skb);
		skb_zcopy_clear(skb, true);
	}
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Alike consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb, __builtin_return_address(0));
	skb_release_data(skb, SKB_CONSUMED);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	if (!kasan_mempool_poison_object(skb))
		return;

	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_mempool_unpoison_object(nc->skb_cache[i],
						kmem_cache_size(net_hotdata.skbuff_cache));

		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_all(skb, reason);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb, __builtin_return_address(0));

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, SKB_CONSUMED);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old,
false); 1504 1505 /* Note : this field could be in the headers group. 1506 * It is not yet because we do not want to have a 16 bit hole 1507 */ 1508 new->queue_mapping = old->queue_mapping; 1509 1510 memcpy(&new->headers, &old->headers, sizeof(new->headers)); 1511 CHECK_SKB_FIELD(protocol); 1512 CHECK_SKB_FIELD(csum); 1513 CHECK_SKB_FIELD(hash); 1514 CHECK_SKB_FIELD(priority); 1515 CHECK_SKB_FIELD(skb_iif); 1516 CHECK_SKB_FIELD(vlan_proto); 1517 CHECK_SKB_FIELD(vlan_tci); 1518 CHECK_SKB_FIELD(transport_header); 1519 CHECK_SKB_FIELD(network_header); 1520 CHECK_SKB_FIELD(mac_header); 1521 CHECK_SKB_FIELD(inner_protocol); 1522 CHECK_SKB_FIELD(inner_transport_header); 1523 CHECK_SKB_FIELD(inner_network_header); 1524 CHECK_SKB_FIELD(inner_mac_header); 1525 CHECK_SKB_FIELD(mark); 1526 #ifdef CONFIG_NETWORK_SECMARK 1527 CHECK_SKB_FIELD(secmark); 1528 #endif 1529 #ifdef CONFIG_NET_RX_BUSY_POLL 1530 CHECK_SKB_FIELD(napi_id); 1531 #endif 1532 CHECK_SKB_FIELD(alloc_cpu); 1533 #ifdef CONFIG_XPS 1534 CHECK_SKB_FIELD(sender_cpu); 1535 #endif 1536 #ifdef CONFIG_NET_SCHED 1537 CHECK_SKB_FIELD(tc_index); 1538 #endif 1539 1540 } 1541 1542 /* 1543 * You should not add any new code to this function. Add it to 1544 * __copy_skb_header above instead. 1545 */ 1546 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 1547 { 1548 #define C(x) n->x = skb->x 1549 1550 n->next = n->prev = NULL; 1551 n->sk = NULL; 1552 __copy_skb_header(n, skb); 1553 1554 C(len); 1555 C(data_len); 1556 C(mac_len); 1557 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 1558 n->cloned = 1; 1559 n->nohdr = 0; 1560 n->peeked = 0; 1561 C(pfmemalloc); 1562 C(pp_recycle); 1563 n->destructor = NULL; 1564 C(tail); 1565 C(end); 1566 C(head); 1567 C(head_frag); 1568 C(data); 1569 C(truesize); 1570 refcount_set(&n->users, 1); 1571 1572 atomic_inc(&(skb_shinfo(skb)->dataref)); 1573 skb->cloned = 1; 1574 1575 return n; 1576 #undef C 1577 } 1578 1579 /** 1580 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg 1581 * @first: first sk_buff of the msg 1582 */ 1583 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) 1584 { 1585 struct sk_buff *n; 1586 1587 n = alloc_skb(0, GFP_ATOMIC); 1588 if (!n) 1589 return NULL; 1590 1591 n->len = first->len; 1592 n->data_len = first->len; 1593 n->truesize = first->truesize; 1594 1595 skb_shinfo(n)->frag_list = first; 1596 1597 __copy_skb_header(n, first); 1598 n->destructor = NULL; 1599 1600 return n; 1601 } 1602 EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1603 1604 /** 1605 * skb_morph - morph one skb into another 1606 * @dst: the skb to receive the contents 1607 * @src: the skb to supply the contents 1608 * 1609 * This is identical to skb_clone except that the target skb is 1610 * supplied by the user. 1611 * 1612 * The target skb is returned upon exit. 1613 */ 1614 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1615 { 1616 skb_release_all(dst, SKB_CONSUMED); 1617 return __skb_clone(dst, src); 1618 } 1619 EXPORT_SYMBOL_GPL(skb_morph); 1620 1621 int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1622 { 1623 unsigned long max_pg, num_pg, new_pg, old_pg, rlim; 1624 struct user_struct *user; 1625 1626 if (capable(CAP_IPC_LOCK) || !size) 1627 return 0; 1628 1629 rlim = rlimit(RLIMIT_MEMLOCK); 1630 if (rlim == RLIM_INFINITY) 1631 return 0; 1632 1633 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1634 max_pg = rlim >> PAGE_SHIFT; 1635 user = mmp->user ? 
: current_user(); 1636 1637 old_pg = atomic_long_read(&user->locked_vm); 1638 do { 1639 new_pg = old_pg + num_pg; 1640 if (new_pg > max_pg) 1641 return -ENOBUFS; 1642 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); 1643 1644 if (!mmp->user) { 1645 mmp->user = get_uid(user); 1646 mmp->num_pg = num_pg; 1647 } else { 1648 mmp->num_pg += num_pg; 1649 } 1650 1651 return 0; 1652 } 1653 EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1654 1655 void mm_unaccount_pinned_pages(struct mmpin *mmp) 1656 { 1657 if (mmp->user) { 1658 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1659 free_uid(mmp->user); 1660 } 1661 } 1662 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1663 1664 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 1665 { 1666 struct ubuf_info_msgzc *uarg; 1667 struct sk_buff *skb; 1668 1669 WARN_ON_ONCE(!in_task()); 1670 1671 skb = sock_omalloc(sk, 0, GFP_KERNEL); 1672 if (!skb) 1673 return NULL; 1674 1675 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 1676 uarg = (void *)skb->cb; 1677 uarg->mmp.user = NULL; 1678 1679 if (mm_account_pinned_pages(&uarg->mmp, size)) { 1680 kfree_skb(skb); 1681 return NULL; 1682 } 1683 1684 uarg->ubuf.callback = msg_zerocopy_callback; 1685 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 1686 uarg->len = 1; 1687 uarg->bytelen = size; 1688 uarg->zerocopy = 1; 1689 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; 1690 refcount_set(&uarg->ubuf.refcnt, 1); 1691 sock_hold(sk); 1692 1693 return &uarg->ubuf; 1694 } 1695 1696 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) 1697 { 1698 return container_of((void *)uarg, struct sk_buff, cb); 1699 } 1700 1701 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1702 struct ubuf_info *uarg) 1703 { 1704 if (uarg) { 1705 struct ubuf_info_msgzc *uarg_zc; 1706 const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 1707 u32 bytelen, next; 1708 1709 /* there might be non MSG_ZEROCOPY users */ 1710 if (uarg->callback != msg_zerocopy_callback) 1711 return NULL; 1712 1713 /* realloc only when socket is locked (TCP, UDP cork), 1714 * so uarg->len and sk_zckey access is serialized 1715 */ 1716 if (!sock_owned_by_user(sk)) { 1717 WARN_ON_ONCE(1); 1718 return NULL; 1719 } 1720 1721 uarg_zc = uarg_to_msgzc(uarg); 1722 bytelen = uarg_zc->bytelen + size; 1723 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { 1724 /* TCP can create new skb to attach new uarg */ 1725 if (sk->sk_type == SOCK_STREAM) 1726 goto new_alloc; 1727 return NULL; 1728 } 1729 1730 next = (u32)atomic_read(&sk->sk_zckey); 1731 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { 1732 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) 1733 return NULL; 1734 uarg_zc->len++; 1735 uarg_zc->bytelen = bytelen; 1736 atomic_set(&sk->sk_zckey, ++next); 1737 1738 /* no extra ref when appending to datagram (MSG_MORE) */ 1739 if (sk->sk_type == SOCK_STREAM) 1740 net_zcopy_get(uarg); 1741 1742 return uarg; 1743 } 1744 } 1745 1746 new_alloc: 1747 return msg_zerocopy_alloc(sk, size); 1748 } 1749 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 1750 1751 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 1752 { 1753 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 1754 u32 old_lo, old_hi; 1755 u64 sum_len; 1756 1757 old_lo = serr->ee.ee_info; 1758 old_hi = serr->ee.ee_data; 1759 sum_len = old_hi - old_lo + 1ULL + len; 1760 1761 if (sum_len >= (1ULL << 32)) 1762 return false; 1763 1764 if (lo != old_hi + 1) 1765 return false; 1766 1767 serr->ee.ee_data 
+= len; 1768 return true; 1769 } 1770 1771 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) 1772 { 1773 struct sk_buff *tail, *skb = skb_from_uarg(uarg); 1774 struct sock_exterr_skb *serr; 1775 struct sock *sk = skb->sk; 1776 struct sk_buff_head *q; 1777 unsigned long flags; 1778 bool is_zerocopy; 1779 u32 lo, hi; 1780 u16 len; 1781 1782 mm_unaccount_pinned_pages(&uarg->mmp); 1783 1784 /* if !len, there was only 1 call, and it was aborted 1785 * so do not queue a completion notification 1786 */ 1787 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 1788 goto release; 1789 1790 len = uarg->len; 1791 lo = uarg->id; 1792 hi = uarg->id + len - 1; 1793 is_zerocopy = uarg->zerocopy; 1794 1795 serr = SKB_EXT_ERR(skb); 1796 memset(serr, 0, sizeof(*serr)); 1797 serr->ee.ee_errno = 0; 1798 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 1799 serr->ee.ee_data = hi; 1800 serr->ee.ee_info = lo; 1801 if (!is_zerocopy) 1802 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 1803 1804 q = &sk->sk_error_queue; 1805 spin_lock_irqsave(&q->lock, flags); 1806 tail = skb_peek_tail(q); 1807 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 1808 !skb_zerocopy_notify_extend(tail, lo, len)) { 1809 __skb_queue_tail(q, skb); 1810 skb = NULL; 1811 } 1812 spin_unlock_irqrestore(&q->lock, flags); 1813 1814 sk_error_report(sk); 1815 1816 release: 1817 consume_skb(skb); 1818 sock_put(sk); 1819 } 1820 1821 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 1822 bool success) 1823 { 1824 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); 1825 1826 uarg_zc->zerocopy = uarg_zc->zerocopy & success; 1827 1828 if (refcount_dec_and_test(&uarg->refcnt)) 1829 __msg_zerocopy_callback(uarg_zc); 1830 } 1831 EXPORT_SYMBOL_GPL(msg_zerocopy_callback); 1832 1833 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1834 { 1835 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; 1836 1837 atomic_dec(&sk->sk_zckey); 1838 uarg_to_msgzc(uarg)->len--; 1839 1840 if (have_uref) 1841 msg_zerocopy_callback(NULL, uarg, true); 1842 } 1843 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 1844 1845 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1846 struct msghdr *msg, int len, 1847 struct ubuf_info *uarg) 1848 { 1849 struct ubuf_info *orig_uarg = skb_zcopy(skb); 1850 int err, orig_len = skb->len; 1851 1852 /* An skb can only point to one uarg. This edge case happens when 1853 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. 1854 */ 1855 if (orig_uarg && uarg != orig_uarg) 1856 return -EEXIST; 1857 1858 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); 1859 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1860 struct sock *save_sk = skb->sk; 1861 1862 /* Streams do not free skb on error. Reset to prev state. 
*/ 1863 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); 1864 skb->sk = sk; 1865 ___pskb_trim(skb, orig_len); 1866 skb->sk = save_sk; 1867 return err; 1868 } 1869 1870 skb_zcopy_set(skb, uarg, NULL); 1871 return skb->len - orig_len; 1872 } 1873 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 1874 1875 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) 1876 { 1877 int i; 1878 1879 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; 1880 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1881 skb_frag_ref(skb, i); 1882 } 1883 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); 1884 1885 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 1886 gfp_t gfp_mask) 1887 { 1888 if (skb_zcopy(orig)) { 1889 if (skb_zcopy(nskb)) { 1890 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 1891 if (!gfp_mask) { 1892 WARN_ON_ONCE(1); 1893 return -ENOMEM; 1894 } 1895 if (skb_uarg(nskb) == skb_uarg(orig)) 1896 return 0; 1897 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 1898 return -EIO; 1899 } 1900 skb_zcopy_set(nskb, skb_uarg(orig), NULL); 1901 } 1902 return 0; 1903 } 1904 1905 /** 1906 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 1907 * @skb: the skb to modify 1908 * @gfp_mask: allocation priority 1909 * 1910 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 1911 * It will copy all frags into kernel and drop the reference 1912 * to userspace pages. 1913 * 1914 * If this function is called from an interrupt gfp_mask() must be 1915 * %GFP_ATOMIC. 1916 * 1917 * Returns 0 on success or a negative error code on failure 1918 * to allocate kernel memory to copy to. 1919 */ 1920 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1921 { 1922 int num_frags = skb_shinfo(skb)->nr_frags; 1923 struct page *page, *head = NULL; 1924 int i, order, psize, new_frags; 1925 u32 d_off; 1926 1927 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1928 return -EINVAL; 1929 1930 if (!num_frags) 1931 goto release; 1932 1933 /* We might have to allocate high order pages, so compute what minimum 1934 * page order is needed. 
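	 * (Worked example, assuming 4K pages and MAX_SKB_FRAGS == 17: an 80kB
	 * __skb_pagelen() does not fit in 17 order-0 pages (68kB), so the
	 * loop below settles on order 1 and the copy uses 8kB chunks.)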
1935 */ 1936 order = 0; 1937 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1938 order++; 1939 psize = (PAGE_SIZE << order); 1940 1941 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1942 for (i = 0; i < new_frags; i++) { 1943 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1944 if (!page) { 1945 while (head) { 1946 struct page *next = (struct page *)page_private(head); 1947 put_page(head); 1948 head = next; 1949 } 1950 return -ENOMEM; 1951 } 1952 set_page_private(page, (unsigned long)head); 1953 head = page; 1954 } 1955 1956 page = head; 1957 d_off = 0; 1958 for (i = 0; i < num_frags; i++) { 1959 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1960 u32 p_off, p_len, copied; 1961 struct page *p; 1962 u8 *vaddr; 1963 1964 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1965 p, p_off, p_len, copied) { 1966 u32 copy, done = 0; 1967 vaddr = kmap_atomic(p); 1968 1969 while (done < p_len) { 1970 if (d_off == psize) { 1971 d_off = 0; 1972 page = (struct page *)page_private(page); 1973 } 1974 copy = min_t(u32, psize - d_off, p_len - done); 1975 memcpy(page_address(page) + d_off, 1976 vaddr + p_off + done, copy); 1977 done += copy; 1978 d_off += copy; 1979 } 1980 kunmap_atomic(vaddr); 1981 } 1982 } 1983 1984 /* skb frags release userspace buffers */ 1985 for (i = 0; i < num_frags; i++) 1986 skb_frag_unref(skb, i); 1987 1988 /* skb frags point to kernel buffers */ 1989 for (i = 0; i < new_frags - 1; i++) { 1990 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 1991 head = (struct page *)page_private(head); 1992 } 1993 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 1994 d_off); 1995 skb_shinfo(skb)->nr_frags = new_frags; 1996 1997 release: 1998 skb_zcopy_clear(skb, false); 1999 return 0; 2000 } 2001 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 2002 2003 /** 2004 * skb_clone - duplicate an sk_buff 2005 * @skb: buffer to clone 2006 * @gfp_mask: allocation priority 2007 * 2008 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 2009 * copies share the same packet data but not structure. The new 2010 * buffer has a reference count of 1. If the allocation fails the 2011 * function returns %NULL otherwise the new buffer is returned. 2012 * 2013 * If this function is called from an interrupt gfp_mask() must be 2014 * %GFP_ATOMIC. 
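 * A minimal usage sketch (illustrative only; the second consumer named
 * below is hypothetical):
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	deliver_to_tap(clone);		// hypothetical second consumer
 *
 * Both buffers then reference the same payload, so neither consumer may
 * write to it without first taking a private copy, e.g. via skb_cow()
 * or pskb_expand_head().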
2015 */ 2016 2017 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2018 { 2019 struct sk_buff_fclones *fclones = container_of(skb, 2020 struct sk_buff_fclones, 2021 skb1); 2022 struct sk_buff *n; 2023 2024 if (skb_orphan_frags(skb, gfp_mask)) 2025 return NULL; 2026 2027 if (skb->fclone == SKB_FCLONE_ORIG && 2028 refcount_read(&fclones->fclone_ref) == 1) { 2029 n = &fclones->skb2; 2030 refcount_set(&fclones->fclone_ref, 2); 2031 n->fclone = SKB_FCLONE_CLONE; 2032 } else { 2033 if (skb_pfmemalloc(skb)) 2034 gfp_mask |= __GFP_MEMALLOC; 2035 2036 n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); 2037 if (!n) 2038 return NULL; 2039 2040 n->fclone = SKB_FCLONE_UNAVAILABLE; 2041 } 2042 2043 return __skb_clone(n, skb); 2044 } 2045 EXPORT_SYMBOL(skb_clone); 2046 2047 void skb_headers_offset_update(struct sk_buff *skb, int off) 2048 { 2049 /* Only adjust this if it actually is csum_start rather than csum */ 2050 if (skb->ip_summed == CHECKSUM_PARTIAL) 2051 skb->csum_start += off; 2052 /* {transport,network,mac}_header and tail are relative to skb->head */ 2053 skb->transport_header += off; 2054 skb->network_header += off; 2055 if (skb_mac_header_was_set(skb)) 2056 skb->mac_header += off; 2057 skb->inner_transport_header += off; 2058 skb->inner_network_header += off; 2059 skb->inner_mac_header += off; 2060 } 2061 EXPORT_SYMBOL(skb_headers_offset_update); 2062 2063 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2064 { 2065 __copy_skb_header(new, old); 2066 2067 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2068 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2069 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2070 } 2071 EXPORT_SYMBOL(skb_copy_header); 2072 2073 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2074 { 2075 if (skb_pfmemalloc(skb)) 2076 return SKB_ALLOC_RX; 2077 return 0; 2078 } 2079 2080 /** 2081 * skb_copy - create private copy of an sk_buff 2082 * @skb: buffer to copy 2083 * @gfp_mask: allocation priority 2084 * 2085 * Make a copy of both an &sk_buff and its data. This is used when the 2086 * caller wishes to modify the data and needs a private copy of the 2087 * data to alter. Returns %NULL on failure or the pointer to the buffer 2088 * on success. The returned buffer has a reference count of 1. 2089 * 2090 * As by-product this function converts non-linear &sk_buff to linear 2091 * one, so that &sk_buff becomes completely private and caller is allowed 2092 * to modify all the data of returned buffer. This means that this 2093 * function is not recommended for use in circumstances when only 2094 * header is going to be modified. Use pskb_copy() instead. 2095 */ 2096 2097 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2098 { 2099 int headerlen = skb_headroom(skb); 2100 unsigned int size = skb_end_offset(skb) + skb->data_len; 2101 struct sk_buff *n = __alloc_skb(size, gfp_mask, 2102 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2103 2104 if (!n) 2105 return NULL; 2106 2107 /* Set the data pointer */ 2108 skb_reserve(n, headerlen); 2109 /* Set the tail pointer and length */ 2110 skb_put(n, skb->len); 2111 2112 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2113 2114 skb_copy_header(n, skb); 2115 return n; 2116 } 2117 EXPORT_SYMBOL(skb_copy); 2118 2119 /** 2120 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2121 * @skb: buffer to copy 2122 * @headroom: headroom of new skb 2123 * @gfp_mask: allocation priority 2124 * @fclone: if true allocate the copy of the skb from the fclone 2125 * cache instead of the head cache; it is recommended to set this 2126 * to true for the cases where the copy will likely be cloned 2127 * 2128 * Make a copy of both an &sk_buff and part of its data, located 2129 * in header. Fragmented data remain shared. This is used when 2130 * the caller wishes to modify only header of &sk_buff and needs 2131 * private copy of the header to alter. Returns %NULL on failure 2132 * or the pointer to the buffer on success. 2133 * The returned buffer has a reference count of 1. 2134 */ 2135 2136 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2137 gfp_t gfp_mask, bool fclone) 2138 { 2139 unsigned int size = skb_headlen(skb) + headroom; 2140 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2141 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2142 2143 if (!n) 2144 goto out; 2145 2146 /* Set the data pointer */ 2147 skb_reserve(n, headroom); 2148 /* Set the tail pointer and length */ 2149 skb_put(n, skb_headlen(skb)); 2150 /* Copy the bytes */ 2151 skb_copy_from_linear_data(skb, n->data, n->len); 2152 2153 n->truesize += skb->data_len; 2154 n->data_len = skb->data_len; 2155 n->len = skb->len; 2156 2157 if (skb_shinfo(skb)->nr_frags) { 2158 int i; 2159 2160 if (skb_orphan_frags(skb, gfp_mask) || 2161 skb_zerocopy_clone(n, skb, gfp_mask)) { 2162 kfree_skb(n); 2163 n = NULL; 2164 goto out; 2165 } 2166 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2167 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2168 skb_frag_ref(skb, i); 2169 } 2170 skb_shinfo(n)->nr_frags = i; 2171 } 2172 2173 if (skb_has_frag_list(skb)) { 2174 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2175 skb_clone_fraglist(n); 2176 } 2177 2178 skb_copy_header(n, skb); 2179 out: 2180 return n; 2181 } 2182 EXPORT_SYMBOL(__pskb_copy_fclone); 2183 2184 /** 2185 * pskb_expand_head - reallocate header of &sk_buff 2186 * @skb: buffer to reallocate 2187 * @nhead: room to add at head 2188 * @ntail: room to add at tail 2189 * @gfp_mask: allocation priority 2190 * 2191 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2192 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2193 * reference count of 1. Returns zero in the case of success or error, 2194 * if expansion failed. In the last case, &sk_buff is not changed. 2195 * 2196 * All the pointers pointing into skb header may change and must be 2197 * reloaded after call to this function. 2198 */ 2199 2200 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2201 gfp_t gfp_mask) 2202 { 2203 unsigned int osize = skb_end_offset(skb); 2204 unsigned int size = osize + nhead + ntail; 2205 long off; 2206 u8 *data; 2207 int i; 2208 2209 BUG_ON(nhead < 0); 2210 2211 BUG_ON(skb_shared(skb)); 2212 2213 skb_zcopy_downgrade_managed(skb); 2214 2215 if (skb_pfmemalloc(skb)) 2216 gfp_mask |= __GFP_MEMALLOC; 2217 2218 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2219 if (!data) 2220 goto nodata; 2221 size = SKB_WITH_OVERHEAD(size); 2222 2223 /* Copy only real data... and, alas, header. This should be 2224 * optimized for the cases when header is void. 
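 * (Concretely, the two copies below move everything from skb->head up
 * to skb->tail into the new buffer, followed by the shared info up to
 * and including the last in-use frag descriptor.)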
2225 */ 2226 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2227 2228 memcpy((struct skb_shared_info *)(data + size), 2229 skb_shinfo(skb), 2230 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2231 2232 /* 2233 * if shinfo is shared we must drop the old head gracefully, but if it 2234 * is not we can just drop the old head and let the existing refcount 2235 * be since all we did is relocate the values 2236 */ 2237 if (skb_cloned(skb)) { 2238 if (skb_orphan_frags(skb, gfp_mask)) 2239 goto nofrags; 2240 if (skb_zcopy(skb)) 2241 refcount_inc(&skb_uarg(skb)->refcnt); 2242 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2243 skb_frag_ref(skb, i); 2244 2245 if (skb_has_frag_list(skb)) 2246 skb_clone_fraglist(skb); 2247 2248 skb_release_data(skb, SKB_CONSUMED); 2249 } else { 2250 skb_free_head(skb); 2251 } 2252 off = (data + nhead) - skb->head; 2253 2254 skb->head = data; 2255 skb->head_frag = 0; 2256 skb->data += off; 2257 2258 skb_set_end_offset(skb, size); 2259 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2260 off = nhead; 2261 #endif 2262 skb->tail += off; 2263 skb_headers_offset_update(skb, nhead); 2264 skb->cloned = 0; 2265 skb->hdr_len = 0; 2266 skb->nohdr = 0; 2267 atomic_set(&skb_shinfo(skb)->dataref, 1); 2268 2269 skb_metadata_clear(skb); 2270 2271 /* It is not generally safe to change skb->truesize. 2272 * For the moment, we really care of rx path, or 2273 * when skb is orphaned (not attached to a socket). 2274 */ 2275 if (!skb->sk || skb->destructor == sock_edemux) 2276 skb->truesize += size - osize; 2277 2278 return 0; 2279 2280 nofrags: 2281 skb_kfree_head(data, size); 2282 nodata: 2283 return -ENOMEM; 2284 } 2285 EXPORT_SYMBOL(pskb_expand_head); 2286 2287 /* Make private copy of skb with writable head and some headroom */ 2288 2289 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2290 { 2291 struct sk_buff *skb2; 2292 int delta = headroom - skb_headroom(skb); 2293 2294 if (delta <= 0) 2295 skb2 = pskb_copy(skb, GFP_ATOMIC); 2296 else { 2297 skb2 = skb_clone(skb, GFP_ATOMIC); 2298 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2299 GFP_ATOMIC)) { 2300 kfree_skb(skb2); 2301 skb2 = NULL; 2302 } 2303 } 2304 return skb2; 2305 } 2306 EXPORT_SYMBOL(skb_realloc_headroom); 2307 2308 /* Note: We plan to rework this in linux-6.4 */ 2309 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2310 { 2311 unsigned int saved_end_offset, saved_truesize; 2312 struct skb_shared_info *shinfo; 2313 int res; 2314 2315 saved_end_offset = skb_end_offset(skb); 2316 saved_truesize = skb->truesize; 2317 2318 res = pskb_expand_head(skb, 0, 0, pri); 2319 if (res) 2320 return res; 2321 2322 skb->truesize = saved_truesize; 2323 2324 if (likely(skb_end_offset(skb) == saved_end_offset)) 2325 return 0; 2326 2327 /* We can not change skb->end if the original or new value 2328 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2329 */ 2330 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2331 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2332 /* We think this path should not be taken. 2333 * Add a temporary trace to warn us just in case. 2334 */ 2335 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2336 saved_end_offset, skb_end_offset(skb)); 2337 WARN_ON_ONCE(1); 2338 return 0; 2339 } 2340 2341 shinfo = skb_shinfo(skb); 2342 2343 /* We are about to change back skb->end, 2344 * we need to move skb_shinfo() to its new location. 
2345 */ 2346 memmove(skb->head + saved_end_offset, 2347 shinfo, 2348 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2349 2350 skb_set_end_offset(skb, saved_end_offset); 2351 2352 return 0; 2353 } 2354 2355 /** 2356 * skb_expand_head - reallocate header of &sk_buff 2357 * @skb: buffer to reallocate 2358 * @headroom: needed headroom 2359 * 2360 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2361 * if possible; copies skb->sk to new skb as needed 2362 * and frees original skb in case of failures. 2363 * 2364 * It expect increased headroom and generates warning otherwise. 2365 */ 2366 2367 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2368 { 2369 int delta = headroom - skb_headroom(skb); 2370 int osize = skb_end_offset(skb); 2371 struct sock *sk = skb->sk; 2372 2373 if (WARN_ONCE(delta <= 0, 2374 "%s is expecting an increase in the headroom", __func__)) 2375 return skb; 2376 2377 delta = SKB_DATA_ALIGN(delta); 2378 /* pskb_expand_head() might crash, if skb is shared. */ 2379 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2380 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2381 2382 if (unlikely(!nskb)) 2383 goto fail; 2384 2385 if (sk) 2386 skb_set_owner_w(nskb, sk); 2387 consume_skb(skb); 2388 skb = nskb; 2389 } 2390 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2391 goto fail; 2392 2393 if (sk && is_skb_wmem(skb)) { 2394 delta = skb_end_offset(skb) - osize; 2395 refcount_add(delta, &sk->sk_wmem_alloc); 2396 skb->truesize += delta; 2397 } 2398 return skb; 2399 2400 fail: 2401 kfree_skb(skb); 2402 return NULL; 2403 } 2404 EXPORT_SYMBOL(skb_expand_head); 2405 2406 /** 2407 * skb_copy_expand - copy and expand sk_buff 2408 * @skb: buffer to copy 2409 * @newheadroom: new free bytes at head 2410 * @newtailroom: new free bytes at tail 2411 * @gfp_mask: allocation priority 2412 * 2413 * Make a copy of both an &sk_buff and its data and while doing so 2414 * allocate additional space. 2415 * 2416 * This is used when the caller wishes to modify the data and needs a 2417 * private copy of the data to alter as well as more space for new fields. 2418 * Returns %NULL on failure or the pointer to the buffer 2419 * on success. The returned buffer has a reference count of 1. 2420 * 2421 * You must pass %GFP_ATOMIC as the allocation priority if this function 2422 * is called from an interrupt. 2423 */ 2424 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2425 int newheadroom, int newtailroom, 2426 gfp_t gfp_mask) 2427 { 2428 /* 2429 * Allocate the copy buffer 2430 */ 2431 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 2432 gfp_mask, skb_alloc_rx_flag(skb), 2433 NUMA_NO_NODE); 2434 int oldheadroom = skb_headroom(skb); 2435 int head_copy_len, head_copy_off; 2436 2437 if (!n) 2438 return NULL; 2439 2440 skb_reserve(n, newheadroom); 2441 2442 /* Set the tail pointer and length */ 2443 skb_put(n, skb->len); 2444 2445 head_copy_len = oldheadroom; 2446 head_copy_off = 0; 2447 if (newheadroom <= head_copy_len) 2448 head_copy_len = newheadroom; 2449 else 2450 head_copy_off = newheadroom - head_copy_len; 2451 2452 /* Copy the linear header and data. 
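 * As an illustration (hypothetical values): with oldheadroom == 64 and
 * newheadroom == 32, head_copy_len becomes 32 and head_copy_off stays
 * 0, so only the 32 headroom bytes closest to the data survive; with
 * oldheadroom == 16 and newheadroom == 64, head_copy_len is 16 and
 * head_copy_off is 48, so the old headroom lands directly below the
 * new data pointer.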
*/ 2453 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2454 skb->len + head_copy_len)); 2455 2456 skb_copy_header(n, skb); 2457 2458 skb_headers_offset_update(n, newheadroom - oldheadroom); 2459 2460 return n; 2461 } 2462 EXPORT_SYMBOL(skb_copy_expand); 2463 2464 /** 2465 * __skb_pad - zero pad the tail of an skb 2466 * @skb: buffer to pad 2467 * @pad: space to pad 2468 * @free_on_error: free buffer on error 2469 * 2470 * Ensure that a buffer is followed by a padding area that is zero 2471 * filled. Used by network drivers which may DMA or transfer data 2472 * beyond the buffer end onto the wire. 2473 * 2474 * May return error in out of memory cases. The skb is freed on error 2475 * if @free_on_error is true. 2476 */ 2477 2478 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2479 { 2480 int err; 2481 int ntail; 2482 2483 /* If the skbuff is non linear tailroom is always zero.. */ 2484 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2485 memset(skb->data+skb->len, 0, pad); 2486 return 0; 2487 } 2488 2489 ntail = skb->data_len + pad - (skb->end - skb->tail); 2490 if (likely(skb_cloned(skb) || ntail > 0)) { 2491 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2492 if (unlikely(err)) 2493 goto free_skb; 2494 } 2495 2496 /* FIXME: The use of this function with non-linear skb's really needs 2497 * to be audited. 2498 */ 2499 err = skb_linearize(skb); 2500 if (unlikely(err)) 2501 goto free_skb; 2502 2503 memset(skb->data + skb->len, 0, pad); 2504 return 0; 2505 2506 free_skb: 2507 if (free_on_error) 2508 kfree_skb(skb); 2509 return err; 2510 } 2511 EXPORT_SYMBOL(__skb_pad); 2512 2513 /** 2514 * pskb_put - add data to the tail of a potentially fragmented buffer 2515 * @skb: start of the buffer to use 2516 * @tail: tail fragment of the buffer to use 2517 * @len: amount of data to add 2518 * 2519 * This function extends the used data area of the potentially 2520 * fragmented buffer. @tail must be the last fragment of @skb -- or 2521 * @skb itself. If this would exceed the total buffer size the kernel 2522 * will panic. A pointer to the first byte of the extra data is 2523 * returned. 2524 */ 2525 2526 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2527 { 2528 if (tail != skb) { 2529 skb->data_len += len; 2530 skb->len += len; 2531 } 2532 return skb_put(tail, len); 2533 } 2534 EXPORT_SYMBOL_GPL(pskb_put); 2535 2536 /** 2537 * skb_put - add data to a buffer 2538 * @skb: buffer to use 2539 * @len: amount of data to add 2540 * 2541 * This function extends the used data area of the buffer. If this would 2542 * exceed the total buffer size the kernel will panic. A pointer to the 2543 * first byte of the extra data is returned. 2544 */ 2545 void *skb_put(struct sk_buff *skb, unsigned int len) 2546 { 2547 void *tmp = skb_tail_pointer(skb); 2548 SKB_LINEAR_ASSERT(skb); 2549 skb->tail += len; 2550 skb->len += len; 2551 if (unlikely(skb->tail > skb->end)) 2552 skb_over_panic(skb, len, __builtin_return_address(0)); 2553 return tmp; 2554 } 2555 EXPORT_SYMBOL(skb_put); 2556 2557 /** 2558 * skb_push - add data to the start of a buffer 2559 * @skb: buffer to use 2560 * @len: amount of data to add 2561 * 2562 * This function extends the used data area of the buffer at the buffer 2563 * start. If this would exceed the total buffer headroom the kernel will 2564 * panic. A pointer to the first byte of the extra data is returned. 
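 * A short illustrative sketch (the header layout and EtherType below
 * are invented for the example; real code would typically call
 * skb_cow_head() first to guarantee the headroom):
 *
 *	struct example_hdr {
 *		__be16 proto;
 *		__be16 len;
 *	} *hdr;
 *
 *	if (skb_headroom(skb) < sizeof(*hdr))
 *		return -ENOBUFS;
 *	hdr = skb_push(skb, sizeof(*hdr));
 *	hdr->proto = htons(0x88b5);	// local experimental EtherType
 *	hdr->len = htons(skb->len);	// total length including this header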
2565 */ 2566 void *skb_push(struct sk_buff *skb, unsigned int len) 2567 { 2568 skb->data -= len; 2569 skb->len += len; 2570 if (unlikely(skb->data < skb->head)) 2571 skb_under_panic(skb, len, __builtin_return_address(0)); 2572 return skb->data; 2573 } 2574 EXPORT_SYMBOL(skb_push); 2575 2576 /** 2577 * skb_pull - remove data from the start of a buffer 2578 * @skb: buffer to use 2579 * @len: amount of data to remove 2580 * 2581 * This function removes data from the start of a buffer, returning 2582 * the memory to the headroom. A pointer to the next data in the buffer 2583 * is returned. Once the data has been pulled future pushes will overwrite 2584 * the old data. 2585 */ 2586 void *skb_pull(struct sk_buff *skb, unsigned int len) 2587 { 2588 return skb_pull_inline(skb, len); 2589 } 2590 EXPORT_SYMBOL(skb_pull); 2591 2592 /** 2593 * skb_pull_data - remove data from the start of a buffer returning its 2594 * original position. 2595 * @skb: buffer to use 2596 * @len: amount of data to remove 2597 * 2598 * This function removes data from the start of a buffer, returning 2599 * the memory to the headroom. A pointer to the original data in the buffer 2600 * is returned after checking if there is enough data to pull. Once the 2601 * data has been pulled future pushes will overwrite the old data. 2602 */ 2603 void *skb_pull_data(struct sk_buff *skb, size_t len) 2604 { 2605 void *data = skb->data; 2606 2607 if (skb->len < len) 2608 return NULL; 2609 2610 skb_pull(skb, len); 2611 2612 return data; 2613 } 2614 EXPORT_SYMBOL(skb_pull_data); 2615 2616 /** 2617 * skb_trim - remove end from a buffer 2618 * @skb: buffer to alter 2619 * @len: new length 2620 * 2621 * Cut the length of a buffer down by removing data from the tail. If 2622 * the buffer is already under the length specified it is not modified. 2623 * The skb must be linear. 2624 */ 2625 void skb_trim(struct sk_buff *skb, unsigned int len) 2626 { 2627 if (skb->len > len) 2628 __skb_trim(skb, len); 2629 } 2630 EXPORT_SYMBOL(skb_trim); 2631 2632 /* Trims skb to length len. It can change skb pointers. 
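 * Callers normally reach this through pskb_trim() (or, when a
 * CHECKSUM_COMPLETE value may need adjusting, pskb_trim_rcsum())
 * rather than invoking it directly. An illustrative sketch, assuming
 * skb->len is larger than the hypothetical 4-byte trailer being
 * stripped:
 *
 *	if (pskb_trim(skb, skb->len - 4))
 *		goto drop;		// unsharing the head failed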
2633 */ 2634 2635 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2636 { 2637 struct sk_buff **fragp; 2638 struct sk_buff *frag; 2639 int offset = skb_headlen(skb); 2640 int nfrags = skb_shinfo(skb)->nr_frags; 2641 int i; 2642 int err; 2643 2644 if (skb_cloned(skb) && 2645 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2646 return err; 2647 2648 i = 0; 2649 if (offset >= len) 2650 goto drop_pages; 2651 2652 for (; i < nfrags; i++) { 2653 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2654 2655 if (end < len) { 2656 offset = end; 2657 continue; 2658 } 2659 2660 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2661 2662 drop_pages: 2663 skb_shinfo(skb)->nr_frags = i; 2664 2665 for (; i < nfrags; i++) 2666 skb_frag_unref(skb, i); 2667 2668 if (skb_has_frag_list(skb)) 2669 skb_drop_fraglist(skb); 2670 goto done; 2671 } 2672 2673 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2674 fragp = &frag->next) { 2675 int end = offset + frag->len; 2676 2677 if (skb_shared(frag)) { 2678 struct sk_buff *nfrag; 2679 2680 nfrag = skb_clone(frag, GFP_ATOMIC); 2681 if (unlikely(!nfrag)) 2682 return -ENOMEM; 2683 2684 nfrag->next = frag->next; 2685 consume_skb(frag); 2686 frag = nfrag; 2687 *fragp = frag; 2688 } 2689 2690 if (end < len) { 2691 offset = end; 2692 continue; 2693 } 2694 2695 if (end > len && 2696 unlikely((err = pskb_trim(frag, len - offset)))) 2697 return err; 2698 2699 if (frag->next) 2700 skb_drop_list(&frag->next); 2701 break; 2702 } 2703 2704 done: 2705 if (len > skb_headlen(skb)) { 2706 skb->data_len -= skb->len - len; 2707 skb->len = len; 2708 } else { 2709 skb->len = len; 2710 skb->data_len = 0; 2711 skb_set_tail_pointer(skb, len); 2712 } 2713 2714 if (!skb->sk || skb->destructor == sock_edemux) 2715 skb_condense(skb); 2716 return 0; 2717 } 2718 EXPORT_SYMBOL(___pskb_trim); 2719 2720 /* Note : use pskb_trim_rcsum() instead of calling this directly 2721 */ 2722 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2723 { 2724 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2725 int delta = skb->len - len; 2726 2727 skb->csum = csum_block_sub(skb->csum, 2728 skb_checksum(skb, len, delta, 0), 2729 len); 2730 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2731 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2732 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2733 2734 if (offset + sizeof(__sum16) > hdlen) 2735 return -EINVAL; 2736 } 2737 return __pskb_trim(skb, len); 2738 } 2739 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2740 2741 /** 2742 * __pskb_pull_tail - advance tail of skb header 2743 * @skb: buffer to reallocate 2744 * @delta: number of bytes to advance tail 2745 * 2746 * The function makes a sense only on a fragmented &sk_buff, 2747 * it expands header moving its tail forward and copying necessary 2748 * data from fragmented part. 2749 * 2750 * &sk_buff MUST have reference count of 1. 2751 * 2752 * Returns %NULL (and &sk_buff does not change) if pull failed 2753 * or value of new tail of skb in the case of success. 2754 * 2755 * All the pointers pointing into skb header may change and must be 2756 * reloaded after call to this function. 2757 */ 2758 2759 /* Moves tail of skb head forward, copying data from fragmented part, 2760 * when it is necessary. 2761 * 1. It may fail due to malloc failure. 2762 * 2. It may change skb pointers. 2763 * 2764 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
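 * Most callers reach it indirectly through pskb_may_pull(), which only
 * falls back to this slow path when the requested bytes are not yet in
 * the linear area. An illustrative sketch (assumes skb->data, and hence
 * the transport header, already points at the UDP header):
 *
 *	struct udphdr *uh;
 *
 *	if (!pskb_may_pull(skb, sizeof(*uh)))
 *		goto drop;		// runt packet or reallocation failed
 *	uh = udp_hdr(skb);		// header bytes are now linear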
2765 */ 2766 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2767 { 2768 /* If skb has not enough free space at tail, get new one 2769 * plus 128 bytes for future expansions. If we have enough 2770 * room at tail, reallocate without expansion only if skb is cloned. 2771 */ 2772 int i, k, eat = (skb->tail + delta) - skb->end; 2773 2774 if (eat > 0 || skb_cloned(skb)) { 2775 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2776 GFP_ATOMIC)) 2777 return NULL; 2778 } 2779 2780 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2781 skb_tail_pointer(skb), delta)); 2782 2783 /* Optimization: no fragments, no reasons to preestimate 2784 * size of pulled pages. Superb. 2785 */ 2786 if (!skb_has_frag_list(skb)) 2787 goto pull_pages; 2788 2789 /* Estimate size of pulled pages. */ 2790 eat = delta; 2791 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2792 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2793 2794 if (size >= eat) 2795 goto pull_pages; 2796 eat -= size; 2797 } 2798 2799 /* If we need update frag list, we are in troubles. 2800 * Certainly, it is possible to add an offset to skb data, 2801 * but taking into account that pulling is expected to 2802 * be very rare operation, it is worth to fight against 2803 * further bloating skb head and crucify ourselves here instead. 2804 * Pure masohism, indeed. 8)8) 2805 */ 2806 if (eat) { 2807 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2808 struct sk_buff *clone = NULL; 2809 struct sk_buff *insp = NULL; 2810 2811 do { 2812 if (list->len <= eat) { 2813 /* Eaten as whole. */ 2814 eat -= list->len; 2815 list = list->next; 2816 insp = list; 2817 } else { 2818 /* Eaten partially. */ 2819 if (skb_is_gso(skb) && !list->head_frag && 2820 skb_headlen(list)) 2821 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2822 2823 if (skb_shared(list)) { 2824 /* Sucks! We need to fork list. :-( */ 2825 clone = skb_clone(list, GFP_ATOMIC); 2826 if (!clone) 2827 return NULL; 2828 insp = list->next; 2829 list = clone; 2830 } else { 2831 /* This may be pulled without 2832 * problems. */ 2833 insp = list; 2834 } 2835 if (!pskb_pull(list, eat)) { 2836 kfree_skb(clone); 2837 return NULL; 2838 } 2839 break; 2840 } 2841 } while (eat); 2842 2843 /* Free pulled out fragments. */ 2844 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2845 skb_shinfo(skb)->frag_list = list->next; 2846 consume_skb(list); 2847 } 2848 /* And insert new clone at head. */ 2849 if (clone) { 2850 clone->next = list; 2851 skb_shinfo(skb)->frag_list = clone; 2852 } 2853 } 2854 /* Success! Now we may commit changes to skb data. 
*/ 2855 2856 pull_pages: 2857 eat = delta; 2858 k = 0; 2859 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2860 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2861 2862 if (size <= eat) { 2863 skb_frag_unref(skb, i); 2864 eat -= size; 2865 } else { 2866 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2867 2868 *frag = skb_shinfo(skb)->frags[i]; 2869 if (eat) { 2870 skb_frag_off_add(frag, eat); 2871 skb_frag_size_sub(frag, eat); 2872 if (!i) 2873 goto end; 2874 eat = 0; 2875 } 2876 k++; 2877 } 2878 } 2879 skb_shinfo(skb)->nr_frags = k; 2880 2881 end: 2882 skb->tail += delta; 2883 skb->data_len -= delta; 2884 2885 if (!skb->data_len) 2886 skb_zcopy_clear(skb, false); 2887 2888 return skb_tail_pointer(skb); 2889 } 2890 EXPORT_SYMBOL(__pskb_pull_tail); 2891 2892 /** 2893 * skb_copy_bits - copy bits from skb to kernel buffer 2894 * @skb: source skb 2895 * @offset: offset in source 2896 * @to: destination buffer 2897 * @len: number of bytes to copy 2898 * 2899 * Copy the specified number of bytes from the source skb to the 2900 * destination buffer. 2901 * 2902 * CAUTION ! : 2903 * If its prototype is ever changed, 2904 * check arch/{*}/net/{*}.S files, 2905 * since it is called from BPF assembly code. 2906 */ 2907 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2908 { 2909 int start = skb_headlen(skb); 2910 struct sk_buff *frag_iter; 2911 int i, copy; 2912 2913 if (offset > (int)skb->len - len) 2914 goto fault; 2915 2916 /* Copy header. */ 2917 if ((copy = start - offset) > 0) { 2918 if (copy > len) 2919 copy = len; 2920 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2921 if ((len -= copy) == 0) 2922 return 0; 2923 offset += copy; 2924 to += copy; 2925 } 2926 2927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2928 int end; 2929 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2930 2931 WARN_ON(start > offset + len); 2932 2933 end = start + skb_frag_size(f); 2934 if ((copy = end - offset) > 0) { 2935 u32 p_off, p_len, copied; 2936 struct page *p; 2937 u8 *vaddr; 2938 2939 if (copy > len) 2940 copy = len; 2941 2942 skb_frag_foreach_page(f, 2943 skb_frag_off(f) + offset - start, 2944 copy, p, p_off, p_len, copied) { 2945 vaddr = kmap_atomic(p); 2946 memcpy(to + copied, vaddr + p_off, p_len); 2947 kunmap_atomic(vaddr); 2948 } 2949 2950 if ((len -= copy) == 0) 2951 return 0; 2952 offset += copy; 2953 to += copy; 2954 } 2955 start = end; 2956 } 2957 2958 skb_walk_frags(skb, frag_iter) { 2959 int end; 2960 2961 WARN_ON(start > offset + len); 2962 2963 end = start + frag_iter->len; 2964 if ((copy = end - offset) > 0) { 2965 if (copy > len) 2966 copy = len; 2967 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2968 goto fault; 2969 if ((len -= copy) == 0) 2970 return 0; 2971 offset += copy; 2972 to += copy; 2973 } 2974 start = end; 2975 } 2976 2977 if (!len) 2978 return 0; 2979 2980 fault: 2981 return -EFAULT; 2982 } 2983 EXPORT_SYMBOL(skb_copy_bits); 2984 2985 /* 2986 * Callback from splice_to_pipe(), if we need to release some pages 2987 * at the end of the spd in case we error'ed out in filling the pipe. 
2988 */ 2989 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 2990 { 2991 put_page(spd->pages[i]); 2992 } 2993 2994 static struct page *linear_to_page(struct page *page, unsigned int *len, 2995 unsigned int *offset, 2996 struct sock *sk) 2997 { 2998 struct page_frag *pfrag = sk_page_frag(sk); 2999 3000 if (!sk_page_frag_refill(sk, pfrag)) 3001 return NULL; 3002 3003 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 3004 3005 memcpy(page_address(pfrag->page) + pfrag->offset, 3006 page_address(page) + *offset, *len); 3007 *offset = pfrag->offset; 3008 pfrag->offset += *len; 3009 3010 return pfrag->page; 3011 } 3012 3013 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 3014 struct page *page, 3015 unsigned int offset) 3016 { 3017 return spd->nr_pages && 3018 spd->pages[spd->nr_pages - 1] == page && 3019 (spd->partial[spd->nr_pages - 1].offset + 3020 spd->partial[spd->nr_pages - 1].len == offset); 3021 } 3022 3023 /* 3024 * Fill page/offset/length into spd, if it can hold more pages. 3025 */ 3026 static bool spd_fill_page(struct splice_pipe_desc *spd, 3027 struct pipe_inode_info *pipe, struct page *page, 3028 unsigned int *len, unsigned int offset, 3029 bool linear, 3030 struct sock *sk) 3031 { 3032 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3033 return true; 3034 3035 if (linear) { 3036 page = linear_to_page(page, len, &offset, sk); 3037 if (!page) 3038 return true; 3039 } 3040 if (spd_can_coalesce(spd, page, offset)) { 3041 spd->partial[spd->nr_pages - 1].len += *len; 3042 return false; 3043 } 3044 get_page(page); 3045 spd->pages[spd->nr_pages] = page; 3046 spd->partial[spd->nr_pages].len = *len; 3047 spd->partial[spd->nr_pages].offset = offset; 3048 spd->nr_pages++; 3049 3050 return false; 3051 } 3052 3053 static bool __splice_segment(struct page *page, unsigned int poff, 3054 unsigned int plen, unsigned int *off, 3055 unsigned int *len, 3056 struct splice_pipe_desc *spd, bool linear, 3057 struct sock *sk, 3058 struct pipe_inode_info *pipe) 3059 { 3060 if (!*len) 3061 return true; 3062 3063 /* skip this segment if already processed */ 3064 if (*off >= plen) { 3065 *off -= plen; 3066 return false; 3067 } 3068 3069 /* ignore any bits we already processed */ 3070 poff += *off; 3071 plen -= *off; 3072 *off = 0; 3073 3074 do { 3075 unsigned int flen = min(*len, plen); 3076 3077 if (spd_fill_page(spd, pipe, page, &flen, poff, 3078 linear, sk)) 3079 return true; 3080 poff += flen; 3081 plen -= flen; 3082 *len -= flen; 3083 } while (*len && plen); 3084 3085 return false; 3086 } 3087 3088 /* 3089 * Map linear and fragment data from the skb to spd. It reports true if the 3090 * pipe is full or if we already spliced the requested length. 3091 */ 3092 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3093 unsigned int *offset, unsigned int *len, 3094 struct splice_pipe_desc *spd, struct sock *sk) 3095 { 3096 int seg; 3097 struct sk_buff *iter; 3098 3099 /* map the linear part : 3100 * If skb->head_frag is set, this 'linear' part is backed by a 3101 * fragment, and if the head is not shared with any clones then 3102 * we can avoid a copy since we own the head portion of this page. 
3103 */ 3104 if (__splice_segment(virt_to_page(skb->data), 3105 (unsigned long) skb->data & (PAGE_SIZE - 1), 3106 skb_headlen(skb), 3107 offset, len, spd, 3108 skb_head_is_locked(skb), 3109 sk, pipe)) 3110 return true; 3111 3112 /* 3113 * then map the fragments 3114 */ 3115 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3116 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3117 3118 if (__splice_segment(skb_frag_page(f), 3119 skb_frag_off(f), skb_frag_size(f), 3120 offset, len, spd, false, sk, pipe)) 3121 return true; 3122 } 3123 3124 skb_walk_frags(skb, iter) { 3125 if (*offset >= iter->len) { 3126 *offset -= iter->len; 3127 continue; 3128 } 3129 /* __skb_splice_bits() only fails if the output has no room 3130 * left, so no point in going over the frag_list for the error 3131 * case. 3132 */ 3133 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3134 return true; 3135 } 3136 3137 return false; 3138 } 3139 3140 /* 3141 * Map data from the skb to a pipe. Should handle both the linear part, 3142 * the fragments, and the frag list. 3143 */ 3144 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3145 struct pipe_inode_info *pipe, unsigned int tlen, 3146 unsigned int flags) 3147 { 3148 struct partial_page partial[MAX_SKB_FRAGS]; 3149 struct page *pages[MAX_SKB_FRAGS]; 3150 struct splice_pipe_desc spd = { 3151 .pages = pages, 3152 .partial = partial, 3153 .nr_pages_max = MAX_SKB_FRAGS, 3154 .ops = &nosteal_pipe_buf_ops, 3155 .spd_release = sock_spd_release, 3156 }; 3157 int ret = 0; 3158 3159 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3160 3161 if (spd.nr_pages) 3162 ret = splice_to_pipe(pipe, &spd); 3163 3164 return ret; 3165 } 3166 EXPORT_SYMBOL_GPL(skb_splice_bits); 3167 3168 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3169 { 3170 struct socket *sock = sk->sk_socket; 3171 size_t size = msg_data_left(msg); 3172 3173 if (!sock) 3174 return -EINVAL; 3175 3176 if (!sock->ops->sendmsg_locked) 3177 return sock_no_sendmsg_locked(sk, msg, size); 3178 3179 return sock->ops->sendmsg_locked(sk, msg, size); 3180 } 3181 3182 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3183 { 3184 struct socket *sock = sk->sk_socket; 3185 3186 if (!sock) 3187 return -EINVAL; 3188 return sock_sendmsg(sock, msg); 3189 } 3190 3191 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3192 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3193 int len, sendmsg_func sendmsg) 3194 { 3195 unsigned int orig_len = len; 3196 struct sk_buff *head = skb; 3197 unsigned short fragidx; 3198 int slen, ret; 3199 3200 do_frag_list: 3201 3202 /* Deal with head data */ 3203 while (offset < skb_headlen(skb) && len) { 3204 struct kvec kv; 3205 struct msghdr msg; 3206 3207 slen = min_t(int, len, skb_headlen(skb) - offset); 3208 kv.iov_base = skb->data + offset; 3209 kv.iov_len = slen; 3210 memset(&msg, 0, sizeof(msg)); 3211 msg.msg_flags = MSG_DONTWAIT; 3212 3213 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3214 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3215 sendmsg_unlocked, sk, &msg); 3216 if (ret <= 0) 3217 goto error; 3218 3219 offset += ret; 3220 len -= ret; 3221 } 3222 3223 /* All the data was skb head? 
*/ 3224 if (!len) 3225 goto out; 3226 3227 /* Make offset relative to start of frags */ 3228 offset -= skb_headlen(skb); 3229 3230 /* Find where we are in frag list */ 3231 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3232 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3233 3234 if (offset < skb_frag_size(frag)) 3235 break; 3236 3237 offset -= skb_frag_size(frag); 3238 } 3239 3240 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3241 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3242 3243 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3244 3245 while (slen) { 3246 struct bio_vec bvec; 3247 struct msghdr msg = { 3248 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3249 }; 3250 3251 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3252 skb_frag_off(frag) + offset); 3253 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3254 slen); 3255 3256 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3257 sendmsg_unlocked, sk, &msg); 3258 if (ret <= 0) 3259 goto error; 3260 3261 len -= ret; 3262 offset += ret; 3263 slen -= ret; 3264 } 3265 3266 offset = 0; 3267 } 3268 3269 if (len) { 3270 /* Process any frag lists */ 3271 3272 if (skb == head) { 3273 if (skb_has_frag_list(skb)) { 3274 skb = skb_shinfo(skb)->frag_list; 3275 goto do_frag_list; 3276 } 3277 } else if (skb->next) { 3278 skb = skb->next; 3279 goto do_frag_list; 3280 } 3281 } 3282 3283 out: 3284 return orig_len - len; 3285 3286 error: 3287 return orig_len == len ? ret : orig_len - len; 3288 } 3289 3290 /* Send skb data on a socket. Socket must be locked. */ 3291 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3292 int len) 3293 { 3294 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3295 } 3296 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3297 3298 /* Send skb data on a socket. Socket must be unlocked. */ 3299 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3300 { 3301 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3302 } 3303 3304 /** 3305 * skb_store_bits - store bits from kernel buffer to skb 3306 * @skb: destination buffer 3307 * @offset: offset in destination 3308 * @from: source buffer 3309 * @len: number of bytes to copy 3310 * 3311 * Copy the specified number of bytes from the source buffer to the 3312 * destination skb. This function handles all the messy bits of 3313 * traversing fragment lists and such. 
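 * An illustrative sketch (offset, length and patch bytes are invented;
 * skb_ensure_writable() is used first so the bytes being rewritten are
 * private to this caller):
 *
 *	static const u8 patch[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	int err;
 *
 *	err = skb_ensure_writable(skb, 20);
 *	if (err)
 *		return err;
 *	err = skb_store_bits(skb, 16, patch, sizeof(patch));
 *	if (err)
 *		return err;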
3314 */ 3315 3316 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3317 { 3318 int start = skb_headlen(skb); 3319 struct sk_buff *frag_iter; 3320 int i, copy; 3321 3322 if (offset > (int)skb->len - len) 3323 goto fault; 3324 3325 if ((copy = start - offset) > 0) { 3326 if (copy > len) 3327 copy = len; 3328 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3329 if ((len -= copy) == 0) 3330 return 0; 3331 offset += copy; 3332 from += copy; 3333 } 3334 3335 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3336 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3337 int end; 3338 3339 WARN_ON(start > offset + len); 3340 3341 end = start + skb_frag_size(frag); 3342 if ((copy = end - offset) > 0) { 3343 u32 p_off, p_len, copied; 3344 struct page *p; 3345 u8 *vaddr; 3346 3347 if (copy > len) 3348 copy = len; 3349 3350 skb_frag_foreach_page(frag, 3351 skb_frag_off(frag) + offset - start, 3352 copy, p, p_off, p_len, copied) { 3353 vaddr = kmap_atomic(p); 3354 memcpy(vaddr + p_off, from + copied, p_len); 3355 kunmap_atomic(vaddr); 3356 } 3357 3358 if ((len -= copy) == 0) 3359 return 0; 3360 offset += copy; 3361 from += copy; 3362 } 3363 start = end; 3364 } 3365 3366 skb_walk_frags(skb, frag_iter) { 3367 int end; 3368 3369 WARN_ON(start > offset + len); 3370 3371 end = start + frag_iter->len; 3372 if ((copy = end - offset) > 0) { 3373 if (copy > len) 3374 copy = len; 3375 if (skb_store_bits(frag_iter, offset - start, 3376 from, copy)) 3377 goto fault; 3378 if ((len -= copy) == 0) 3379 return 0; 3380 offset += copy; 3381 from += copy; 3382 } 3383 start = end; 3384 } 3385 if (!len) 3386 return 0; 3387 3388 fault: 3389 return -EFAULT; 3390 } 3391 EXPORT_SYMBOL(skb_store_bits); 3392 3393 /* Checksum skb data. */ 3394 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3395 __wsum csum, const struct skb_checksum_ops *ops) 3396 { 3397 int start = skb_headlen(skb); 3398 int i, copy = start - offset; 3399 struct sk_buff *frag_iter; 3400 int pos = 0; 3401 3402 /* Checksum header. 
*/ 3403 if (copy > 0) { 3404 if (copy > len) 3405 copy = len; 3406 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3407 skb->data + offset, copy, csum); 3408 if ((len -= copy) == 0) 3409 return csum; 3410 offset += copy; 3411 pos = copy; 3412 } 3413 3414 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3415 int end; 3416 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3417 3418 WARN_ON(start > offset + len); 3419 3420 end = start + skb_frag_size(frag); 3421 if ((copy = end - offset) > 0) { 3422 u32 p_off, p_len, copied; 3423 struct page *p; 3424 __wsum csum2; 3425 u8 *vaddr; 3426 3427 if (copy > len) 3428 copy = len; 3429 3430 skb_frag_foreach_page(frag, 3431 skb_frag_off(frag) + offset - start, 3432 copy, p, p_off, p_len, copied) { 3433 vaddr = kmap_atomic(p); 3434 csum2 = INDIRECT_CALL_1(ops->update, 3435 csum_partial_ext, 3436 vaddr + p_off, p_len, 0); 3437 kunmap_atomic(vaddr); 3438 csum = INDIRECT_CALL_1(ops->combine, 3439 csum_block_add_ext, csum, 3440 csum2, pos, p_len); 3441 pos += p_len; 3442 } 3443 3444 if (!(len -= copy)) 3445 return csum; 3446 offset += copy; 3447 } 3448 start = end; 3449 } 3450 3451 skb_walk_frags(skb, frag_iter) { 3452 int end; 3453 3454 WARN_ON(start > offset + len); 3455 3456 end = start + frag_iter->len; 3457 if ((copy = end - offset) > 0) { 3458 __wsum csum2; 3459 if (copy > len) 3460 copy = len; 3461 csum2 = __skb_checksum(frag_iter, offset - start, 3462 copy, 0, ops); 3463 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3464 csum, csum2, pos, copy); 3465 if ((len -= copy) == 0) 3466 return csum; 3467 offset += copy; 3468 pos += copy; 3469 } 3470 start = end; 3471 } 3472 BUG_ON(len); 3473 3474 return csum; 3475 } 3476 EXPORT_SYMBOL(__skb_checksum); 3477 3478 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3479 int len, __wsum csum) 3480 { 3481 const struct skb_checksum_ops ops = { 3482 .update = csum_partial_ext, 3483 .combine = csum_block_add_ext, 3484 }; 3485 3486 return __skb_checksum(skb, offset, len, csum, &ops); 3487 } 3488 EXPORT_SYMBOL(skb_checksum); 3489 3490 /* Both of above in one bottle. */ 3491 3492 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3493 u8 *to, int len) 3494 { 3495 int start = skb_headlen(skb); 3496 int i, copy = start - offset; 3497 struct sk_buff *frag_iter; 3498 int pos = 0; 3499 __wsum csum = 0; 3500 3501 /* Copy header. 
*/ 3502 if (copy > 0) { 3503 if (copy > len) 3504 copy = len; 3505 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3506 copy); 3507 if ((len -= copy) == 0) 3508 return csum; 3509 offset += copy; 3510 to += copy; 3511 pos = copy; 3512 } 3513 3514 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3515 int end; 3516 3517 WARN_ON(start > offset + len); 3518 3519 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3520 if ((copy = end - offset) > 0) { 3521 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3522 u32 p_off, p_len, copied; 3523 struct page *p; 3524 __wsum csum2; 3525 u8 *vaddr; 3526 3527 if (copy > len) 3528 copy = len; 3529 3530 skb_frag_foreach_page(frag, 3531 skb_frag_off(frag) + offset - start, 3532 copy, p, p_off, p_len, copied) { 3533 vaddr = kmap_atomic(p); 3534 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3535 to + copied, 3536 p_len); 3537 kunmap_atomic(vaddr); 3538 csum = csum_block_add(csum, csum2, pos); 3539 pos += p_len; 3540 } 3541 3542 if (!(len -= copy)) 3543 return csum; 3544 offset += copy; 3545 to += copy; 3546 } 3547 start = end; 3548 } 3549 3550 skb_walk_frags(skb, frag_iter) { 3551 __wsum csum2; 3552 int end; 3553 3554 WARN_ON(start > offset + len); 3555 3556 end = start + frag_iter->len; 3557 if ((copy = end - offset) > 0) { 3558 if (copy > len) 3559 copy = len; 3560 csum2 = skb_copy_and_csum_bits(frag_iter, 3561 offset - start, 3562 to, copy); 3563 csum = csum_block_add(csum, csum2, pos); 3564 if ((len -= copy) == 0) 3565 return csum; 3566 offset += copy; 3567 to += copy; 3568 pos += copy; 3569 } 3570 start = end; 3571 } 3572 BUG_ON(len); 3573 return csum; 3574 } 3575 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3576 3577 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3578 { 3579 __sum16 sum; 3580 3581 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3582 /* See comments in __skb_checksum_complete(). */ 3583 if (likely(!sum)) { 3584 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3585 !skb->csum_complete_sw) 3586 netdev_rx_csum_fault(skb->dev, skb); 3587 } 3588 if (!skb_shared(skb)) 3589 skb->csum_valid = !sum; 3590 return sum; 3591 } 3592 EXPORT_SYMBOL(__skb_checksum_complete_head); 3593 3594 /* This function assumes skb->csum already holds pseudo header's checksum, 3595 * which has been changed from the hardware checksum, for example, by 3596 * __skb_checksum_validate_complete(). And, the original skb->csum must 3597 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3598 * 3599 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3600 * zero. The new checksum is stored back into skb->csum unless the skb is 3601 * shared. 3602 */ 3603 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3604 { 3605 __wsum csum; 3606 __sum16 sum; 3607 3608 csum = skb_checksum(skb, 0, skb->len, 0); 3609 3610 sum = csum_fold(csum_add(skb->csum, csum)); 3611 /* This check is inverted, because we already knew the hardware 3612 * checksum is invalid before calling this function. So, if the 3613 * re-computed checksum is valid instead, then we have a mismatch 3614 * between the original skb->csum and skb_checksum(). This means either 3615 * the original hardware checksum is incorrect or we screw up skb->csum 3616 * when moving skb->data around. 
3617 */ 3618 if (likely(!sum)) { 3619 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3620 !skb->csum_complete_sw) 3621 netdev_rx_csum_fault(skb->dev, skb); 3622 } 3623 3624 if (!skb_shared(skb)) { 3625 /* Save full packet checksum */ 3626 skb->csum = csum; 3627 skb->ip_summed = CHECKSUM_COMPLETE; 3628 skb->csum_complete_sw = 1; 3629 skb->csum_valid = !sum; 3630 } 3631 3632 return sum; 3633 } 3634 EXPORT_SYMBOL(__skb_checksum_complete); 3635 3636 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3637 { 3638 net_warn_ratelimited( 3639 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3640 __func__); 3641 return 0; 3642 } 3643 3644 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3645 int offset, int len) 3646 { 3647 net_warn_ratelimited( 3648 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3649 __func__); 3650 return 0; 3651 } 3652 3653 static const struct skb_checksum_ops default_crc32c_ops = { 3654 .update = warn_crc32c_csum_update, 3655 .combine = warn_crc32c_csum_combine, 3656 }; 3657 3658 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3659 &default_crc32c_ops; 3660 EXPORT_SYMBOL(crc32c_csum_stub); 3661 3662 /** 3663 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3664 * @from: source buffer 3665 * 3666 * Calculates the amount of linear headroom needed in the 'to' skb passed 3667 * into skb_zerocopy(). 3668 */ 3669 unsigned int 3670 skb_zerocopy_headlen(const struct sk_buff *from) 3671 { 3672 unsigned int hlen = 0; 3673 3674 if (!from->head_frag || 3675 skb_headlen(from) < L1_CACHE_BYTES || 3676 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3677 hlen = skb_headlen(from); 3678 if (!hlen) 3679 hlen = from->len; 3680 } 3681 3682 if (skb_has_frag_list(from)) 3683 hlen = from->len; 3684 3685 return hlen; 3686 } 3687 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3688 3689 /** 3690 * skb_zerocopy - Zero copy skb to skb 3691 * @to: destination buffer 3692 * @from: source buffer 3693 * @len: number of bytes to copy from source buffer 3694 * @hlen: size of linear headroom in destination buffer 3695 * 3696 * Copies up to `len` bytes from `from` to `to` by creating references 3697 * to the frags in the source buffer. 3698 * 3699 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3700 * headroom in the `to` buffer. 
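 * An illustrative sketch of the expected calling pattern (sizing kept
 * minimal, error handling trimmed):
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *	int err;
 *
 *	if (!to)
 *		return -ENOMEM;
 *	err = skb_zerocopy(to, from, from->len, hlen);
 *	if (err) {
 *		kfree_skb(to);
 *		return err;
 *	}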
3701 * 3702 * Return value: 3703 * 0: everything is OK 3704 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3705 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3706 */ 3707 int 3708 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3709 { 3710 int i, j = 0; 3711 int plen = 0; /* length of skb->head fragment */ 3712 int ret; 3713 struct page *page; 3714 unsigned int offset; 3715 3716 BUG_ON(!from->head_frag && !hlen); 3717 3718 /* dont bother with small payloads */ 3719 if (len <= skb_tailroom(to)) 3720 return skb_copy_bits(from, 0, skb_put(to, len), len); 3721 3722 if (hlen) { 3723 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3724 if (unlikely(ret)) 3725 return ret; 3726 len -= hlen; 3727 } else { 3728 plen = min_t(int, skb_headlen(from), len); 3729 if (plen) { 3730 page = virt_to_head_page(from->head); 3731 offset = from->data - (unsigned char *)page_address(page); 3732 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3733 offset, plen); 3734 get_page(page); 3735 j = 1; 3736 len -= plen; 3737 } 3738 } 3739 3740 skb_len_add(to, len + plen); 3741 3742 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3743 skb_tx_error(from); 3744 return -ENOMEM; 3745 } 3746 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3747 3748 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3749 int size; 3750 3751 if (!len) 3752 break; 3753 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3754 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3755 len); 3756 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3757 len -= size; 3758 skb_frag_ref(to, j); 3759 j++; 3760 } 3761 skb_shinfo(to)->nr_frags = j; 3762 3763 return 0; 3764 } 3765 EXPORT_SYMBOL_GPL(skb_zerocopy); 3766 3767 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3768 { 3769 __wsum csum; 3770 long csstart; 3771 3772 if (skb->ip_summed == CHECKSUM_PARTIAL) 3773 csstart = skb_checksum_start_offset(skb); 3774 else 3775 csstart = skb_headlen(skb); 3776 3777 BUG_ON(csstart > skb_headlen(skb)); 3778 3779 skb_copy_from_linear_data(skb, to, csstart); 3780 3781 csum = 0; 3782 if (csstart != skb->len) 3783 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3784 skb->len - csstart); 3785 3786 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3787 long csstuff = csstart + skb->csum_offset; 3788 3789 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3790 } 3791 } 3792 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3793 3794 /** 3795 * skb_dequeue - remove from the head of the queue 3796 * @list: list to dequeue from 3797 * 3798 * Remove the head of the list. The list lock is taken so the function 3799 * may be used safely with other locking list functions. The head item is 3800 * returned or %NULL if the list is empty. 3801 */ 3802 3803 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3804 { 3805 unsigned long flags; 3806 struct sk_buff *result; 3807 3808 spin_lock_irqsave(&list->lock, flags); 3809 result = __skb_dequeue(list); 3810 spin_unlock_irqrestore(&list->lock, flags); 3811 return result; 3812 } 3813 EXPORT_SYMBOL(skb_dequeue); 3814 3815 /** 3816 * skb_dequeue_tail - remove from the tail of the queue 3817 * @list: list to dequeue from 3818 * 3819 * Remove the tail of the list. The list lock is taken so the function 3820 * may be used safely with other locking list functions. The tail item is 3821 * returned or %NULL if the list is empty. 
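 * The locked queue helpers in this file combine naturally; a minimal
 * illustrative sketch of a private FIFO (all names hypothetical):
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	...
 *	skb_queue_tail(&rxq, skb);		// producer context
 *	...
 *	while ((skb = skb_dequeue(&rxq)))	// consumer context
 *		handle_one(skb);
 *	skb_queue_purge(&rxq);			// tear-down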
3822 */ 3823 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3824 { 3825 unsigned long flags; 3826 struct sk_buff *result; 3827 3828 spin_lock_irqsave(&list->lock, flags); 3829 result = __skb_dequeue_tail(list); 3830 spin_unlock_irqrestore(&list->lock, flags); 3831 return result; 3832 } 3833 EXPORT_SYMBOL(skb_dequeue_tail); 3834 3835 /** 3836 * skb_queue_purge_reason - empty a list 3837 * @list: list to empty 3838 * @reason: drop reason 3839 * 3840 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3841 * the list and one reference dropped. This function takes the list 3842 * lock and is atomic with respect to other list locking functions. 3843 */ 3844 void skb_queue_purge_reason(struct sk_buff_head *list, 3845 enum skb_drop_reason reason) 3846 { 3847 struct sk_buff_head tmp; 3848 unsigned long flags; 3849 3850 if (skb_queue_empty_lockless(list)) 3851 return; 3852 3853 __skb_queue_head_init(&tmp); 3854 3855 spin_lock_irqsave(&list->lock, flags); 3856 skb_queue_splice_init(list, &tmp); 3857 spin_unlock_irqrestore(&list->lock, flags); 3858 3859 __skb_queue_purge_reason(&tmp, reason); 3860 } 3861 EXPORT_SYMBOL(skb_queue_purge_reason); 3862 3863 /** 3864 * skb_rbtree_purge - empty a skb rbtree 3865 * @root: root of the rbtree to empty 3866 * Return value: the sum of truesizes of all purged skbs. 3867 * 3868 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3869 * the list and one reference dropped. This function does not take 3870 * any lock. Synchronization should be handled by the caller (e.g., TCP 3871 * out-of-order queue is protected by the socket lock). 3872 */ 3873 unsigned int skb_rbtree_purge(struct rb_root *root) 3874 { 3875 struct rb_node *p = rb_first(root); 3876 unsigned int sum = 0; 3877 3878 while (p) { 3879 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3880 3881 p = rb_next(p); 3882 rb_erase(&skb->rbnode, root); 3883 sum += skb->truesize; 3884 kfree_skb(skb); 3885 } 3886 return sum; 3887 } 3888 3889 void skb_errqueue_purge(struct sk_buff_head *list) 3890 { 3891 struct sk_buff *skb, *next; 3892 struct sk_buff_head kill; 3893 unsigned long flags; 3894 3895 __skb_queue_head_init(&kill); 3896 3897 spin_lock_irqsave(&list->lock, flags); 3898 skb_queue_walk_safe(list, skb, next) { 3899 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3900 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3901 continue; 3902 __skb_unlink(skb, list); 3903 __skb_queue_tail(&kill, skb); 3904 } 3905 spin_unlock_irqrestore(&list->lock, flags); 3906 __skb_queue_purge(&kill); 3907 } 3908 EXPORT_SYMBOL(skb_errqueue_purge); 3909 3910 /** 3911 * skb_queue_head - queue a buffer at the list head 3912 * @list: list to use 3913 * @newsk: buffer to queue 3914 * 3915 * Queue a buffer at the start of the list. This function takes the 3916 * list lock and can be used safely with other locking &sk_buff functions 3917 * safely. 3918 * 3919 * A buffer cannot be placed on two lists at the same time. 3920 */ 3921 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3922 { 3923 unsigned long flags; 3924 3925 spin_lock_irqsave(&list->lock, flags); 3926 __skb_queue_head(list, newsk); 3927 spin_unlock_irqrestore(&list->lock, flags); 3928 } 3929 EXPORT_SYMBOL(skb_queue_head); 3930 3931 /** 3932 * skb_queue_tail - queue a buffer at the list tail 3933 * @list: list to use 3934 * @newsk: buffer to queue 3935 * 3936 * Queue a buffer at the tail of the list. 
This function takes the 3937 * list lock and can be used safely with other locking &sk_buff functions 3938 * safely. 3939 * 3940 * A buffer cannot be placed on two lists at the same time. 3941 */ 3942 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3943 { 3944 unsigned long flags; 3945 3946 spin_lock_irqsave(&list->lock, flags); 3947 __skb_queue_tail(list, newsk); 3948 spin_unlock_irqrestore(&list->lock, flags); 3949 } 3950 EXPORT_SYMBOL(skb_queue_tail); 3951 3952 /** 3953 * skb_unlink - remove a buffer from a list 3954 * @skb: buffer to remove 3955 * @list: list to use 3956 * 3957 * Remove a packet from a list. The list locks are taken and this 3958 * function is atomic with respect to other list locked calls 3959 * 3960 * You must know what list the SKB is on. 3961 */ 3962 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3963 { 3964 unsigned long flags; 3965 3966 spin_lock_irqsave(&list->lock, flags); 3967 __skb_unlink(skb, list); 3968 spin_unlock_irqrestore(&list->lock, flags); 3969 } 3970 EXPORT_SYMBOL(skb_unlink); 3971 3972 /** 3973 * skb_append - append a buffer 3974 * @old: buffer to insert after 3975 * @newsk: buffer to insert 3976 * @list: list to use 3977 * 3978 * Place a packet after a given packet in a list. The list locks are taken 3979 * and this function is atomic with respect to other list locked calls. 3980 * A buffer cannot be placed on two lists at the same time. 3981 */ 3982 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 3983 { 3984 unsigned long flags; 3985 3986 spin_lock_irqsave(&list->lock, flags); 3987 __skb_queue_after(list, old, newsk); 3988 spin_unlock_irqrestore(&list->lock, flags); 3989 } 3990 EXPORT_SYMBOL(skb_append); 3991 3992 static inline void skb_split_inside_header(struct sk_buff *skb, 3993 struct sk_buff* skb1, 3994 const u32 len, const int pos) 3995 { 3996 int i; 3997 3998 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3999 pos - len); 4000 /* And move data appendix as is. */ 4001 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 4002 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 4003 4004 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 4005 skb_shinfo(skb)->nr_frags = 0; 4006 skb1->data_len = skb->data_len; 4007 skb1->len += skb1->data_len; 4008 skb->data_len = 0; 4009 skb->len = len; 4010 skb_set_tail_pointer(skb, len); 4011 } 4012 4013 static inline void skb_split_no_header(struct sk_buff *skb, 4014 struct sk_buff* skb1, 4015 const u32 len, int pos) 4016 { 4017 int i, k = 0; 4018 const int nfrags = skb_shinfo(skb)->nr_frags; 4019 4020 skb_shinfo(skb)->nr_frags = 0; 4021 skb1->len = skb1->data_len = skb->len - len; 4022 skb->len = len; 4023 skb->data_len = len - pos; 4024 4025 for (i = 0; i < nfrags; i++) { 4026 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4027 4028 if (pos + size > len) { 4029 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 4030 4031 if (pos < len) { 4032 /* Split frag. 4033 * We have two variants in this case: 4034 * 1. Move all the frag to the second 4035 * part, if it is possible. F.e. 4036 * this approach is mandatory for TUX, 4037 * where splitting is expensive. 4038 * 2. Split is accurately. We make this. 
4039 */ 4040 skb_frag_ref(skb, i); 4041 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 4042 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 4043 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 4044 skb_shinfo(skb)->nr_frags++; 4045 } 4046 k++; 4047 } else 4048 skb_shinfo(skb)->nr_frags++; 4049 pos += size; 4050 } 4051 skb_shinfo(skb1)->nr_frags = k; 4052 } 4053 4054 /** 4055 * skb_split - Split fragmented skb to two parts at length len. 4056 * @skb: the buffer to split 4057 * @skb1: the buffer to receive the second part 4058 * @len: new length for skb 4059 */ 4060 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 4061 { 4062 int pos = skb_headlen(skb); 4063 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 4064 4065 skb_zcopy_downgrade_managed(skb); 4066 4067 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 4068 skb_zerocopy_clone(skb1, skb, 0); 4069 if (len < pos) /* Split line is inside header. */ 4070 skb_split_inside_header(skb, skb1, len, pos); 4071 else /* Second chunk has no header, nothing to copy. */ 4072 skb_split_no_header(skb, skb1, len, pos); 4073 } 4074 EXPORT_SYMBOL(skb_split); 4075 4076 /* Shifting from/to a cloned skb is a no-go. 4077 * 4078 * Caller cannot keep skb_shinfo related pointers past calling here! 4079 */ 4080 static int skb_prepare_for_shift(struct sk_buff *skb) 4081 { 4082 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 4083 } 4084 4085 /** 4086 * skb_shift - Shifts paged data partially from skb to another 4087 * @tgt: buffer into which tail data gets added 4088 * @skb: buffer from which the paged data comes from 4089 * @shiftlen: shift up to this many bytes 4090 * 4091 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4092 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4093 * It's up to caller to free skb if everything was shifted. 4094 * 4095 * If @tgt runs out of frags, the whole operation is aborted. 4096 * 4097 * Skb cannot include anything else but paged data while tgt is allowed 4098 * to have non-paged data as well. 4099 * 4100 * TODO: full sized shift could be optimized but that would need 4101 * specialized skb free'er to handle frags without up-to-date nr_frags. 4102 */ 4103 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4104 { 4105 int from, to, merge, todo; 4106 skb_frag_t *fragfrom, *fragto; 4107 4108 BUG_ON(shiftlen > skb->len); 4109 4110 if (skb_headlen(skb)) 4111 return 0; 4112 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4113 return 0; 4114 4115 todo = shiftlen; 4116 from = 0; 4117 to = skb_shinfo(tgt)->nr_frags; 4118 fragfrom = &skb_shinfo(skb)->frags[from]; 4119 4120 /* Actual merge is delayed until the point when we know we can 4121 * commit all, so that we don't have to undo partial changes 4122 */ 4123 if (!to || 4124 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4125 skb_frag_off(fragfrom))) { 4126 merge = -1; 4127 } else { 4128 merge = to - 1; 4129 4130 todo -= skb_frag_size(fragfrom); 4131 if (todo < 0) { 4132 if (skb_prepare_for_shift(skb) || 4133 skb_prepare_for_shift(tgt)) 4134 return 0; 4135 4136 /* All previous frag pointers might be stale! 
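 * (skb_prepare_for_shift() may have unshared the head via
 * pskb_expand_head(), which relocates the shared info and its frag
 * array, so the pointers are reloaded below before they are used again.)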
*/ 4137 fragfrom = &skb_shinfo(skb)->frags[from]; 4138 fragto = &skb_shinfo(tgt)->frags[merge]; 4139 4140 skb_frag_size_add(fragto, shiftlen); 4141 skb_frag_size_sub(fragfrom, shiftlen); 4142 skb_frag_off_add(fragfrom, shiftlen); 4143 4144 goto onlymerged; 4145 } 4146 4147 from++; 4148 } 4149 4150 /* Skip full, not-fitting skb to avoid expensive operations */ 4151 if ((shiftlen == skb->len) && 4152 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4153 return 0; 4154 4155 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4156 return 0; 4157 4158 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4159 if (to == MAX_SKB_FRAGS) 4160 return 0; 4161 4162 fragfrom = &skb_shinfo(skb)->frags[from]; 4163 fragto = &skb_shinfo(tgt)->frags[to]; 4164 4165 if (todo >= skb_frag_size(fragfrom)) { 4166 *fragto = *fragfrom; 4167 todo -= skb_frag_size(fragfrom); 4168 from++; 4169 to++; 4170 4171 } else { 4172 __skb_frag_ref(fragfrom); 4173 skb_frag_page_copy(fragto, fragfrom); 4174 skb_frag_off_copy(fragto, fragfrom); 4175 skb_frag_size_set(fragto, todo); 4176 4177 skb_frag_off_add(fragfrom, todo); 4178 skb_frag_size_sub(fragfrom, todo); 4179 todo = 0; 4180 4181 to++; 4182 break; 4183 } 4184 } 4185 4186 /* Ready to "commit" this state change to tgt */ 4187 skb_shinfo(tgt)->nr_frags = to; 4188 4189 if (merge >= 0) { 4190 fragfrom = &skb_shinfo(skb)->frags[0]; 4191 fragto = &skb_shinfo(tgt)->frags[merge]; 4192 4193 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4194 __skb_frag_unref(fragfrom, skb->pp_recycle); 4195 } 4196 4197 /* Reposition in the original skb */ 4198 to = 0; 4199 while (from < skb_shinfo(skb)->nr_frags) 4200 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4201 skb_shinfo(skb)->nr_frags = to; 4202 4203 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4204 4205 onlymerged: 4206 /* Most likely the tgt won't ever need its checksum anymore, skb on 4207 * the other hand might need it if it needs to be resent 4208 */ 4209 tgt->ip_summed = CHECKSUM_PARTIAL; 4210 skb->ip_summed = CHECKSUM_PARTIAL; 4211 4212 skb_len_add(skb, -shiftlen); 4213 skb_len_add(tgt, shiftlen); 4214 4215 return shiftlen; 4216 } 4217 4218 /** 4219 * skb_prepare_seq_read - Prepare a sequential read of skb data 4220 * @skb: the buffer to read 4221 * @from: lower offset of data to be read 4222 * @to: upper offset of data to be read 4223 * @st: state variable 4224 * 4225 * Initializes the specified state variable. Must be called before 4226 * invoking skb_seq_read() for the first time. 4227 */ 4228 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4229 unsigned int to, struct skb_seq_state *st) 4230 { 4231 st->lower_offset = from; 4232 st->upper_offset = to; 4233 st->root_skb = st->cur_skb = skb; 4234 st->frag_idx = st->stepped_offset = 0; 4235 st->frag_data = NULL; 4236 st->frag_off = 0; 4237 } 4238 EXPORT_SYMBOL(skb_prepare_seq_read); 4239 4240 /** 4241 * skb_seq_read - Sequentially read skb data 4242 * @consumed: number of bytes consumed by the caller so far 4243 * @data: destination pointer for data to be returned 4244 * @st: state variable 4245 * 4246 * Reads a block of skb data at @consumed relative to the 4247 * lower offset specified to skb_prepare_seq_read(). Assigns 4248 * the head of the data block to @data and returns the length 4249 * of the block or 0 if the end of the skb data or the upper 4250 * offset has been reached. 4251 * 4252 * The caller is not required to consume all of the data 4253 * returned, i.e. 
@consumed is typically set to the number 4254 * of bytes already consumed and the next call to 4255 * skb_seq_read() will return the remaining part of the block. 4256 * 4257 * Note 1: The size of each block of data returned can be arbitrary, 4258 * this limitation is the cost for zerocopy sequential 4259 * reads of potentially non linear data. 4260 * 4261 * Note 2: Fragment lists within fragments are not implemented 4262 * at the moment, state->root_skb could be replaced with 4263 * a stack for this purpose. 4264 */ 4265 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4266 struct skb_seq_state *st) 4267 { 4268 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4269 skb_frag_t *frag; 4270 4271 if (unlikely(abs_offset >= st->upper_offset)) { 4272 if (st->frag_data) { 4273 kunmap_atomic(st->frag_data); 4274 st->frag_data = NULL; 4275 } 4276 return 0; 4277 } 4278 4279 next_skb: 4280 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4281 4282 if (abs_offset < block_limit && !st->frag_data) { 4283 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4284 return block_limit - abs_offset; 4285 } 4286 4287 if (st->frag_idx == 0 && !st->frag_data) 4288 st->stepped_offset += skb_headlen(st->cur_skb); 4289 4290 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4291 unsigned int pg_idx, pg_off, pg_sz; 4292 4293 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4294 4295 pg_idx = 0; 4296 pg_off = skb_frag_off(frag); 4297 pg_sz = skb_frag_size(frag); 4298 4299 if (skb_frag_must_loop(skb_frag_page(frag))) { 4300 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4301 pg_off = offset_in_page(pg_off + st->frag_off); 4302 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4303 PAGE_SIZE - pg_off); 4304 } 4305 4306 block_limit = pg_sz + st->stepped_offset; 4307 if (abs_offset < block_limit) { 4308 if (!st->frag_data) 4309 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4310 4311 *data = (u8 *)st->frag_data + pg_off + 4312 (abs_offset - st->stepped_offset); 4313 4314 return block_limit - abs_offset; 4315 } 4316 4317 if (st->frag_data) { 4318 kunmap_atomic(st->frag_data); 4319 st->frag_data = NULL; 4320 } 4321 4322 st->stepped_offset += pg_sz; 4323 st->frag_off += pg_sz; 4324 if (st->frag_off == skb_frag_size(frag)) { 4325 st->frag_off = 0; 4326 st->frag_idx++; 4327 } 4328 } 4329 4330 if (st->frag_data) { 4331 kunmap_atomic(st->frag_data); 4332 st->frag_data = NULL; 4333 } 4334 4335 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4336 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4337 st->frag_idx = 0; 4338 goto next_skb; 4339 } else if (st->cur_skb->next) { 4340 st->cur_skb = st->cur_skb->next; 4341 st->frag_idx = 0; 4342 goto next_skb; 4343 } 4344 4345 return 0; 4346 } 4347 EXPORT_SYMBOL(skb_seq_read); 4348 4349 /** 4350 * skb_abort_seq_read - Abort a sequential read of skb data 4351 * @st: state variable 4352 * 4353 * Must be called if skb_seq_read() was not called until it 4354 * returned 0. 
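 *
 * A minimal usage sketch (hypothetical caller; process_block() is an
 * assumed consumer and error handling is elided) showing how the three
 * sequential-read helpers are meant to be paired:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (!process_block(data, len)) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}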
4355 */ 4356 void skb_abort_seq_read(struct skb_seq_state *st) 4357 { 4358 if (st->frag_data) 4359 kunmap_atomic(st->frag_data); 4360 } 4361 EXPORT_SYMBOL(skb_abort_seq_read); 4362 4363 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4364 4365 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4366 struct ts_config *conf, 4367 struct ts_state *state) 4368 { 4369 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4370 } 4371 4372 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4373 { 4374 skb_abort_seq_read(TS_SKB_CB(state)); 4375 } 4376 4377 /** 4378 * skb_find_text - Find a text pattern in skb data 4379 * @skb: the buffer to look in 4380 * @from: search offset 4381 * @to: search limit 4382 * @config: textsearch configuration 4383 * 4384 * Finds a pattern in the skb data according to the specified 4385 * textsearch configuration. Use textsearch_next() to retrieve 4386 * subsequent occurrences of the pattern. Returns the offset 4387 * to the first occurrence or UINT_MAX if no match was found. 4388 */ 4389 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4390 unsigned int to, struct ts_config *config) 4391 { 4392 unsigned int patlen = config->ops->get_pattern_len(config); 4393 struct ts_state state; 4394 unsigned int ret; 4395 4396 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4397 4398 config->get_next_block = skb_ts_get_next_block; 4399 config->finish = skb_ts_finish; 4400 4401 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4402 4403 ret = textsearch_find(config, &state); 4404 return (ret + patlen <= to - from ? ret : UINT_MAX); 4405 } 4406 EXPORT_SYMBOL(skb_find_text); 4407 4408 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4409 int offset, size_t size, size_t max_frags) 4410 { 4411 int i = skb_shinfo(skb)->nr_frags; 4412 4413 if (skb_can_coalesce(skb, i, page, offset)) { 4414 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4415 } else if (i < max_frags) { 4416 skb_zcopy_downgrade_managed(skb); 4417 get_page(page); 4418 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4419 } else { 4420 return -EMSGSIZE; 4421 } 4422 4423 return 0; 4424 } 4425 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4426 4427 /** 4428 * skb_pull_rcsum - pull skb and update receive checksum 4429 * @skb: buffer to update 4430 * @len: length of data pulled 4431 * 4432 * This function performs an skb_pull on the packet and updates 4433 * the CHECKSUM_COMPLETE checksum. It should be used on 4434 * receive path processing instead of skb_pull unless you know 4435 * that the checksum difference is zero (e.g., a valid IP header) 4436 * or you are setting ip_summed to CHECKSUM_NONE. 
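 *
 * Hedged example of the intended receive-path pattern, assuming a
 * protocol handler that strips a 4-byte header it has already
 * validated:
 *
 *	if (!pskb_may_pull(skb, 4))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);
 *	(skb->csum remains usable for CHECKSUM_COMPLETE packets)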
4437 */ 4438 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4439 { 4440 unsigned char *data = skb->data; 4441 4442 BUG_ON(len > skb->len); 4443 __skb_pull(skb, len); 4444 skb_postpull_rcsum(skb, data, len); 4445 return skb->data; 4446 } 4447 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4448 4449 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4450 { 4451 skb_frag_t head_frag; 4452 struct page *page; 4453 4454 page = virt_to_head_page(frag_skb->head); 4455 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4456 (unsigned char *)page_address(page), 4457 skb_headlen(frag_skb)); 4458 return head_frag; 4459 } 4460 4461 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4462 netdev_features_t features, 4463 unsigned int offset) 4464 { 4465 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4466 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4467 unsigned int delta_truesize = 0; 4468 unsigned int delta_len = 0; 4469 struct sk_buff *tail = NULL; 4470 struct sk_buff *nskb, *tmp; 4471 int len_diff, err; 4472 4473 skb_push(skb, -skb_network_offset(skb) + offset); 4474 4475 /* Ensure the head is writeable before touching the shared info */ 4476 err = skb_unclone(skb, GFP_ATOMIC); 4477 if (err) 4478 goto err_linearize; 4479 4480 skb_shinfo(skb)->frag_list = NULL; 4481 4482 while (list_skb) { 4483 nskb = list_skb; 4484 list_skb = list_skb->next; 4485 4486 err = 0; 4487 delta_truesize += nskb->truesize; 4488 if (skb_shared(nskb)) { 4489 tmp = skb_clone(nskb, GFP_ATOMIC); 4490 if (tmp) { 4491 consume_skb(nskb); 4492 nskb = tmp; 4493 err = skb_unclone(nskb, GFP_ATOMIC); 4494 } else { 4495 err = -ENOMEM; 4496 } 4497 } 4498 4499 if (!tail) 4500 skb->next = nskb; 4501 else 4502 tail->next = nskb; 4503 4504 if (unlikely(err)) { 4505 nskb->next = list_skb; 4506 goto err_linearize; 4507 } 4508 4509 tail = nskb; 4510 4511 delta_len += nskb->len; 4512 4513 skb_push(nskb, -skb_network_offset(nskb) + offset); 4514 4515 skb_release_head_state(nskb); 4516 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4517 __copy_skb_header(nskb, skb); 4518 4519 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4520 nskb->transport_header += len_diff; 4521 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4522 nskb->data - tnl_hlen, 4523 offset + tnl_hlen); 4524 4525 if (skb_needs_linearize(nskb, features) && 4526 __skb_linearize(nskb)) 4527 goto err_linearize; 4528 } 4529 4530 skb->truesize = skb->truesize - delta_truesize; 4531 skb->data_len = skb->data_len - delta_len; 4532 skb->len = skb->len - delta_len; 4533 4534 skb_gso_reset(skb); 4535 4536 skb->prev = tail; 4537 4538 if (skb_needs_linearize(skb, features) && 4539 __skb_linearize(skb)) 4540 goto err_linearize; 4541 4542 skb_get(skb); 4543 4544 return skb; 4545 4546 err_linearize: 4547 kfree_skb_list(skb->next); 4548 skb->next = NULL; 4549 return ERR_PTR(-ENOMEM); 4550 } 4551 EXPORT_SYMBOL_GPL(skb_segment_list); 4552 4553 /** 4554 * skb_segment - Perform protocol segmentation on skb. 4555 * @head_skb: buffer to segment 4556 * @features: features for the output path (see dev->features) 4557 * 4558 * This function performs segmentation on the given skb. It returns 4559 * a pointer to the first in a list of new skbs for the segments. 4560 * In case of error it returns ERR_PTR(err). 
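 *
 * Rough caller sketch (illustrative only; it mirrors how GSO call
 * sites on the transmit path generally consume the result):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	(walk segs via ->next and transmit each segment)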
4561 */ 4562 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4563 netdev_features_t features) 4564 { 4565 struct sk_buff *segs = NULL; 4566 struct sk_buff *tail = NULL; 4567 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4568 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4569 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4570 unsigned int offset = doffset; 4571 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4572 unsigned int partial_segs = 0; 4573 unsigned int headroom; 4574 unsigned int len = head_skb->len; 4575 struct sk_buff *frag_skb; 4576 skb_frag_t *frag; 4577 __be16 proto; 4578 bool csum, sg; 4579 int err = -ENOMEM; 4580 int i = 0; 4581 int nfrags, pos; 4582 4583 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4584 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4585 struct sk_buff *check_skb; 4586 4587 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4588 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4589 /* gso_size is untrusted, and we have a frag_list with 4590 * a linear non head_frag item. 4591 * 4592 * If head_skb's headlen does not fit requested gso_size, 4593 * it means that the frag_list members do NOT terminate 4594 * on exact gso_size boundaries. Hence we cannot perform 4595 * skb_frag_t page sharing. Therefore we must fallback to 4596 * copying the frag_list skbs; we do so by disabling SG. 4597 */ 4598 features &= ~NETIF_F_SG; 4599 break; 4600 } 4601 } 4602 } 4603 4604 __skb_push(head_skb, doffset); 4605 proto = skb_network_protocol(head_skb, NULL); 4606 if (unlikely(!proto)) 4607 return ERR_PTR(-EINVAL); 4608 4609 sg = !!(features & NETIF_F_SG); 4610 csum = !!can_checksum_protocol(features, proto); 4611 4612 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4613 if (!(features & NETIF_F_GSO_PARTIAL)) { 4614 struct sk_buff *iter; 4615 unsigned int frag_len; 4616 4617 if (!list_skb || 4618 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4619 goto normal; 4620 4621 /* If we get here then all the required 4622 * GSO features except frag_list are supported. 4623 * Try to split the SKB to multiple GSO SKBs 4624 * with no frag_list. 4625 * Currently we can do that only when the buffers don't 4626 * have a linear part and all the buffers except 4627 * the last are of the same length. 4628 */ 4629 frag_len = list_skb->len; 4630 skb_walk_frags(head_skb, iter) { 4631 if (frag_len != iter->len && iter->next) 4632 goto normal; 4633 if (skb_headlen(iter) && !iter->head_frag) 4634 goto normal; 4635 4636 len -= iter->len; 4637 } 4638 4639 if (len != frag_len) 4640 goto normal; 4641 } 4642 4643 /* GSO partial only requires that we trim off any excess that 4644 * doesn't fit into an MSS sized block, so take care of that 4645 * now. 4646 * Cap len to not accidentally hit GSO_BY_FRAGS. 
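 *
 * Worked example (illustrative numbers only): len = 61440 and
 * mss = 1448 give partial_segs = 42, so mss becomes 60816 and each
 * resulting segment carries 42 MSS-sized blocks; any remainder is
 * handled by the gso_size fixup after the segmentation loop.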
4647 */ 4648 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4649 if (partial_segs > 1) 4650 mss *= partial_segs; 4651 else 4652 partial_segs = 0; 4653 } 4654 4655 normal: 4656 headroom = skb_headroom(head_skb); 4657 pos = skb_headlen(head_skb); 4658 4659 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4660 return ERR_PTR(-ENOMEM); 4661 4662 nfrags = skb_shinfo(head_skb)->nr_frags; 4663 frag = skb_shinfo(head_skb)->frags; 4664 frag_skb = head_skb; 4665 4666 do { 4667 struct sk_buff *nskb; 4668 skb_frag_t *nskb_frag; 4669 int hsize; 4670 int size; 4671 4672 if (unlikely(mss == GSO_BY_FRAGS)) { 4673 len = list_skb->len; 4674 } else { 4675 len = head_skb->len - offset; 4676 if (len > mss) 4677 len = mss; 4678 } 4679 4680 hsize = skb_headlen(head_skb) - offset; 4681 4682 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4683 (skb_headlen(list_skb) == len || sg)) { 4684 BUG_ON(skb_headlen(list_skb) > len); 4685 4686 nskb = skb_clone(list_skb, GFP_ATOMIC); 4687 if (unlikely(!nskb)) 4688 goto err; 4689 4690 i = 0; 4691 nfrags = skb_shinfo(list_skb)->nr_frags; 4692 frag = skb_shinfo(list_skb)->frags; 4693 frag_skb = list_skb; 4694 pos += skb_headlen(list_skb); 4695 4696 while (pos < offset + len) { 4697 BUG_ON(i >= nfrags); 4698 4699 size = skb_frag_size(frag); 4700 if (pos + size > offset + len) 4701 break; 4702 4703 i++; 4704 pos += size; 4705 frag++; 4706 } 4707 4708 list_skb = list_skb->next; 4709 4710 if (unlikely(pskb_trim(nskb, len))) { 4711 kfree_skb(nskb); 4712 goto err; 4713 } 4714 4715 hsize = skb_end_offset(nskb); 4716 if (skb_cow_head(nskb, doffset + headroom)) { 4717 kfree_skb(nskb); 4718 goto err; 4719 } 4720 4721 nskb->truesize += skb_end_offset(nskb) - hsize; 4722 skb_release_head_state(nskb); 4723 __skb_push(nskb, doffset); 4724 } else { 4725 if (hsize < 0) 4726 hsize = 0; 4727 if (hsize > len || !sg) 4728 hsize = len; 4729 4730 nskb = __alloc_skb(hsize + doffset + headroom, 4731 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4732 NUMA_NO_NODE); 4733 4734 if (unlikely(!nskb)) 4735 goto err; 4736 4737 skb_reserve(nskb, headroom); 4738 __skb_put(nskb, doffset); 4739 } 4740 4741 if (segs) 4742 tail->next = nskb; 4743 else 4744 segs = nskb; 4745 tail = nskb; 4746 4747 __copy_skb_header(nskb, head_skb); 4748 4749 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4750 skb_reset_mac_len(nskb); 4751 4752 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4753 nskb->data - tnl_hlen, 4754 doffset + tnl_hlen); 4755 4756 if (nskb->len == len + doffset) 4757 goto perform_csum_check; 4758 4759 if (!sg) { 4760 if (!csum) { 4761 if (!nskb->remcsum_offload) 4762 nskb->ip_summed = CHECKSUM_NONE; 4763 SKB_GSO_CB(nskb)->csum = 4764 skb_copy_and_csum_bits(head_skb, offset, 4765 skb_put(nskb, 4766 len), 4767 len); 4768 SKB_GSO_CB(nskb)->csum_start = 4769 skb_headroom(nskb) + doffset; 4770 } else { 4771 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4772 goto err; 4773 } 4774 continue; 4775 } 4776 4777 nskb_frag = skb_shinfo(nskb)->frags; 4778 4779 skb_copy_from_linear_data_offset(head_skb, offset, 4780 skb_put(nskb, hsize), hsize); 4781 4782 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4783 SKBFL_SHARED_FRAG; 4784 4785 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4786 goto err; 4787 4788 while (pos < offset + len) { 4789 if (i >= nfrags) { 4790 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4791 skb_zerocopy_clone(nskb, list_skb, 4792 GFP_ATOMIC)) 4793 goto err; 4794 4795 i = 0; 4796 nfrags = skb_shinfo(list_skb)->nr_frags; 4797 frag = 
skb_shinfo(list_skb)->frags; 4798 frag_skb = list_skb; 4799 if (!skb_headlen(list_skb)) { 4800 BUG_ON(!nfrags); 4801 } else { 4802 BUG_ON(!list_skb->head_frag); 4803 4804 /* to make room for head_frag. */ 4805 i--; 4806 frag--; 4807 } 4808 4809 list_skb = list_skb->next; 4810 } 4811 4812 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4813 MAX_SKB_FRAGS)) { 4814 net_warn_ratelimited( 4815 "skb_segment: too many frags: %u %u\n", 4816 pos, mss); 4817 err = -EINVAL; 4818 goto err; 4819 } 4820 4821 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4822 __skb_frag_ref(nskb_frag); 4823 size = skb_frag_size(nskb_frag); 4824 4825 if (pos < offset) { 4826 skb_frag_off_add(nskb_frag, offset - pos); 4827 skb_frag_size_sub(nskb_frag, offset - pos); 4828 } 4829 4830 skb_shinfo(nskb)->nr_frags++; 4831 4832 if (pos + size <= offset + len) { 4833 i++; 4834 frag++; 4835 pos += size; 4836 } else { 4837 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4838 goto skip_fraglist; 4839 } 4840 4841 nskb_frag++; 4842 } 4843 4844 skip_fraglist: 4845 nskb->data_len = len - hsize; 4846 nskb->len += nskb->data_len; 4847 nskb->truesize += nskb->data_len; 4848 4849 perform_csum_check: 4850 if (!csum) { 4851 if (skb_has_shared_frag(nskb) && 4852 __skb_linearize(nskb)) 4853 goto err; 4854 4855 if (!nskb->remcsum_offload) 4856 nskb->ip_summed = CHECKSUM_NONE; 4857 SKB_GSO_CB(nskb)->csum = 4858 skb_checksum(nskb, doffset, 4859 nskb->len - doffset, 0); 4860 SKB_GSO_CB(nskb)->csum_start = 4861 skb_headroom(nskb) + doffset; 4862 } 4863 } while ((offset += len) < head_skb->len); 4864 4865 /* Some callers want to get the end of the list. 4866 * Put it in segs->prev to avoid walking the list. 4867 * (see validate_xmit_skb_list() for example) 4868 */ 4869 segs->prev = tail; 4870 4871 if (partial_segs) { 4872 struct sk_buff *iter; 4873 int type = skb_shinfo(head_skb)->gso_type; 4874 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4875 4876 /* Update type to add partial and then remove dodgy if set */ 4877 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4878 type &= ~SKB_GSO_DODGY; 4879 4880 /* Update GSO info and prepare to start updating headers on 4881 * our way back down the stack of protocols. 4882 */ 4883 for (iter = segs; iter; iter = iter->next) { 4884 skb_shinfo(iter)->gso_size = gso_size; 4885 skb_shinfo(iter)->gso_segs = partial_segs; 4886 skb_shinfo(iter)->gso_type = type; 4887 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4888 } 4889 4890 if (tail->len - doffset <= gso_size) 4891 skb_shinfo(tail)->gso_size = 0; 4892 else if (tail != segs) 4893 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4894 } 4895 4896 /* Following permits correct backpressure, for protocols 4897 * using skb_set_owner_w(). 4898 * Idea is to tranfert ownership from head_skb to last segment. 
4899 */ 4900 if (head_skb->destructor == sock_wfree) { 4901 swap(tail->truesize, head_skb->truesize); 4902 swap(tail->destructor, head_skb->destructor); 4903 swap(tail->sk, head_skb->sk); 4904 } 4905 return segs; 4906 4907 err: 4908 kfree_skb_list(segs); 4909 return ERR_PTR(err); 4910 } 4911 EXPORT_SYMBOL_GPL(skb_segment); 4912 4913 #ifdef CONFIG_SKB_EXTENSIONS 4914 #define SKB_EXT_ALIGN_VALUE 8 4915 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4916 4917 static const u8 skb_ext_type_len[] = { 4918 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4919 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4920 #endif 4921 #ifdef CONFIG_XFRM 4922 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4923 #endif 4924 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4925 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4926 #endif 4927 #if IS_ENABLED(CONFIG_MPTCP) 4928 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4929 #endif 4930 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4931 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 4932 #endif 4933 }; 4934 4935 static __always_inline unsigned int skb_ext_total_length(void) 4936 { 4937 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 4938 int i; 4939 4940 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 4941 l += skb_ext_type_len[i]; 4942 4943 return l; 4944 } 4945 4946 static void skb_extensions_init(void) 4947 { 4948 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4949 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4950 BUILD_BUG_ON(skb_ext_total_length() > 255); 4951 #endif 4952 4953 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4954 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4955 0, 4956 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4957 NULL); 4958 } 4959 #else 4960 static void skb_extensions_init(void) {} 4961 #endif 4962 4963 /* The SKB kmem_cache slab is critical for network performance. Never 4964 * merge/alias the slab with similar sized objects. This avoids fragmentation 4965 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 4966 */ 4967 #ifndef CONFIG_SLUB_TINY 4968 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 4969 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 4970 #define FLAG_SKB_NO_MERGE 0 4971 #endif 4972 4973 void __init skb_init(void) 4974 { 4975 net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 4976 sizeof(struct sk_buff), 4977 0, 4978 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 4979 FLAG_SKB_NO_MERGE, 4980 offsetof(struct sk_buff, cb), 4981 sizeof_field(struct sk_buff, cb), 4982 NULL); 4983 net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4984 sizeof(struct sk_buff_fclones), 4985 0, 4986 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4987 NULL); 4988 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 4989 * struct skb_shared_info is located at the end of skb->head, 4990 * and should not be copied to/from user. 
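 * Hence the usercopy window passed below: useroffset 0 and usersize
 * SKB_SMALL_HEAD_HEADROOM, so hardened usercopy rejects any copy that
 * would reach into the trailing shared info area.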
4991 */ 4992 net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 4993 SKB_SMALL_HEAD_CACHE_SIZE, 4994 0, 4995 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 4996 0, 4997 SKB_SMALL_HEAD_HEADROOM, 4998 NULL); 4999 skb_extensions_init(); 5000 } 5001 5002 static int 5003 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 5004 unsigned int recursion_level) 5005 { 5006 int start = skb_headlen(skb); 5007 int i, copy = start - offset; 5008 struct sk_buff *frag_iter; 5009 int elt = 0; 5010 5011 if (unlikely(recursion_level >= 24)) 5012 return -EMSGSIZE; 5013 5014 if (copy > 0) { 5015 if (copy > len) 5016 copy = len; 5017 sg_set_buf(sg, skb->data + offset, copy); 5018 elt++; 5019 if ((len -= copy) == 0) 5020 return elt; 5021 offset += copy; 5022 } 5023 5024 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5025 int end; 5026 5027 WARN_ON(start > offset + len); 5028 5029 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 5030 if ((copy = end - offset) > 0) { 5031 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5032 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5033 return -EMSGSIZE; 5034 5035 if (copy > len) 5036 copy = len; 5037 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 5038 skb_frag_off(frag) + offset - start); 5039 elt++; 5040 if (!(len -= copy)) 5041 return elt; 5042 offset += copy; 5043 } 5044 start = end; 5045 } 5046 5047 skb_walk_frags(skb, frag_iter) { 5048 int end, ret; 5049 5050 WARN_ON(start > offset + len); 5051 5052 end = start + frag_iter->len; 5053 if ((copy = end - offset) > 0) { 5054 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5055 return -EMSGSIZE; 5056 5057 if (copy > len) 5058 copy = len; 5059 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 5060 copy, recursion_level + 1); 5061 if (unlikely(ret < 0)) 5062 return ret; 5063 elt += ret; 5064 if ((len -= copy) == 0) 5065 return elt; 5066 offset += copy; 5067 } 5068 start = end; 5069 } 5070 BUG_ON(len); 5071 return elt; 5072 } 5073 5074 /** 5075 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 5076 * @skb: Socket buffer containing the buffers to be mapped 5077 * @sg: The scatter-gather list to map into 5078 * @offset: The offset into the buffer's contents to start mapping 5079 * @len: Length of buffer space to be mapped 5080 * 5081 * Fill the specified scatter-gather list with mappings/pointers into a 5082 * region of the buffer space attached to a socket buffer. Returns either 5083 * the number of scatterlist items used, or -EMSGSIZE if the contents 5084 * could not fit. 5085 */ 5086 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5087 { 5088 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5089 5090 if (nsg <= 0) 5091 return nsg; 5092 5093 sg_mark_end(&sg[nsg - 1]); 5094 5095 return nsg; 5096 } 5097 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5098 5099 /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the 5100 * given sglist without marking the sg entry that contains the last skb data 5101 * as the end. So the caller can manipulate the sg list at will when padding 5102 * new data after the first call, without calling sg_unmark_end to extend the sg list. 5103 * 5104 * Scenario to use skb_to_sgvec_nomark: 5105 * 1. sg_init_table 5106 * 2. skb_to_sgvec_nomark(payload1) 5107 * 3. skb_to_sgvec_nomark(payload2) 5108 * 5109 * This is equivalent to: 5110 * 1. sg_init_table 5111 * 2. skb_to_sgvec(payload1) 5112 * 3. sg_unmark_end 5113 * 4. 
skb_to_sgvec(payload2) 5114 * 5115 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark 5116 * is preferable. 5117 */ 5118 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5119 int offset, int len) 5120 { 5121 return __skb_to_sgvec(skb, sg, offset, len, 0); 5122 } 5123 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5124 5125 5126 5127 /** 5128 * skb_cow_data - Check that a socket buffer's data buffers are writable 5129 * @skb: The socket buffer to check. 5130 * @tailbits: Amount of trailing space to be added 5131 * @trailer: Returned pointer to the skb where the @tailbits space begins 5132 * 5133 * Make sure that the data buffers attached to a socket buffer are 5134 * writable. If they are not, private copies are made of the data buffers 5135 * and the socket buffer is set to use these instead. 5136 * 5137 * If @tailbits is given, make sure that there is space to write @tailbits 5138 * bytes of data beyond current end of socket buffer. @trailer will be 5139 * set to point to the skb in which this space begins. 5140 * 5141 * The number of scatterlist elements required to completely map the 5142 * COW'd and extended socket buffer will be returned. 5143 */ 5144 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5145 { 5146 int copyflag; 5147 int elt; 5148 struct sk_buff *skb1, **skb_p; 5149 5150 /* If skb is cloned or its head is paged, reallocate 5151 * head pulling out all the pages (pages are considered not writable 5152 * at the moment even if they are anonymous). 5153 */ 5154 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5155 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5156 return -ENOMEM; 5157 5158 /* Easy case. Most packets will go this way. */ 5159 if (!skb_has_frag_list(skb)) { 5160 /* A little trouble: not enough space for the trailer. 5161 * This should not happen when the stack is tuned to generate 5162 * good frames. On a miss we reallocate and reserve even more 5163 * space; 128 bytes is fair. */ 5164 5165 if (skb_tailroom(skb) < tailbits && 5166 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5167 return -ENOMEM; 5168 5169 /* Voila! */ 5170 *trailer = skb; 5171 return 1; 5172 } 5173 5174 /* Misery. We are in trouble; time to mince the fragments... */ 5175 5176 elt = 1; 5177 skb_p = &skb_shinfo(skb)->frag_list; 5178 copyflag = 0; 5179 5180 while ((skb1 = *skb_p) != NULL) { 5181 int ntail = 0; 5182 5183 /* The fragment was partially pulled by someone; 5184 * this can happen on input. Copy it and everything 5185 * after it. */ 5186 5187 if (skb_shared(skb1)) 5188 copyflag = 1; 5189 5190 /* If the skb is the last one, worry about the trailer. */ 5191 5192 if (skb1->next == NULL && tailbits) { 5193 if (skb_shinfo(skb1)->nr_frags || 5194 skb_has_frag_list(skb1) || 5195 skb_tailroom(skb1) < tailbits) 5196 ntail = tailbits + 128; 5197 } 5198 5199 if (copyflag || 5200 skb_cloned(skb1) || 5201 ntail || 5202 skb_shinfo(skb1)->nr_frags || 5203 skb_has_frag_list(skb1)) { 5204 struct sk_buff *skb2; 5205 5206 /* No way around it, a private copy is needed... */ 5207 if (ntail == 0) 5208 skb2 = skb_copy(skb1, GFP_ATOMIC); 5209 else 5210 skb2 = skb_copy_expand(skb1, 5211 skb_headroom(skb1), 5212 ntail, 5213 GFP_ATOMIC); 5214 if (unlikely(skb2 == NULL)) 5215 return -ENOMEM; 5216 5217 if (skb1->sk) 5218 skb_set_owner_w(skb2, skb1->sk); 5219 5220 /* Looking around. Are we still alive? 
5221 * OK, link new skb, drop old one */ 5222 5223 skb2->next = skb1->next; 5224 *skb_p = skb2; 5225 kfree_skb(skb1); 5226 skb1 = skb2; 5227 } 5228 elt++; 5229 *trailer = skb1; 5230 skb_p = &skb1->next; 5231 } 5232 5233 return elt; 5234 } 5235 EXPORT_SYMBOL_GPL(skb_cow_data); 5236 5237 static void sock_rmem_free(struct sk_buff *skb) 5238 { 5239 struct sock *sk = skb->sk; 5240 5241 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5242 } 5243 5244 static void skb_set_err_queue(struct sk_buff *skb) 5245 { 5246 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5247 * So, it is safe to (mis)use it to mark skbs on the error queue. 5248 */ 5249 skb->pkt_type = PACKET_OUTGOING; 5250 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5251 } 5252 5253 /* 5254 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5255 */ 5256 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5257 { 5258 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5259 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5260 return -ENOMEM; 5261 5262 skb_orphan(skb); 5263 skb->sk = sk; 5264 skb->destructor = sock_rmem_free; 5265 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5266 skb_set_err_queue(skb); 5267 5268 /* before exiting rcu section, make sure dst is refcounted */ 5269 skb_dst_force(skb); 5270 5271 skb_queue_tail(&sk->sk_error_queue, skb); 5272 if (!sock_flag(sk, SOCK_DEAD)) 5273 sk_error_report(sk); 5274 return 0; 5275 } 5276 EXPORT_SYMBOL(sock_queue_err_skb); 5277 5278 static bool is_icmp_err_skb(const struct sk_buff *skb) 5279 { 5280 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5281 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5282 } 5283 5284 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5285 { 5286 struct sk_buff_head *q = &sk->sk_error_queue; 5287 struct sk_buff *skb, *skb_next = NULL; 5288 bool icmp_next = false; 5289 unsigned long flags; 5290 5291 if (skb_queue_empty_lockless(q)) 5292 return NULL; 5293 5294 spin_lock_irqsave(&q->lock, flags); 5295 skb = __skb_dequeue(q); 5296 if (skb && (skb_next = skb_peek(q))) { 5297 icmp_next = is_icmp_err_skb(skb_next); 5298 if (icmp_next) 5299 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5300 } 5301 spin_unlock_irqrestore(&q->lock, flags); 5302 5303 if (is_icmp_err_skb(skb) && !icmp_next) 5304 sk->sk_err = 0; 5305 5306 if (skb_next) 5307 sk_error_report(sk); 5308 5309 return skb; 5310 } 5311 EXPORT_SYMBOL(sock_dequeue_err_skb); 5312 5313 /** 5314 * skb_clone_sk - create clone of skb, and take reference to socket 5315 * @skb: the skb to clone 5316 * 5317 * This function creates a clone of a buffer that holds a reference on 5318 * sk_refcnt. Buffers created via this function are meant to be 5319 * returned using sock_queue_err_skb, or free via kfree_skb. 5320 * 5321 * When passing buffers allocated with this function to sock_queue_err_skb 5322 * it is necessary to wrap the call with sock_hold/sock_put in order to 5323 * prevent the socket from being released prior to being enqueued on 5324 * the sk_error_queue. 
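 *
 * A minimal sketch of that wrapping (illustrative; sk is the socket
 * the clone holds a reference on, i.e. clone->sk):
 *
 *	sock_hold(sk);
 *	if (sock_queue_err_skb(sk, clone))
 *		kfree_skb(clone);
 *	sock_put(sk);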
5325 */ 5326 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5327 { 5328 struct sock *sk = skb->sk; 5329 struct sk_buff *clone; 5330 5331 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5332 return NULL; 5333 5334 clone = skb_clone(skb, GFP_ATOMIC); 5335 if (!clone) { 5336 sock_put(sk); 5337 return NULL; 5338 } 5339 5340 clone->sk = sk; 5341 clone->destructor = sock_efree; 5342 5343 return clone; 5344 } 5345 EXPORT_SYMBOL(skb_clone_sk); 5346 5347 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5348 struct sock *sk, 5349 int tstype, 5350 bool opt_stats) 5351 { 5352 struct sock_exterr_skb *serr; 5353 int err; 5354 5355 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5356 5357 serr = SKB_EXT_ERR(skb); 5358 memset(serr, 0, sizeof(*serr)); 5359 serr->ee.ee_errno = ENOMSG; 5360 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5361 serr->ee.ee_info = tstype; 5362 serr->opt_stats = opt_stats; 5363 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5364 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5365 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5366 if (sk_is_tcp(sk)) 5367 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5368 } 5369 5370 err = sock_queue_err_skb(sk, skb); 5371 5372 if (err) 5373 kfree_skb(skb); 5374 } 5375 5376 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5377 { 5378 bool ret; 5379 5380 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) 5381 return true; 5382 5383 read_lock_bh(&sk->sk_callback_lock); 5384 ret = sk->sk_socket && sk->sk_socket->file && 5385 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5386 read_unlock_bh(&sk->sk_callback_lock); 5387 return ret; 5388 } 5389 5390 void skb_complete_tx_timestamp(struct sk_buff *skb, 5391 struct skb_shared_hwtstamps *hwtstamps) 5392 { 5393 struct sock *sk = skb->sk; 5394 5395 if (!skb_may_tx_timestamp(sk, false)) 5396 goto err; 5397 5398 /* Take a reference to prevent skb_orphan() from freeing the socket, 5399 * but only if the socket refcount is not zero. 
5400 */ 5401 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5402 *skb_hwtstamps(skb) = *hwtstamps; 5403 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5404 sock_put(sk); 5405 return; 5406 } 5407 5408 err: 5409 kfree_skb(skb); 5410 } 5411 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5412 5413 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5414 const struct sk_buff *ack_skb, 5415 struct skb_shared_hwtstamps *hwtstamps, 5416 struct sock *sk, int tstype) 5417 { 5418 struct sk_buff *skb; 5419 bool tsonly, opt_stats = false; 5420 u32 tsflags; 5421 5422 if (!sk) 5423 return; 5424 5425 tsflags = READ_ONCE(sk->sk_tsflags); 5426 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5427 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5428 return; 5429 5430 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5431 if (!skb_may_tx_timestamp(sk, tsonly)) 5432 return; 5433 5434 if (tsonly) { 5435 #ifdef CONFIG_INET 5436 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5437 sk_is_tcp(sk)) { 5438 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5439 ack_skb); 5440 opt_stats = true; 5441 } else 5442 #endif 5443 skb = alloc_skb(0, GFP_ATOMIC); 5444 } else { 5445 skb = skb_clone(orig_skb, GFP_ATOMIC); 5446 5447 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5448 kfree_skb(skb); 5449 return; 5450 } 5451 } 5452 if (!skb) 5453 return; 5454 5455 if (tsonly) { 5456 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5457 SKBTX_ANY_TSTAMP; 5458 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5459 } 5460 5461 if (hwtstamps) 5462 *skb_hwtstamps(skb) = *hwtstamps; 5463 else 5464 __net_timestamp(skb); 5465 5466 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5467 } 5468 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5469 5470 void skb_tstamp_tx(struct sk_buff *orig_skb, 5471 struct skb_shared_hwtstamps *hwtstamps) 5472 { 5473 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5474 SCM_TSTAMP_SND); 5475 } 5476 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5477 5478 #ifdef CONFIG_WIRELESS 5479 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5480 { 5481 struct sock *sk = skb->sk; 5482 struct sock_exterr_skb *serr; 5483 int err = 1; 5484 5485 skb->wifi_acked_valid = 1; 5486 skb->wifi_acked = acked; 5487 5488 serr = SKB_EXT_ERR(skb); 5489 memset(serr, 0, sizeof(*serr)); 5490 serr->ee.ee_errno = ENOMSG; 5491 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5492 5493 /* Take a reference to prevent skb_orphan() from freeing the socket, 5494 * but only if the socket refcount is not zero. 5495 */ 5496 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5497 err = sock_queue_err_skb(sk, skb); 5498 sock_put(sk); 5499 } 5500 if (err) 5501 kfree_skb(skb); 5502 } 5503 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5504 #endif /* CONFIG_WIRELESS */ 5505 5506 /** 5507 * skb_partial_csum_set - set up and verify partial csum values for packet 5508 * @skb: the skb to set 5509 * @start: the number of bytes after skb->data to start checksumming. 5510 * @off: the offset from start to place the checksum. 5511 * 5512 * For untrusted partially-checksummed packets, we need to make sure the values 5513 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5514 * 5515 * This function checks and sets those values and skb->ip_summed: if this 5516 * returns false you should drop the packet. 
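 *
 * Hedged caller sketch (names illustrative; start and off come from
 * an untrusted metadata header and must be validated here):
 *
 *	if (!skb_partial_csum_set(skb, start, off))
 *		goto drop;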
5517 */ 5518 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5519 { 5520 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5521 u32 csum_start = skb_headroom(skb) + (u32)start; 5522 5523 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5524 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5525 start, off, skb_headroom(skb), skb_headlen(skb)); 5526 return false; 5527 } 5528 skb->ip_summed = CHECKSUM_PARTIAL; 5529 skb->csum_start = csum_start; 5530 skb->csum_offset = off; 5531 skb->transport_header = csum_start; 5532 return true; 5533 } 5534 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5535 5536 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5537 unsigned int max) 5538 { 5539 if (skb_headlen(skb) >= len) 5540 return 0; 5541 5542 /* If we need to pullup then pullup to the max, so we 5543 * won't need to do it again. 5544 */ 5545 if (max > skb->len) 5546 max = skb->len; 5547 5548 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5549 return -ENOMEM; 5550 5551 if (skb_headlen(skb) < len) 5552 return -EPROTO; 5553 5554 return 0; 5555 } 5556 5557 #define MAX_TCP_HDR_LEN (15 * 4) 5558 5559 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5560 typeof(IPPROTO_IP) proto, 5561 unsigned int off) 5562 { 5563 int err; 5564 5565 switch (proto) { 5566 case IPPROTO_TCP: 5567 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5568 off + MAX_TCP_HDR_LEN); 5569 if (!err && !skb_partial_csum_set(skb, off, 5570 offsetof(struct tcphdr, 5571 check))) 5572 err = -EPROTO; 5573 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5574 5575 case IPPROTO_UDP: 5576 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5577 off + sizeof(struct udphdr)); 5578 if (!err && !skb_partial_csum_set(skb, off, 5579 offsetof(struct udphdr, 5580 check))) 5581 err = -EPROTO; 5582 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5583 } 5584 5585 return ERR_PTR(-EPROTO); 5586 } 5587 5588 /* This value should be large enough to cover a tagged ethernet header plus 5589 * maximally sized IP and TCP or UDP headers. 5590 */ 5591 #define MAX_IP_HDR_LEN 128 5592 5593 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5594 { 5595 unsigned int off; 5596 bool fragment; 5597 __sum16 *csum; 5598 int err; 5599 5600 fragment = false; 5601 5602 err = skb_maybe_pull_tail(skb, 5603 sizeof(struct iphdr), 5604 MAX_IP_HDR_LEN); 5605 if (err < 0) 5606 goto out; 5607 5608 if (ip_is_fragment(ip_hdr(skb))) 5609 fragment = true; 5610 5611 off = ip_hdrlen(skb); 5612 5613 err = -EPROTO; 5614 5615 if (fragment) 5616 goto out; 5617 5618 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5619 if (IS_ERR(csum)) 5620 return PTR_ERR(csum); 5621 5622 if (recalculate) 5623 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5624 ip_hdr(skb)->daddr, 5625 skb->len - off, 5626 ip_hdr(skb)->protocol, 0); 5627 err = 0; 5628 5629 out: 5630 return err; 5631 } 5632 5633 /* This value should be large enough to cover a tagged ethernet header plus 5634 * an IPv6 header, all options, and a maximal TCP or UDP header. 
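 * (Illustrative budget: 14 + 4 bytes of Ethernet + VLAN, 40 bytes of
 * fixed IPv6 header and up to 60 bytes of TCP still leave well over a
 * hundred bytes for extension headers within this limit.)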
5635 */ 5636 #define MAX_IPV6_HDR_LEN 256 5637 5638 #define OPT_HDR(type, skb, off) \ 5639 (type *)(skb_network_header(skb) + (off)) 5640 5641 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5642 { 5643 int err; 5644 u8 nexthdr; 5645 unsigned int off; 5646 unsigned int len; 5647 bool fragment; 5648 bool done; 5649 __sum16 *csum; 5650 5651 fragment = false; 5652 done = false; 5653 5654 off = sizeof(struct ipv6hdr); 5655 5656 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5657 if (err < 0) 5658 goto out; 5659 5660 nexthdr = ipv6_hdr(skb)->nexthdr; 5661 5662 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5663 while (off <= len && !done) { 5664 switch (nexthdr) { 5665 case IPPROTO_DSTOPTS: 5666 case IPPROTO_HOPOPTS: 5667 case IPPROTO_ROUTING: { 5668 struct ipv6_opt_hdr *hp; 5669 5670 err = skb_maybe_pull_tail(skb, 5671 off + 5672 sizeof(struct ipv6_opt_hdr), 5673 MAX_IPV6_HDR_LEN); 5674 if (err < 0) 5675 goto out; 5676 5677 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5678 nexthdr = hp->nexthdr; 5679 off += ipv6_optlen(hp); 5680 break; 5681 } 5682 case IPPROTO_AH: { 5683 struct ip_auth_hdr *hp; 5684 5685 err = skb_maybe_pull_tail(skb, 5686 off + 5687 sizeof(struct ip_auth_hdr), 5688 MAX_IPV6_HDR_LEN); 5689 if (err < 0) 5690 goto out; 5691 5692 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5693 nexthdr = hp->nexthdr; 5694 off += ipv6_authlen(hp); 5695 break; 5696 } 5697 case IPPROTO_FRAGMENT: { 5698 struct frag_hdr *hp; 5699 5700 err = skb_maybe_pull_tail(skb, 5701 off + 5702 sizeof(struct frag_hdr), 5703 MAX_IPV6_HDR_LEN); 5704 if (err < 0) 5705 goto out; 5706 5707 hp = OPT_HDR(struct frag_hdr, skb, off); 5708 5709 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5710 fragment = true; 5711 5712 nexthdr = hp->nexthdr; 5713 off += sizeof(struct frag_hdr); 5714 break; 5715 } 5716 default: 5717 done = true; 5718 break; 5719 } 5720 } 5721 5722 err = -EPROTO; 5723 5724 if (!done || fragment) 5725 goto out; 5726 5727 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5728 if (IS_ERR(csum)) 5729 return PTR_ERR(csum); 5730 5731 if (recalculate) 5732 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5733 &ipv6_hdr(skb)->daddr, 5734 skb->len - off, nexthdr, 0); 5735 err = 0; 5736 5737 out: 5738 return err; 5739 } 5740 5741 /** 5742 * skb_checksum_setup - set up partial checksum offset 5743 * @skb: the skb to set up 5744 * @recalculate: if true the pseudo-header checksum will be recalculated 5745 */ 5746 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5747 { 5748 int err; 5749 5750 switch (skb->protocol) { 5751 case htons(ETH_P_IP): 5752 err = skb_checksum_setup_ipv4(skb, recalculate); 5753 break; 5754 5755 case htons(ETH_P_IPV6): 5756 err = skb_checksum_setup_ipv6(skb, recalculate); 5757 break; 5758 5759 default: 5760 err = -EPROTO; 5761 break; 5762 } 5763 5764 return err; 5765 } 5766 EXPORT_SYMBOL(skb_checksum_setup); 5767 5768 /** 5769 * skb_checksum_maybe_trim - maybe trims the given skb 5770 * @skb: the skb to check 5771 * @transport_len: the data length beyond the network header 5772 * 5773 * Checks whether the given skb has data beyond the given transport length. 5774 * If so, returns a cloned skb trimmed to this transport length. 5775 * Otherwise returns the provided skb. Returns NULL in error cases 5776 * (e.g. transport_len exceeds skb length or out-of-memory). 5777 * 5778 * Caller needs to set the skb transport header and free any returned skb if it 5779 * differs from the provided skb. 
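 *
 * The same convention applies to skb_checksum_trimmed() below; its
 * expected caller-side pattern is roughly (sketch only):
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, chkf);
 *	if (!skb_chk)
 *		goto drop;
 *	...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);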
5780 */ 5781 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5782 unsigned int transport_len) 5783 { 5784 struct sk_buff *skb_chk; 5785 unsigned int len = skb_transport_offset(skb) + transport_len; 5786 int ret; 5787 5788 if (skb->len < len) 5789 return NULL; 5790 else if (skb->len == len) 5791 return skb; 5792 5793 skb_chk = skb_clone(skb, GFP_ATOMIC); 5794 if (!skb_chk) 5795 return NULL; 5796 5797 ret = pskb_trim_rcsum(skb_chk, len); 5798 if (ret) { 5799 kfree_skb(skb_chk); 5800 return NULL; 5801 } 5802 5803 return skb_chk; 5804 } 5805 5806 /** 5807 * skb_checksum_trimmed - validate checksum of an skb 5808 * @skb: the skb to check 5809 * @transport_len: the data length beyond the network header 5810 * @skb_chkf: checksum function to use 5811 * 5812 * Applies the given checksum function skb_chkf to the provided skb. 5813 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5814 * 5815 * If the skb has data beyond the given transport length, then a 5816 * trimmed & cloned skb is checked and returned. 5817 * 5818 * Caller needs to set the skb transport header and free any returned skb if it 5819 * differs from the provided skb. 5820 */ 5821 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5822 unsigned int transport_len, 5823 __sum16(*skb_chkf)(struct sk_buff *skb)) 5824 { 5825 struct sk_buff *skb_chk; 5826 unsigned int offset = skb_transport_offset(skb); 5827 __sum16 ret; 5828 5829 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5830 if (!skb_chk) 5831 goto err; 5832 5833 if (!pskb_may_pull(skb_chk, offset)) 5834 goto err; 5835 5836 skb_pull_rcsum(skb_chk, offset); 5837 ret = skb_chkf(skb_chk); 5838 skb_push_rcsum(skb_chk, offset); 5839 5840 if (ret) 5841 goto err; 5842 5843 return skb_chk; 5844 5845 err: 5846 if (skb_chk && skb_chk != skb) 5847 kfree_skb(skb_chk); 5848 5849 return NULL; 5850 5851 } 5852 EXPORT_SYMBOL(skb_checksum_trimmed); 5853 5854 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5855 { 5856 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5857 skb->dev->name); 5858 } 5859 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5860 5861 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5862 { 5863 if (head_stolen) { 5864 skb_release_head_state(skb); 5865 kmem_cache_free(net_hotdata.skbuff_cache, skb); 5866 } else { 5867 __kfree_skb(skb); 5868 } 5869 } 5870 EXPORT_SYMBOL(kfree_skb_partial); 5871 5872 /** 5873 * skb_try_coalesce - try to merge skb to prior one 5874 * @to: prior buffer 5875 * @from: buffer to add 5876 * @fragstolen: pointer to boolean 5877 * @delta_truesize: how much more was allocated than was requested 5878 */ 5879 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5880 bool *fragstolen, int *delta_truesize) 5881 { 5882 struct skb_shared_info *to_shinfo, *from_shinfo; 5883 int i, delta, len = from->len; 5884 5885 *fragstolen = false; 5886 5887 if (skb_cloned(to)) 5888 return false; 5889 5890 /* In general, avoid mixing page_pool and non-page_pool allocated 5891 * pages within the same SKB. In theory we could take full 5892 * references if @from is cloned and !@to->pp_recycle but its 5893 * tricky (due to potential race with the clone disappearing) and 5894 * rare, so not worth dealing with. 
*/ 5896 if (to->pp_recycle != from->pp_recycle) 5897 return false; 5898 5899 if (len <= skb_tailroom(to)) { 5900 if (len) 5901 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5902 *delta_truesize = 0; 5903 return true; 5904 } 5905 5906 to_shinfo = skb_shinfo(to); 5907 from_shinfo = skb_shinfo(from); 5908 if (to_shinfo->frag_list || from_shinfo->frag_list) 5909 return false; 5910 if (skb_zcopy(to) || skb_zcopy(from)) 5911 return false; 5912 5913 if (skb_headlen(from) != 0) { 5914 struct page *page; 5915 unsigned int offset; 5916 5917 if (to_shinfo->nr_frags + 5918 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5919 return false; 5920 5921 if (skb_head_is_locked(from)) 5922 return false; 5923 5924 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5925 5926 page = virt_to_head_page(from->head); 5927 offset = from->data - (unsigned char *)page_address(page); 5928 5929 skb_fill_page_desc(to, to_shinfo->nr_frags, 5930 page, offset, skb_headlen(from)); 5931 *fragstolen = true; 5932 } else { 5933 if (to_shinfo->nr_frags + 5934 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5935 return false; 5936 5937 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5938 } 5939 5940 WARN_ON_ONCE(delta < len); 5941 5942 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5943 from_shinfo->frags, 5944 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5945 to_shinfo->nr_frags += from_shinfo->nr_frags; 5946 5947 if (!skb_cloned(from)) 5948 from_shinfo->nr_frags = 0; 5949 5950 /* if the skb is not cloned this does nothing 5951 * since we set nr_frags to 0. 5952 */ 5953 if (skb_pp_frag_ref(from)) { 5954 for (i = 0; i < from_shinfo->nr_frags; i++) 5955 __skb_frag_ref(&from_shinfo->frags[i]); 5956 } 5957 5958 to->truesize += delta; 5959 to->len += len; 5960 to->data_len += len; 5961 5962 *delta_truesize = delta; 5963 return true; 5964 } 5965 EXPORT_SYMBOL(skb_try_coalesce); 5966 5967 /** 5968 * skb_scrub_packet - scrub an skb 5969 * 5970 * @skb: buffer to clean 5971 * @xnet: packet is crossing netns 5972 * 5973 * skb_scrub_packet can be used after encapsulating or decapsulating a packet 5974 * into/from a tunnel. Some information has to be cleared during these 5975 * operations. 5976 * skb_scrub_packet can also be used to clean a skb before injecting it into 5977 * another namespace (@xnet == true). We have to clear all information in the 5978 * skb that could impact namespace isolation. 
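 *
 * Hedged example of the cross-netns case (device names illustrative),
 * matching how forwarding paths typically derive @xnet:
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(in_dev), dev_net(out_dev)));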
5979 */ 5980 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5981 { 5982 skb->pkt_type = PACKET_HOST; 5983 skb->skb_iif = 0; 5984 skb->ignore_df = 0; 5985 skb_dst_drop(skb); 5986 skb_ext_reset(skb); 5987 nf_reset_ct(skb); 5988 nf_reset_trace(skb); 5989 5990 #ifdef CONFIG_NET_SWITCHDEV 5991 skb->offload_fwd_mark = 0; 5992 skb->offload_l3_fwd_mark = 0; 5993 #endif 5994 5995 if (!xnet) 5996 return; 5997 5998 ipvs_reset(skb); 5999 skb->mark = 0; 6000 skb_clear_tstamp(skb); 6001 } 6002 EXPORT_SYMBOL_GPL(skb_scrub_packet); 6003 6004 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 6005 { 6006 int mac_len, meta_len; 6007 void *meta; 6008 6009 if (skb_cow(skb, skb_headroom(skb)) < 0) { 6010 kfree_skb(skb); 6011 return NULL; 6012 } 6013 6014 mac_len = skb->data - skb_mac_header(skb); 6015 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 6016 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 6017 mac_len - VLAN_HLEN - ETH_TLEN); 6018 } 6019 6020 meta_len = skb_metadata_len(skb); 6021 if (meta_len) { 6022 meta = skb_metadata_end(skb) - meta_len; 6023 memmove(meta + VLAN_HLEN, meta, meta_len); 6024 } 6025 6026 skb->mac_header += VLAN_HLEN; 6027 return skb; 6028 } 6029 6030 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6031 { 6032 struct vlan_hdr *vhdr; 6033 u16 vlan_tci; 6034 6035 if (unlikely(skb_vlan_tag_present(skb))) { 6036 /* vlan_tci is already set-up so leave this for another time */ 6037 return skb; 6038 } 6039 6040 skb = skb_share_check(skb, GFP_ATOMIC); 6041 if (unlikely(!skb)) 6042 goto err_free; 6043 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6044 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6045 goto err_free; 6046 6047 vhdr = (struct vlan_hdr *)skb->data; 6048 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6049 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6050 6051 skb_pull_rcsum(skb, VLAN_HLEN); 6052 vlan_set_encap_proto(skb, vhdr); 6053 6054 skb = skb_reorder_vlan_header(skb); 6055 if (unlikely(!skb)) 6056 goto err_free; 6057 6058 skb_reset_network_header(skb); 6059 if (!skb_transport_header_was_set(skb)) 6060 skb_reset_transport_header(skb); 6061 skb_reset_mac_len(skb); 6062 6063 return skb; 6064 6065 err_free: 6066 kfree_skb(skb); 6067 return NULL; 6068 } 6069 EXPORT_SYMBOL(skb_vlan_untag); 6070 6071 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6072 { 6073 if (!pskb_may_pull(skb, write_len)) 6074 return -ENOMEM; 6075 6076 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6077 return 0; 6078 6079 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6080 } 6081 EXPORT_SYMBOL(skb_ensure_writable); 6082 6083 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6084 { 6085 int needed_headroom = dev->needed_headroom; 6086 int needed_tailroom = dev->needed_tailroom; 6087 6088 /* For tail taggers, we need to pad short frames ourselves, to ensure 6089 * that the tail tag does not fail at its role of being at the end of 6090 * the packet, once the conduit interface pads the frame. Account for 6091 * that pad length here, and pad later. 6092 */ 6093 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6094 needed_tailroom += ETH_ZLEN - skb->len; 6095 /* skb_headroom() returns unsigned int... 
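 * so the subtractions below are done as signed ints and clamped to 0;
 * otherwise a skb that already has enough room would wrap the result
 * around to a huge "needed" value.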
*/ 6096 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); 6097 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); 6098 6099 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) 6100 /* No reallocation needed, yay! */ 6101 return 0; 6102 6103 return pskb_expand_head(skb, needed_headroom, needed_tailroom, 6104 GFP_ATOMIC); 6105 } 6106 EXPORT_SYMBOL(skb_ensure_writable_head_tail); 6107 6108 /* remove VLAN header from packet and update csum accordingly. 6109 * expects a non skb_vlan_tag_present skb with a vlan tag payload 6110 */ 6111 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 6112 { 6113 int offset = skb->data - skb_mac_header(skb); 6114 int err; 6115 6116 if (WARN_ONCE(offset, 6117 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 6118 offset)) { 6119 return -EINVAL; 6120 } 6121 6122 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 6123 if (unlikely(err)) 6124 return err; 6125 6126 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6127 6128 vlan_remove_tag(skb, vlan_tci); 6129 6130 skb->mac_header += VLAN_HLEN; 6131 6132 if (skb_network_offset(skb) < ETH_HLEN) 6133 skb_set_network_header(skb, ETH_HLEN); 6134 6135 skb_reset_mac_len(skb); 6136 6137 return err; 6138 } 6139 EXPORT_SYMBOL(__skb_vlan_pop); 6140 6141 /* Pop a vlan tag either from hwaccel or from payload. 6142 * Expects skb->data at mac header. 6143 */ 6144 int skb_vlan_pop(struct sk_buff *skb) 6145 { 6146 u16 vlan_tci; 6147 __be16 vlan_proto; 6148 int err; 6149 6150 if (likely(skb_vlan_tag_present(skb))) { 6151 __vlan_hwaccel_clear_tag(skb); 6152 } else { 6153 if (unlikely(!eth_type_vlan(skb->protocol))) 6154 return 0; 6155 6156 err = __skb_vlan_pop(skb, &vlan_tci); 6157 if (err) 6158 return err; 6159 } 6160 /* move next vlan tag to hw accel tag */ 6161 if (likely(!eth_type_vlan(skb->protocol))) 6162 return 0; 6163 6164 vlan_proto = skb->protocol; 6165 err = __skb_vlan_pop(skb, &vlan_tci); 6166 if (unlikely(err)) 6167 return err; 6168 6169 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6170 return 0; 6171 } 6172 EXPORT_SYMBOL(skb_vlan_pop); 6173 6174 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 6175 * Expects skb->data at mac header. 6176 */ 6177 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 6178 { 6179 if (skb_vlan_tag_present(skb)) { 6180 int offset = skb->data - skb_mac_header(skb); 6181 int err; 6182 6183 if (WARN_ONCE(offset, 6184 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 6185 offset)) { 6186 return -EINVAL; 6187 } 6188 6189 err = __vlan_insert_tag(skb, skb->vlan_proto, 6190 skb_vlan_tag_get(skb)); 6191 if (err) 6192 return err; 6193 6194 skb->protocol = skb->vlan_proto; 6195 skb->mac_len += VLAN_HLEN; 6196 6197 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6198 } 6199 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6200 return 0; 6201 } 6202 EXPORT_SYMBOL(skb_vlan_push); 6203 6204 /** 6205 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 6206 * 6207 * @skb: Socket buffer to modify 6208 * 6209 * Drop the Ethernet header of @skb. 6210 * 6211 * Expects that skb->data points to the mac header and that no VLAN tags are 6212 * present. 6213 * 6214 * Returns 0 on success, -errno otherwise. 
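 *
 * Hypothetical sketch, not taken from this file: a decapsulation path could do
 *
 *	err = skb_eth_pop(skb);
 *	if (err)
 *		goto drop;
 *
 * leaving skb->data just past the removed Ethernet header.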
6215 */ 6216 int skb_eth_pop(struct sk_buff *skb) 6217 { 6218 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6219 skb_network_offset(skb) < ETH_HLEN) 6220 return -EPROTO; 6221 6222 skb_pull_rcsum(skb, ETH_HLEN); 6223 skb_reset_mac_header(skb); 6224 skb_reset_mac_len(skb); 6225 6226 return 0; 6227 } 6228 EXPORT_SYMBOL(skb_eth_pop); 6229 6230 /** 6231 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6232 * 6233 * @skb: Socket buffer to modify 6234 * @dst: Destination MAC address of the new header 6235 * @src: Source MAC address of the new header 6236 * 6237 * Prepend @skb with a new Ethernet header. 6238 * 6239 * Expects that skb->data points to the mac header, which must be empty. 6240 * 6241 * Returns 0 on success, -errno otherwise. 6242 */ 6243 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6244 const unsigned char *src) 6245 { 6246 struct ethhdr *eth; 6247 int err; 6248 6249 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6250 return -EPROTO; 6251 6252 err = skb_cow_head(skb, sizeof(*eth)); 6253 if (err < 0) 6254 return err; 6255 6256 skb_push(skb, sizeof(*eth)); 6257 skb_reset_mac_header(skb); 6258 skb_reset_mac_len(skb); 6259 6260 eth = eth_hdr(skb); 6261 ether_addr_copy(eth->h_dest, dst); 6262 ether_addr_copy(eth->h_source, src); 6263 eth->h_proto = skb->protocol; 6264 6265 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6266 6267 return 0; 6268 } 6269 EXPORT_SYMBOL(skb_eth_push); 6270 6271 /* Update the ethertype of hdr and the skb csum value if required. */ 6272 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6273 __be16 ethertype) 6274 { 6275 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6276 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6277 6278 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6279 } 6280 6281 hdr->h_proto = ethertype; 6282 } 6283 6284 /** 6285 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6286 * the packet 6287 * 6288 * @skb: buffer 6289 * @mpls_lse: MPLS label stack entry to push 6290 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6291 * @mac_len: length of the MAC header 6292 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6293 * ethernet 6294 * 6295 * Expects skb->data at mac header. 6296 * 6297 * Returns 0 on success, -errno otherwise. 6298 */ 6299 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6300 int mac_len, bool ethernet) 6301 { 6302 struct mpls_shim_hdr *lse; 6303 int err; 6304 6305 if (unlikely(!eth_p_mpls(mpls_proto))) 6306 return -EINVAL; 6307 6308 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
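 * An skb that already carries tunnel encapsulation state is therefore
 * rejected just below.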
*/ 6309 if (skb->encapsulation) 6310 return -EINVAL; 6311 6312 err = skb_cow_head(skb, MPLS_HLEN); 6313 if (unlikely(err)) 6314 return err; 6315 6316 if (!skb->inner_protocol) { 6317 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6318 skb_set_inner_protocol(skb, skb->protocol); 6319 } 6320 6321 skb_push(skb, MPLS_HLEN); 6322 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6323 mac_len); 6324 skb_reset_mac_header(skb); 6325 skb_set_network_header(skb, mac_len); 6326 skb_reset_mac_len(skb); 6327 6328 lse = mpls_hdr(skb); 6329 lse->label_stack_entry = mpls_lse; 6330 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6331 6332 if (ethernet && mac_len >= ETH_HLEN) 6333 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6334 skb->protocol = mpls_proto; 6335 6336 return 0; 6337 } 6338 EXPORT_SYMBOL_GPL(skb_mpls_push); 6339 6340 /** 6341 * skb_mpls_pop() - pop the outermost MPLS header 6342 * 6343 * @skb: buffer 6344 * @next_proto: ethertype of header after popped MPLS header 6345 * @mac_len: length of the MAC header 6346 * @ethernet: flag to indicate if the packet is ethernet 6347 * 6348 * Expects skb->data at mac header. 6349 * 6350 * Returns 0 on success, -errno otherwise. 6351 */ 6352 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6353 bool ethernet) 6354 { 6355 int err; 6356 6357 if (unlikely(!eth_p_mpls(skb->protocol))) 6358 return 0; 6359 6360 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6361 if (unlikely(err)) 6362 return err; 6363 6364 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6365 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6366 mac_len); 6367 6368 __skb_pull(skb, MPLS_HLEN); 6369 skb_reset_mac_header(skb); 6370 skb_set_network_header(skb, mac_len); 6371 6372 if (ethernet && mac_len >= ETH_HLEN) { 6373 struct ethhdr *hdr; 6374 6375 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6376 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6377 skb_mod_eth_type(skb, hdr, next_proto); 6378 } 6379 skb->protocol = next_proto; 6380 6381 return 0; 6382 } 6383 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6384 6385 /** 6386 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6387 * 6388 * @skb: buffer 6389 * @mpls_lse: new MPLS label stack entry to update to 6390 * 6391 * Expects skb->data at mac header. 6392 * 6393 * Returns 0 on success, -errno otherwise. 6394 */ 6395 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6396 { 6397 int err; 6398 6399 if (unlikely(!eth_p_mpls(skb->protocol))) 6400 return -EINVAL; 6401 6402 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6403 if (unlikely(err)) 6404 return err; 6405 6406 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6407 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6408 6409 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6410 } 6411 6412 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6413 6414 return 0; 6415 } 6416 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6417 6418 /** 6419 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6420 * 6421 * @skb: buffer 6422 * 6423 * Expects skb->data at mac header. 6424 * 6425 * Returns 0 on success, -errno otherwise. 
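 *
 * Hypothetical sketch, not taken from this file: a forwarding action could do
 *
 *	if (skb_mpls_dec_ttl(skb))
 *		goto drop;
 *
 * since a TTL that reaches zero, a non-MPLS skb or an unwritable header are
 * all reported as errors.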
6426 */ 6427 int skb_mpls_dec_ttl(struct sk_buff *skb) 6428 { 6429 u32 lse; 6430 u8 ttl; 6431 6432 if (unlikely(!eth_p_mpls(skb->protocol))) 6433 return -EINVAL; 6434 6435 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6436 return -ENOMEM; 6437 6438 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6439 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6440 if (!--ttl) 6441 return -EINVAL; 6442 6443 lse &= ~MPLS_LS_TTL_MASK; 6444 lse |= ttl << MPLS_LS_TTL_SHIFT; 6445 6446 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6447 } 6448 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6449 6450 /** 6451 * alloc_skb_with_frags - allocate skb with page frags 6452 * 6453 * @header_len: size of linear part 6454 * @data_len: needed length in frags 6455 * @order: max page order desired. 6456 * @errcode: pointer to error code if any 6457 * @gfp_mask: allocation mask 6458 * 6459 * This can be used to allocate a paged skb, given a maximal order for frags. 6460 */ 6461 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6462 unsigned long data_len, 6463 int order, 6464 int *errcode, 6465 gfp_t gfp_mask) 6466 { 6467 unsigned long chunk; 6468 struct sk_buff *skb; 6469 struct page *page; 6470 int nr_frags = 0; 6471 6472 *errcode = -EMSGSIZE; 6473 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6474 return NULL; 6475 6476 *errcode = -ENOBUFS; 6477 skb = alloc_skb(header_len, gfp_mask); 6478 if (!skb) 6479 return NULL; 6480 6481 while (data_len) { 6482 if (nr_frags == MAX_SKB_FRAGS - 1) 6483 goto failure; 6484 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6485 order--; 6486 6487 if (order) { 6488 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6489 __GFP_COMP | 6490 __GFP_NOWARN, 6491 order); 6492 if (!page) { 6493 order--; 6494 continue; 6495 } 6496 } else { 6497 page = alloc_page(gfp_mask); 6498 if (!page) 6499 goto failure; 6500 } 6501 chunk = min_t(unsigned long, data_len, 6502 PAGE_SIZE << order); 6503 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6504 nr_frags++; 6505 skb->truesize += (PAGE_SIZE << order); 6506 data_len -= chunk; 6507 } 6508 return skb; 6509 6510 failure: 6511 kfree_skb(skb); 6512 return NULL; 6513 } 6514 EXPORT_SYMBOL(alloc_skb_with_frags); 6515 6516 /* carve out the first off bytes from skb when off < headlen */ 6517 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6518 const int headlen, gfp_t gfp_mask) 6519 { 6520 int i; 6521 unsigned int size = skb_end_offset(skb); 6522 int new_hlen = headlen - off; 6523 u8 *data; 6524 6525 if (skb_pfmemalloc(skb)) 6526 gfp_mask |= __GFP_MEMALLOC; 6527 6528 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6529 if (!data) 6530 return -ENOMEM; 6531 size = SKB_WITH_OVERHEAD(size); 6532 6533 /* Copy real data, and all frags */ 6534 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6535 skb->len -= off; 6536 6537 memcpy((struct skb_shared_info *)(data + size), 6538 skb_shinfo(skb), 6539 offsetof(struct skb_shared_info, 6540 frags[skb_shinfo(skb)->nr_frags])); 6541 if (skb_cloned(skb)) { 6542 /* drop the old head gracefully */ 6543 if (skb_orphan_frags(skb, gfp_mask)) { 6544 skb_kfree_head(data, size); 6545 return -ENOMEM; 6546 } 6547 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6548 skb_frag_ref(skb, i); 6549 if (skb_has_frag_list(skb)) 6550 skb_clone_fraglist(skb); 6551 skb_release_data(skb, SKB_CONSUMED); 6552 } else { 6553 /* we can reuse existing recount- all we did was 6554 * relocate values 6555 */ 6556 skb_free_head(skb); 6557 } 6558 
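	/* Commit the newly allocated linear buffer: point the skb at it and
	 * reset the header and clone bookkeeping below.
	 */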
6559 skb->head = data; 6560 skb->data = data; 6561 skb->head_frag = 0; 6562 skb_set_end_offset(skb, size); 6563 skb_set_tail_pointer(skb, skb_headlen(skb)); 6564 skb_headers_offset_update(skb, 0); 6565 skb->cloned = 0; 6566 skb->hdr_len = 0; 6567 skb->nohdr = 0; 6568 atomic_set(&skb_shinfo(skb)->dataref, 1); 6569 6570 return 0; 6571 } 6572 6573 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6574 6575 /* carve out the first eat bytes from skb's frag_list. May recurse into 6576 * pskb_carve() 6577 */ 6578 static int pskb_carve_frag_list(struct sk_buff *skb, 6579 struct skb_shared_info *shinfo, int eat, 6580 gfp_t gfp_mask) 6581 { 6582 struct sk_buff *list = shinfo->frag_list; 6583 struct sk_buff *clone = NULL; 6584 struct sk_buff *insp = NULL; 6585 6586 do { 6587 if (!list) { 6588 pr_err("Not enough bytes to eat. Want %d\n", eat); 6589 return -EFAULT; 6590 } 6591 if (list->len <= eat) { 6592 /* Eaten as whole. */ 6593 eat -= list->len; 6594 list = list->next; 6595 insp = list; 6596 } else { 6597 /* Eaten partially. */ 6598 if (skb_shared(list)) { 6599 clone = skb_clone(list, gfp_mask); 6600 if (!clone) 6601 return -ENOMEM; 6602 insp = list->next; 6603 list = clone; 6604 } else { 6605 /* This may be pulled without problems. */ 6606 insp = list; 6607 } 6608 if (pskb_carve(list, eat, gfp_mask) < 0) { 6609 kfree_skb(clone); 6610 return -ENOMEM; 6611 } 6612 break; 6613 } 6614 } while (eat); 6615 6616 /* Free pulled out fragments. */ 6617 while ((list = shinfo->frag_list) != insp) { 6618 shinfo->frag_list = list->next; 6619 consume_skb(list); 6620 } 6621 /* And insert new clone at head. */ 6622 if (clone) { 6623 clone->next = list; 6624 shinfo->frag_list = clone; 6625 } 6626 return 0; 6627 } 6628 6629 /* carve off first len bytes from skb. Split line (off) is in the 6630 * non-linear part of skb 6631 */ 6632 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6633 int pos, gfp_t gfp_mask) 6634 { 6635 int i, k = 0; 6636 unsigned int size = skb_end_offset(skb); 6637 u8 *data; 6638 const int nfrags = skb_shinfo(skb)->nr_frags; 6639 struct skb_shared_info *shinfo; 6640 6641 if (skb_pfmemalloc(skb)) 6642 gfp_mask |= __GFP_MEMALLOC; 6643 6644 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6645 if (!data) 6646 return -ENOMEM; 6647 size = SKB_WITH_OVERHEAD(size); 6648 6649 memcpy((struct skb_shared_info *)(data + size), 6650 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6651 if (skb_orphan_frags(skb, gfp_mask)) { 6652 skb_kfree_head(data, size); 6653 return -ENOMEM; 6654 } 6655 shinfo = (struct skb_shared_info *)(data + size); 6656 for (i = 0; i < nfrags; i++) { 6657 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6658 6659 if (pos + fsize > off) { 6660 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6661 6662 if (pos < off) { 6663 /* Split frag. 6664 * We have two variants in this case: 6665 * 1. Move all the frag to the second 6666 * part, if it is possible. F.e. 6667 * this approach is mandatory for TUX, 6668 * where splitting is expensive. 6669 * 2. Split is accurately. We make this. 
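 * Variant 2 is implemented just below: advance the first frag's
 * offset by off - pos and shrink its size by the same amount.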
6670 */ 6671 skb_frag_off_add(&shinfo->frags[0], off - pos); 6672 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6673 } 6674 skb_frag_ref(skb, i); 6675 k++; 6676 } 6677 pos += fsize; 6678 } 6679 shinfo->nr_frags = k; 6680 if (skb_has_frag_list(skb)) 6681 skb_clone_fraglist(skb); 6682 6683 /* split line is in frag list */ 6684 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6685 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6686 if (skb_has_frag_list(skb)) 6687 kfree_skb_list(skb_shinfo(skb)->frag_list); 6688 skb_kfree_head(data, size); 6689 return -ENOMEM; 6690 } 6691 skb_release_data(skb, SKB_CONSUMED); 6692 6693 skb->head = data; 6694 skb->head_frag = 0; 6695 skb->data = data; 6696 skb_set_end_offset(skb, size); 6697 skb_reset_tail_pointer(skb); 6698 skb_headers_offset_update(skb, 0); 6699 skb->cloned = 0; 6700 skb->hdr_len = 0; 6701 skb->nohdr = 0; 6702 skb->len -= off; 6703 skb->data_len = skb->len; 6704 atomic_set(&skb_shinfo(skb)->dataref, 1); 6705 return 0; 6706 } 6707 6708 /* remove len bytes from the beginning of the skb */ 6709 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6710 { 6711 int headlen = skb_headlen(skb); 6712 6713 if (len < headlen) 6714 return pskb_carve_inside_header(skb, len, headlen, gfp); 6715 else 6716 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6717 } 6718 6719 /* Extract to_copy bytes starting at off from skb, and return this in 6720 * a new skb 6721 */ 6722 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6723 int to_copy, gfp_t gfp) 6724 { 6725 struct sk_buff *clone = skb_clone(skb, gfp); 6726 6727 if (!clone) 6728 return NULL; 6729 6730 if (pskb_carve(clone, off, gfp) < 0 || 6731 pskb_trim(clone, to_copy)) { 6732 kfree_skb(clone); 6733 return NULL; 6734 } 6735 return clone; 6736 } 6737 EXPORT_SYMBOL(pskb_extract); 6738 6739 /** 6740 * skb_condense - try to get rid of fragments/frag_list if possible 6741 * @skb: buffer 6742 * 6743 * Can be used to save memory before skb is added to a busy queue. 6744 * If packet has bytes in frags and enough tail room in skb->head, 6745 * pull all of them, so that we can free the frags right now and adjust 6746 * truesize. 6747 * Notes: 6748 * We do not reallocate skb->head thus can not fail. 6749 * Caller must re-evaluate skb->truesize if needed. 6750 */ 6751 void skb_condense(struct sk_buff *skb) 6752 { 6753 if (skb->data_len) { 6754 if (skb->data_len > skb->end - skb->tail || 6755 skb_cloned(skb)) 6756 return; 6757 6758 /* Nice, we can free page frag(s) right now */ 6759 __pskb_pull_tail(skb, skb->data_len); 6760 } 6761 /* At this point, skb->truesize might be over estimated, 6762 * because skb had a fragment, and fragments do not tell 6763 * their truesize. 6764 * When we pulled its content into skb->head, fragment 6765 * was freed, but __pskb_pull_tail() could not possibly 6766 * adjust skb->truesize, not knowing the frag truesize. 6767 */ 6768 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6769 } 6770 EXPORT_SYMBOL(skb_condense); 6771 6772 #ifdef CONFIG_SKB_EXTENSIONS 6773 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6774 { 6775 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6776 } 6777 6778 /** 6779 * __skb_ext_alloc - allocate a new skb extensions storage 6780 * 6781 * @flags: See kmalloc(). 6782 * 6783 * Returns the newly allocated pointer. The pointer can later attached to a 6784 * skb via __skb_ext_set(). 6785 * Note: caller must handle the skb_ext as an opaque data. 
6786 */ 6787 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6788 { 6789 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6790 6791 if (new) { 6792 memset(new->offset, 0, sizeof(new->offset)); 6793 refcount_set(&new->refcnt, 1); 6794 } 6795 6796 return new; 6797 } 6798 6799 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6800 unsigned int old_active) 6801 { 6802 struct skb_ext *new; 6803 6804 if (refcount_read(&old->refcnt) == 1) 6805 return old; 6806 6807 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6808 if (!new) 6809 return NULL; 6810 6811 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6812 refcount_set(&new->refcnt, 1); 6813 6814 #ifdef CONFIG_XFRM 6815 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6816 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6817 unsigned int i; 6818 6819 for (i = 0; i < sp->len; i++) 6820 xfrm_state_hold(sp->xvec[i]); 6821 } 6822 #endif 6823 #ifdef CONFIG_MCTP_FLOWS 6824 if (old_active & (1 << SKB_EXT_MCTP)) { 6825 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6826 6827 if (flow->key) 6828 refcount_inc(&flow->key->refs); 6829 } 6830 #endif 6831 __skb_ext_put(old); 6832 return new; 6833 } 6834 6835 /** 6836 * __skb_ext_set - attach the specified extension storage to this skb 6837 * @skb: buffer 6838 * @id: extension id 6839 * @ext: extension storage previously allocated via __skb_ext_alloc() 6840 * 6841 * Existing extensions, if any, are cleared. 6842 * 6843 * Returns the pointer to the extension. 6844 */ 6845 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6846 struct skb_ext *ext) 6847 { 6848 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6849 6850 skb_ext_put(skb); 6851 newlen = newoff + skb_ext_type_len[id]; 6852 ext->chunks = newlen; 6853 ext->offset[id] = newoff; 6854 skb->extensions = ext; 6855 skb->active_extensions = 1 << id; 6856 return skb_ext_get_ptr(ext, id); 6857 } 6858 6859 /** 6860 * skb_ext_add - allocate space for given extension, COW if needed 6861 * @skb: buffer 6862 * @id: extension to allocate space for 6863 * 6864 * Allocates enough space for the given extension. 6865 * If the extension is already present, a pointer to that extension 6866 * is returned. 6867 * 6868 * If the skb was cloned, COW applies and the returned memory can be 6869 * modified without changing the extension space of clones buffers. 6870 * 6871 * Returns pointer to the extension or NULL on allocation failure. 
6872 */ 6873 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6874 { 6875 struct skb_ext *new, *old = NULL; 6876 unsigned int newlen, newoff; 6877 6878 if (skb->active_extensions) { 6879 old = skb->extensions; 6880 6881 new = skb_ext_maybe_cow(old, skb->active_extensions); 6882 if (!new) 6883 return NULL; 6884 6885 if (__skb_ext_exist(new, id)) 6886 goto set_active; 6887 6888 newoff = new->chunks; 6889 } else { 6890 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6891 6892 new = __skb_ext_alloc(GFP_ATOMIC); 6893 if (!new) 6894 return NULL; 6895 } 6896 6897 newlen = newoff + skb_ext_type_len[id]; 6898 new->chunks = newlen; 6899 new->offset[id] = newoff; 6900 set_active: 6901 skb->slow_gro = 1; 6902 skb->extensions = new; 6903 skb->active_extensions |= 1 << id; 6904 return skb_ext_get_ptr(new, id); 6905 } 6906 EXPORT_SYMBOL(skb_ext_add); 6907 6908 #ifdef CONFIG_XFRM 6909 static void skb_ext_put_sp(struct sec_path *sp) 6910 { 6911 unsigned int i; 6912 6913 for (i = 0; i < sp->len; i++) 6914 xfrm_state_put(sp->xvec[i]); 6915 } 6916 #endif 6917 6918 #ifdef CONFIG_MCTP_FLOWS 6919 static void skb_ext_put_mctp(struct mctp_flow *flow) 6920 { 6921 if (flow->key) 6922 mctp_key_unref(flow->key); 6923 } 6924 #endif 6925 6926 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6927 { 6928 struct skb_ext *ext = skb->extensions; 6929 6930 skb->active_extensions &= ~(1 << id); 6931 if (skb->active_extensions == 0) { 6932 skb->extensions = NULL; 6933 __skb_ext_put(ext); 6934 #ifdef CONFIG_XFRM 6935 } else if (id == SKB_EXT_SEC_PATH && 6936 refcount_read(&ext->refcnt) == 1) { 6937 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6938 6939 skb_ext_put_sp(sp); 6940 sp->len = 0; 6941 #endif 6942 } 6943 } 6944 EXPORT_SYMBOL(__skb_ext_del); 6945 6946 void __skb_ext_put(struct skb_ext *ext) 6947 { 6948 /* If this is last clone, nothing can increment 6949 * it after check passes. Avoids one atomic op. 6950 */ 6951 if (refcount_read(&ext->refcnt) == 1) 6952 goto free_now; 6953 6954 if (!refcount_dec_and_test(&ext->refcnt)) 6955 return; 6956 free_now: 6957 #ifdef CONFIG_XFRM 6958 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6959 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6960 #endif 6961 #ifdef CONFIG_MCTP_FLOWS 6962 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 6963 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 6964 #endif 6965 6966 kmem_cache_free(skbuff_ext_cache, ext); 6967 } 6968 EXPORT_SYMBOL(__skb_ext_put); 6969 #endif /* CONFIG_SKB_EXTENSIONS */ 6970 6971 /** 6972 * skb_attempt_defer_free - queue skb for remote freeing 6973 * @skb: buffer 6974 * 6975 * Put @skb in a per-cpu list, using the cpu which 6976 * allocated the skb/pages to reduce false sharing 6977 * and memory zone spinlock contention. 6978 */ 6979 void skb_attempt_defer_free(struct sk_buff *skb) 6980 { 6981 int cpu = skb->alloc_cpu; 6982 struct softnet_data *sd; 6983 unsigned int defer_max; 6984 bool kick; 6985 6986 if (WARN_ON_ONCE(cpu >= nr_cpu_ids) || 6987 !cpu_online(cpu) || 6988 cpu == raw_smp_processor_id()) { 6989 nodefer: __kfree_skb(skb); 6990 return; 6991 } 6992 6993 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 6994 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 6995 6996 sd = &per_cpu(softnet_data, cpu); 6997 defer_max = READ_ONCE(sysctl_skb_defer_max); 6998 if (READ_ONCE(sd->defer_count) >= defer_max) 6999 goto nodefer; 7000 7001 spin_lock_bh(&sd->defer_lock); 7002 /* Send an IPI every time queue reaches half capacity. 
*/ 7003 kick = sd->defer_count == (defer_max >> 1); 7004 /* Paired with the READ_ONCE() few lines above */ 7005 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 7006 7007 skb->next = sd->defer_list; 7008 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 7009 WRITE_ONCE(sd->defer_list, skb); 7010 spin_unlock_bh(&sd->defer_lock); 7011 7012 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 7013 * if we are unlucky enough (this seems very unlikely). 7014 */ 7015 if (unlikely(kick)) 7016 kick_defer_list_purge(sd, cpu); 7017 } 7018 7019 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 7020 size_t offset, size_t len) 7021 { 7022 const char *kaddr; 7023 __wsum csum; 7024 7025 kaddr = kmap_local_page(page); 7026 csum = csum_partial(kaddr + offset, len, 0); 7027 kunmap_local(kaddr); 7028 skb->csum = csum_block_add(skb->csum, csum, skb->len); 7029 } 7030 7031 /** 7032 * skb_splice_from_iter - Splice (or copy) pages to skbuff 7033 * @skb: The buffer to add pages to 7034 * @iter: Iterator representing the pages to be added 7035 * @maxsize: Maximum amount of pages to be added 7036 * @gfp: Allocation flags 7037 * 7038 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 7039 * extracts pages from an iterator and adds them to the socket buffer if 7040 * possible, copying them to fragments if not possible (such as if they're slab 7041 * pages). 7042 * 7043 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 7044 * insufficient space in the buffer to transfer anything. 7045 */ 7046 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 7047 ssize_t maxsize, gfp_t gfp) 7048 { 7049 size_t frag_limit = READ_ONCE(sysctl_max_skb_frags); 7050 struct page *pages[8], **ppages = pages; 7051 ssize_t spliced = 0, ret = 0; 7052 unsigned int i; 7053 7054 while (iter->count > 0) { 7055 ssize_t space, nr, len; 7056 size_t off; 7057 7058 ret = -EMSGSIZE; 7059 space = frag_limit - skb_shinfo(skb)->nr_frags; 7060 if (space < 0) 7061 break; 7062 7063 /* We might be able to coalesce without increasing nr_frags */ 7064 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 7065 7066 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 7067 if (len <= 0) { 7068 ret = len ?: -EIO; 7069 break; 7070 } 7071 7072 i = 0; 7073 do { 7074 struct page *page = pages[i++]; 7075 size_t part = min_t(size_t, PAGE_SIZE - off, len); 7076 7077 ret = -EIO; 7078 if (WARN_ON_ONCE(!sendpage_ok(page))) 7079 goto out; 7080 7081 ret = skb_append_pagefrags(skb, page, off, part, 7082 frag_limit); 7083 if (ret < 0) { 7084 iov_iter_revert(iter, len); 7085 goto out; 7086 } 7087 7088 if (skb->ip_summed == CHECKSUM_NONE) 7089 skb_splice_csum_page(skb, page, off, part); 7090 7091 off = 0; 7092 spliced += part; 7093 maxsize -= part; 7094 len -= part; 7095 } while (len > 0); 7096 7097 if (maxsize <= 0) 7098 break; 7099 } 7100 7101 out: 7102 skb_len_add(skb, spliced); 7103 return spliced ?: ret; 7104 } 7105 EXPORT_SYMBOL(skb_splice_from_iter); 7106 7107 static __always_inline 7108 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7109 size_t len, void *to, void *priv2) 7110 { 7111 __wsum *csum = priv2; 7112 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7113 7114 *csum = csum_block_add(*csum, next, progress); 7115 return 0; 7116 } 7117 7118 static __always_inline 7119 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7120 size_t len, void *to, void *priv2) 7121 { 7122 __wsum next, *csum = priv2; 7123 7124 
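	/* A zero return from csum_and_copy_from_user() signals a faulting
	 * copy; report the whole chunk as not copied in that case so the
	 * caller can revert the iterator.
	 */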
next = csum_and_copy_from_user(iter_from, to + progress, len); 7125 *csum = csum_block_add(*csum, next, progress); 7126 return next ? 0 : len; 7127 } 7128 7129 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7130 __wsum *csum, struct iov_iter *i) 7131 { 7132 size_t copied; 7133 7134 if (WARN_ON_ONCE(!i->data_source)) 7135 return false; 7136 copied = iterate_and_advance2(i, bytes, addr, csum, 7137 copy_from_user_iter_csum, 7138 memcpy_from_iter_csum); 7139 if (likely(copied == bytes)) 7140 return true; 7141 iov_iter_revert(i, copied); 7142 return false; 7143 } 7144 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7145
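/* A hypothetical caller, not taken from this file: copy user data into a
 * linear destination while accumulating the checksum for a CHECKSUM_NONE skb,
 * in the style of ip_generic_getfrag():
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 *		return -EFAULT;
 *	skb->csum = csum_block_add(skb->csum, csum, odd);
 */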