// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define SKB_SMALL_HEAD_SIZE	SKB_HEAD_ALIGN(MAX_TCP_HEADER)

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE					\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?				\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :		\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM						\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
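/* Worked example (illustrative only; the exact numbers depend on the
 * architecture, MAX_TCP_HEADER and L1_CACHE_BYTES): if
 * SKB_HEAD_ALIGN(MAX_TCP_HEADER) happened to land exactly on a power of
 * two such as 512, a kmalloc()ed head of the same size class would live
 * in a 512-byte slab and the two kinds of heads could not be told apart
 * by size alone.  Bumping the cache size to 512 + L1_CACHE_BYTES keeps it
 * off the power-of-two kmalloc() sizes used in this range, so
 * skb_kfree_head() below can rely on
 * skb_end_offset() == SKB_SMALL_HEAD_HEADROOM to identify heads that came
 * from skb_small_head_cache.
 */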
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long
 * as the netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
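/* Minimal usage sketch for the two helpers above (the subsystem id and
 * reason names below are hypothetical; real subsystems define their own
 * enum values and string table):
 *
 *	static const char * const foo_drop_reasons[] = {
 *		[0] = "FOO_REASON_EXAMPLE",
 *	};
 *
 *	static const struct drop_reason_list drop_reasons_foo = {
 *		.reasons = foo_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(foo_drop_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_FOO,
 *				     &drop_reasons_foo);
 *	...
 *	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_FOO);
 *
 * The list must stay valid until drop_reasons_unregister_subsys() returns,
 * since only its synchronize_rcu() separates readers from the removal.
 */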
/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragments. Constrained to systems
 * with a very limited amount of 1K fragments fitting a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif
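/* Worked example of the 1K slicing above (assuming PAGE_SIZE == 4096): the
 * first call on a fresh cache allocates one order-0 page, hands out the
 * fragment at offset 3072 and does page_ref_add(page, 3), so the page
 * reference count covers all four 1K slots.  The next three calls return
 * offsets 2048, 1024 and 0 without touching the page allocator; the call
 * after that sees the offset go negative and starts over with a new page.
 */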
struct napi_alloc_cache {
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
 */
void napi_get_frags_check(struct napi_struct *napi)
{
	struct sk_buff *skb;

	local_bh_disable();
	skb = napi_get_frags(napi);
	WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
	napi_free_frags(napi);
	local_bh_enable();
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	fragsz = SKB_DATA_ALIGN(fragsz);

	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
				       align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
					       align_mask);
	} else {
		struct napi_alloc_cache *nc;

		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache);
		data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
					       align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count))
			return NULL;
	}

	skb = nc->skb_cache[--nc->skb_count];
	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));

	return skb;
}

static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
					 unsigned int size)
{
	struct skb_shared_info *shinfo;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
				     unsigned int *size)
{
	void *resized;

	/* Must find the allocation size (and grow it to match). */
	*size = ksize(data);
	/* krealloc() will immediately return "data" when
	 * "ksize(data)" is requested: it is the existing upper
	 * bound. As a result, GFP_ATOMIC will be ignored. Note
	 * that this "new" pointer needs to be passed back to the
	 * caller for use so the __alloc_size hinting will be
	 * tracked correctly.
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
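/* Usage sketch for the build_skb() contract documented above (hypothetical
 * driver code; rx_buf, rx_buf_size, frame_len, netdev and napi are
 * assumptions of the example, not names defined in this file).  The receive
 * buffer was sized at ring-setup time as NET_SKB_PAD + frame room +
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)):
 *
 *	struct sk_buff *skb = build_skb(rx_buf, rx_buf_size);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// room the driver left at head
 *		skb_put(skb, frame_len);	// bytes the NIC actually wrote
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(napi, skb);
 *	}
 */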
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
				flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
				node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
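/* Usage sketch for the allocator above (illustrative protocol-stack style
 * code; hdr_len and payload_len are assumptions of the example).  Since the
 * returned skb has no headroom, callers typically over-allocate and then
 * carve the buffer up with skb_reserve()/skb_put():
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, hdr_len);	// headroom for headers
 *		skb_put(skb, payload_len);	// payload area
 *		...
 *		skb_push(skb, hdr_len);		// prepend headers later
 *	}
 */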
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 *	napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 * When the small frag allocator is available, prefer it over kmalloc
	 * for small fragments
	 */
	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
		/* we are artificially inflating the allocation size, but
		 * that is not as bad as it may look, since:
		 * - 'len' less than GRO_MAX_HEAD makes little sense
		 * - On most systems, larger 'len' values lead to fragment
		 *   size above 512 bytes
		 * - kmalloc would use the kmalloc-1k slab for such values
		 * - Builds with smaller GRO_MAX_HEAD will very likely do
		 *   little networking, as that implies no WiFi and no
		 *   tunnel support, and 32-bit arches.
		 */
		len = SZ_1K;

		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
	} else {
		len = SKB_HEAD_ALIGN(len);

		data = page_frag_alloc(&nc->page, len, gfp_mask);
		pfmemalloc = nc->page.pfmemalloc;
	}

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);
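/* Usage sketch for napi_alloc_skb() (hypothetical driver poll-path code;
 * rx_desc_buf, copy_len, netdev and napi are assumptions of the example).
 * A typical copybreak receive path looks like:
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, copy_len);
 *
 *	if (skb) {
 *		skb_put_data(skb, rx_desc_buf, copy_len);
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(napi, skb);
 *	}
 *
 * NET_SKB_PAD + NET_IP_ALIGN headroom is already reserved by the helper,
 * so the driver does not call skb_reserve() itself.
 */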
void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
			    int off, int size, unsigned int truesize)
{
	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_fill_netmem_desc(skb, i, netmem, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
		    unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	u32 size, truesize, len, max_head_size, off;
	struct sk_buff *skb = *pskb, *nskb;
	int err, i, head_off;
	void *data;
	/* XDP does not support fraglist so we need to linearize
	 * the skb.
	 */
	if (skb_has_frag_list(skb))
		return -EOPNOTSUPP;

	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
		return -ENOMEM;

	size = min_t(u32, skb->len, max_head_size);
	truesize = SKB_HEAD_ALIGN(size) + headroom;
	data = page_pool_dev_alloc_va(pool, &truesize);
	if (!data)
		return -ENOMEM;

	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_free_va(pool, data, true);
		return -ENOMEM;
	}

	skb_reserve(nskb, headroom);
	skb_copy_header(nskb, skb);
	skb_mark_for_recycle(nskb);

	err = skb_copy_bits(skb, 0, nskb->data, size);
	if (err) {
		consume_skb(nskb);
		return err;
	}
	skb_put(nskb, size);

	head_off = skb_headroom(nskb) - skb_headroom(skb);
	skb_headers_offset_update(nskb, head_off);

	off = size;
	len = skb->len - off;
	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
		struct page *page;
		u32 page_off;

		size = min_t(u32, len, PAGE_SIZE);
		truesize = size;

		page = page_pool_dev_alloc(pool, &page_off, &truesize);
		if (!page) {
			consume_skb(nskb);
			return -ENOMEM;
		}

		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
		err = skb_copy_bits(skb, off, page_address(page) + page_off,
				    size);
		if (err) {
			consume_skb(nskb);
			return err;
		}

		len -= size;
		off += size;
	}

	consume_skb(skb);
	*pskb = nskb;

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);

int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
			 struct bpf_prog *prog)
{
	if (!prog->aux->xdp_has_frags)
		return -EINVAL;

	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);

#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page)
{
	page = compound_head(page);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of compound page and bit 1 for pfmemalloc page, so
	 * mask those bits for freeing side when doing below checking,
	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely(!is_pp_page(page)))
		return false;

	page_pool_put_full_page(page->pp, page, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(virt_to_page(data));
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling
	 */
	skb->pp_recycle = 0;
}

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb, reason);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

static __always_inline
bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return false;

	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
			       u32_get_bits(reason,
					    SKB_DROP_REASON_SUBSYS_MASK) >=
				SKB_DROP_REASON_SUBSYS_NUM);

	if (reason == SKB_CONSUMED)
		trace_consume_skb(skb, __builtin_return_address(0));
	else
		trace_kfree_skb(skb, __builtin_return_address(0), reason);
	return true;
}

/**
 *	kfree_skb_reason - free an sk_buff with special reason
 *	@skb: buffer to free
 *	@reason: reason why this skb is dropped
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Meanwhile, pass the drop reason to the 'kfree_skb'
 *	tracepoint.
 */
void __fix_address
kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (__kfree_skb_reason(skb, reason))
		__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb_reason);

#define KFREE_SKB_BULK_SIZE	16

struct skb_free_array {
	unsigned int skb_count;
	void *skb_array[KFREE_SKB_BULK_SIZE];
};

static void kfree_skb_add_bulk(struct sk_buff *skb,
			       struct skb_free_array *sa,
			       enum skb_drop_reason reason)
{
	/* if SKB is a clone, don't handle this case */
	if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, reason);
	sa->skb_array[sa->skb_count++] = skb;

	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
		kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
				     sa->skb_array);
		sa->skb_count = 0;
	}
}

void __fix_address
kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
{
	struct skb_free_array sa;

	sa.skb_count = 0;

	while (segs) {
		struct sk_buff *next = segs->next;

		if (__kfree_skb_reason(segs, reason)) {
			skb_poison_list(segs);
			kfree_skb_add_bulk(segs, &sa, reason);
		}

		segs = next;
	}

	if (sa.skb_count)
		kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
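/* Usage sketch (illustrative only): because skb_dump() prints the whole
 * packet when full_pkt is true, callers are expected to rate-limit it, e.g.
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_WARNING, skb, false);
 */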
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
	       "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
	       "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->mac_len,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
	       skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
	       skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
	       skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
	       skb->inner_network_header, skb->inner_transport_header);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb) {
		skb_zcopy_downgrade_managed(skb);
		skb_zcopy_clear(skb, true);
	}
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Like consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb, __builtin_return_address(0));
	skb_release_data(skb, SKB_CONSUMED);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	if (!kasan_mempool_poison_object(skb))
		return;

	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_mempool_unpoison_object(nc->skb_cache[i],
						kmem_cache_size(net_hotdata.skbuff_cache));

		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_all(skb, reason);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb, __builtin_return_address(0));

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, SKB_CONSUMED);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in the headers group.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers, &old->headers, sizeof(new->headers));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
	CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst, SKB_CONSUMED);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	rlim = rlimit(RLIMIT_MEMLOCK);
	if (rlim == RLIM_INFINITY)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlim >> PAGE_SHIFT;
	user = mmp->user ? : current_user();
	old_pg = atomic_long_read(&user->locked_vm);
	do {
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info_msgzc *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	refcount_set(&uarg->ubuf.refcnt, 1);
	sock_hold(sk);

	return &uarg->ubuf;
}

static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg)
{
	if (uarg) {
		struct ubuf_info_msgzc *uarg_zc;
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* there might be non MSG_ZEROCOPY users */
		if (uarg->ops != &msg_zerocopy_ubuf_ops)
			return NULL;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		uarg_zc = uarg_to_msgzc(uarg);
		bytelen = uarg_zc->bytelen + size;
		if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
			if (mm_account_pinned_pages(&uarg_zc->mmp, size))
				return NULL;
			uarg_zc->len++;
			uarg_zc->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				net_zcopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return msg_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}
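/* Worked example for the coalescing above (illustrative numbers): a pending
 * notification covering zerocopy ids 1..3 has ee_info == 1 and ee_data == 3.
 * A new completion for ids 4..6 arrives with lo == 4 and len == 3; since
 * lo == old_hi + 1 and the combined length still fits in 32 bits, ee_data
 * is bumped to 6 and userspace sees the two notifications as the single
 * range [1, 6].
 */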
static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	bool is_zerocopy;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;
	is_zerocopy = uarg->zerocopy;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!is_zerocopy)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}

static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
				  bool success)
{
	struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);

	uarg_zc->zerocopy = uarg_zc->zerocopy & success;

	if (refcount_dec_and_test(&uarg->refcnt))
		__msg_zerocopy_callback(uarg_zc);
}

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;

	atomic_dec(&sk->sk_zckey);
	uarg_to_msgzc(uarg)->len--;

	if (have_uref)
		msg_zerocopy_complete(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);

const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
	.complete = msg_zerocopy_complete,
};
EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	int err, orig_len = skb->len;

	if (uarg->ops->link_skb) {
		err = uarg->ops->link_skb(skb, uarg);
		if (err)
			return err;
	} else {
		/* An skb can only point to one uarg. This edge case happens
		 * when TCP appends to an skb, but zerocopy_realloc triggered
		 * a new alloc.
		 */
		if (orig_uarg && uarg != orig_uarg)
			return -EEXIST;
	}

	err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	if (!uarg->ops->link_skb)
		skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
{
	int i;

	skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_ref(skb, i);
}
EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, order, psize, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	/* We might have to allocate high order pages, so compute what minimum
	 * page order is needed.
	 */
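	/* Worked example (illustrative only, assuming PAGE_SIZE == 4096 and
	 * MAX_SKB_FRAGS == 17): for __skb_pagelen(skb) == 80 KB, order 0
	 * offers at most 17 * 4 KB = 68 KB, which is not enough, while
	 * order 1 offers 17 * 8 KB = 136 KB, so order becomes 1, psize 8 KB
	 * and new_frags = DIV_ROUND_UP(80 KB, 8 KB) = 10 copy pages.
	 */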
1918 */ 1919 order = 0; 1920 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1921 order++; 1922 psize = (PAGE_SIZE << order); 1923 1924 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1925 for (i = 0; i < new_frags; i++) { 1926 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1927 if (!page) { 1928 while (head) { 1929 struct page *next = (struct page *)page_private(head); 1930 put_page(head); 1931 head = next; 1932 } 1933 return -ENOMEM; 1934 } 1935 set_page_private(page, (unsigned long)head); 1936 head = page; 1937 } 1938 1939 page = head; 1940 d_off = 0; 1941 for (i = 0; i < num_frags; i++) { 1942 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1943 u32 p_off, p_len, copied; 1944 struct page *p; 1945 u8 *vaddr; 1946 1947 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1948 p, p_off, p_len, copied) { 1949 u32 copy, done = 0; 1950 vaddr = kmap_atomic(p); 1951 1952 while (done < p_len) { 1953 if (d_off == psize) { 1954 d_off = 0; 1955 page = (struct page *)page_private(page); 1956 } 1957 copy = min_t(u32, psize - d_off, p_len - done); 1958 memcpy(page_address(page) + d_off, 1959 vaddr + p_off + done, copy); 1960 done += copy; 1961 d_off += copy; 1962 } 1963 kunmap_atomic(vaddr); 1964 } 1965 } 1966 1967 /* skb frags release userspace buffers */ 1968 for (i = 0; i < num_frags; i++) 1969 skb_frag_unref(skb, i); 1970 1971 /* skb frags point to kernel buffers */ 1972 for (i = 0; i < new_frags - 1; i++) { 1973 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 1974 head = (struct page *)page_private(head); 1975 } 1976 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 1977 d_off); 1978 skb_shinfo(skb)->nr_frags = new_frags; 1979 1980 release: 1981 skb_zcopy_clear(skb, false); 1982 return 0; 1983 } 1984 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 1985 1986 /** 1987 * skb_clone - duplicate an sk_buff 1988 * @skb: buffer to clone 1989 * @gfp_mask: allocation priority 1990 * 1991 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 1992 * copies share the same packet data but not structure. The new 1993 * buffer has a reference count of 1. If the allocation fails the 1994 * function returns %NULL otherwise the new buffer is returned. 1995 * 1996 * If this function is called from an interrupt gfp_mask() must be 1997 * %GFP_ATOMIC. 
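 *
 * A minimal usage sketch (illustrative only, hypothetical receive-path
 * caller, not taken from this file):
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return NET_RX_DROP;
 *	netif_rx(nskb);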
1998 */ 1999 2000 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2001 { 2002 struct sk_buff_fclones *fclones = container_of(skb, 2003 struct sk_buff_fclones, 2004 skb1); 2005 struct sk_buff *n; 2006 2007 if (skb_orphan_frags(skb, gfp_mask)) 2008 return NULL; 2009 2010 if (skb->fclone == SKB_FCLONE_ORIG && 2011 refcount_read(&fclones->fclone_ref) == 1) { 2012 n = &fclones->skb2; 2013 refcount_set(&fclones->fclone_ref, 2); 2014 n->fclone = SKB_FCLONE_CLONE; 2015 } else { 2016 if (skb_pfmemalloc(skb)) 2017 gfp_mask |= __GFP_MEMALLOC; 2018 2019 n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); 2020 if (!n) 2021 return NULL; 2022 2023 n->fclone = SKB_FCLONE_UNAVAILABLE; 2024 } 2025 2026 return __skb_clone(n, skb); 2027 } 2028 EXPORT_SYMBOL(skb_clone); 2029 2030 void skb_headers_offset_update(struct sk_buff *skb, int off) 2031 { 2032 /* Only adjust this if it actually is csum_start rather than csum */ 2033 if (skb->ip_summed == CHECKSUM_PARTIAL) 2034 skb->csum_start += off; 2035 /* {transport,network,mac}_header and tail are relative to skb->head */ 2036 skb->transport_header += off; 2037 skb->network_header += off; 2038 if (skb_mac_header_was_set(skb)) 2039 skb->mac_header += off; 2040 skb->inner_transport_header += off; 2041 skb->inner_network_header += off; 2042 skb->inner_mac_header += off; 2043 } 2044 EXPORT_SYMBOL(skb_headers_offset_update); 2045 2046 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2047 { 2048 __copy_skb_header(new, old); 2049 2050 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2051 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2052 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2053 } 2054 EXPORT_SYMBOL(skb_copy_header); 2055 2056 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2057 { 2058 if (skb_pfmemalloc(skb)) 2059 return SKB_ALLOC_RX; 2060 return 0; 2061 } 2062 2063 /** 2064 * skb_copy - create private copy of an sk_buff 2065 * @skb: buffer to copy 2066 * @gfp_mask: allocation priority 2067 * 2068 * Make a copy of both an &sk_buff and its data. This is used when the 2069 * caller wishes to modify the data and needs a private copy of the 2070 * data to alter. Returns %NULL on failure or the pointer to the buffer 2071 * on success. The returned buffer has a reference count of 1. 2072 * 2073 * As by-product this function converts non-linear &sk_buff to linear 2074 * one, so that &sk_buff becomes completely private and caller is allowed 2075 * to modify all the data of returned buffer. This means that this 2076 * function is not recommended for use in circumstances when only 2077 * header is going to be modified. Use pskb_copy() instead. 2078 */ 2079 2080 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2081 { 2082 int headerlen = skb_headroom(skb); 2083 unsigned int size = skb_end_offset(skb) + skb->data_len; 2084 struct sk_buff *n = __alloc_skb(size, gfp_mask, 2085 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2086 2087 if (!n) 2088 return NULL; 2089 2090 /* Set the data pointer */ 2091 skb_reserve(n, headerlen); 2092 /* Set the tail pointer and length */ 2093 skb_put(n, skb->len); 2094 2095 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2096 2097 skb_copy_header(n, skb); 2098 return n; 2099 } 2100 EXPORT_SYMBOL(skb_copy); 2101 2102 /** 2103 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
2104 * @skb: buffer to copy 2105 * @headroom: headroom of new skb 2106 * @gfp_mask: allocation priority 2107 * @fclone: if true allocate the copy of the skb from the fclone 2108 * cache instead of the head cache; it is recommended to set this 2109 * to true for the cases where the copy will likely be cloned 2110 * 2111 * Make a copy of both an &sk_buff and part of its data, located 2112 * in header. Fragmented data remain shared. This is used when 2113 * the caller wishes to modify only header of &sk_buff and needs 2114 * private copy of the header to alter. Returns %NULL on failure 2115 * or the pointer to the buffer on success. 2116 * The returned buffer has a reference count of 1. 2117 */ 2118 2119 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2120 gfp_t gfp_mask, bool fclone) 2121 { 2122 unsigned int size = skb_headlen(skb) + headroom; 2123 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2124 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2125 2126 if (!n) 2127 goto out; 2128 2129 /* Set the data pointer */ 2130 skb_reserve(n, headroom); 2131 /* Set the tail pointer and length */ 2132 skb_put(n, skb_headlen(skb)); 2133 /* Copy the bytes */ 2134 skb_copy_from_linear_data(skb, n->data, n->len); 2135 2136 n->truesize += skb->data_len; 2137 n->data_len = skb->data_len; 2138 n->len = skb->len; 2139 2140 if (skb_shinfo(skb)->nr_frags) { 2141 int i; 2142 2143 if (skb_orphan_frags(skb, gfp_mask) || 2144 skb_zerocopy_clone(n, skb, gfp_mask)) { 2145 kfree_skb(n); 2146 n = NULL; 2147 goto out; 2148 } 2149 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2150 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2151 skb_frag_ref(skb, i); 2152 } 2153 skb_shinfo(n)->nr_frags = i; 2154 } 2155 2156 if (skb_has_frag_list(skb)) { 2157 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2158 skb_clone_fraglist(n); 2159 } 2160 2161 skb_copy_header(n, skb); 2162 out: 2163 return n; 2164 } 2165 EXPORT_SYMBOL(__pskb_copy_fclone); 2166 2167 /** 2168 * pskb_expand_head - reallocate header of &sk_buff 2169 * @skb: buffer to reallocate 2170 * @nhead: room to add at head 2171 * @ntail: room to add at tail 2172 * @gfp_mask: allocation priority 2173 * 2174 * Expands (or creates identical copy, if @nhead and @ntail are zero) 2175 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 2176 * reference count of 1. Returns zero in the case of success or error, 2177 * if expansion failed. In the last case, &sk_buff is not changed. 2178 * 2179 * All the pointers pointing into skb header may change and must be 2180 * reloaded after call to this function. 2181 */ 2182 2183 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2184 gfp_t gfp_mask) 2185 { 2186 unsigned int osize = skb_end_offset(skb); 2187 unsigned int size = osize + nhead + ntail; 2188 long off; 2189 u8 *data; 2190 int i; 2191 2192 BUG_ON(nhead < 0); 2193 2194 BUG_ON(skb_shared(skb)); 2195 2196 skb_zcopy_downgrade_managed(skb); 2197 2198 if (skb_pfmemalloc(skb)) 2199 gfp_mask |= __GFP_MEMALLOC; 2200 2201 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2202 if (!data) 2203 goto nodata; 2204 size = SKB_WITH_OVERHEAD(size); 2205 2206 /* Copy only real data... and, alas, header. This should be 2207 * optimized for the cases when header is void. 
2208 */ 2209 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2210 2211 memcpy((struct skb_shared_info *)(data + size), 2212 skb_shinfo(skb), 2213 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2214 2215 /* 2216 * if shinfo is shared we must drop the old head gracefully, but if it 2217 * is not we can just drop the old head and let the existing refcount 2218 * be since all we did is relocate the values 2219 */ 2220 if (skb_cloned(skb)) { 2221 if (skb_orphan_frags(skb, gfp_mask)) 2222 goto nofrags; 2223 if (skb_zcopy(skb)) 2224 refcount_inc(&skb_uarg(skb)->refcnt); 2225 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2226 skb_frag_ref(skb, i); 2227 2228 if (skb_has_frag_list(skb)) 2229 skb_clone_fraglist(skb); 2230 2231 skb_release_data(skb, SKB_CONSUMED); 2232 } else { 2233 skb_free_head(skb); 2234 } 2235 off = (data + nhead) - skb->head; 2236 2237 skb->head = data; 2238 skb->head_frag = 0; 2239 skb->data += off; 2240 2241 skb_set_end_offset(skb, size); 2242 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2243 off = nhead; 2244 #endif 2245 skb->tail += off; 2246 skb_headers_offset_update(skb, nhead); 2247 skb->cloned = 0; 2248 skb->hdr_len = 0; 2249 skb->nohdr = 0; 2250 atomic_set(&skb_shinfo(skb)->dataref, 1); 2251 2252 skb_metadata_clear(skb); 2253 2254 /* It is not generally safe to change skb->truesize. 2255 * For the moment, we really care of rx path, or 2256 * when skb is orphaned (not attached to a socket). 2257 */ 2258 if (!skb->sk || skb->destructor == sock_edemux) 2259 skb->truesize += size - osize; 2260 2261 return 0; 2262 2263 nofrags: 2264 skb_kfree_head(data, size); 2265 nodata: 2266 return -ENOMEM; 2267 } 2268 EXPORT_SYMBOL(pskb_expand_head); 2269 2270 /* Make private copy of skb with writable head and some headroom */ 2271 2272 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2273 { 2274 struct sk_buff *skb2; 2275 int delta = headroom - skb_headroom(skb); 2276 2277 if (delta <= 0) 2278 skb2 = pskb_copy(skb, GFP_ATOMIC); 2279 else { 2280 skb2 = skb_clone(skb, GFP_ATOMIC); 2281 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2282 GFP_ATOMIC)) { 2283 kfree_skb(skb2); 2284 skb2 = NULL; 2285 } 2286 } 2287 return skb2; 2288 } 2289 EXPORT_SYMBOL(skb_realloc_headroom); 2290 2291 /* Note: We plan to rework this in linux-6.4 */ 2292 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2293 { 2294 unsigned int saved_end_offset, saved_truesize; 2295 struct skb_shared_info *shinfo; 2296 int res; 2297 2298 saved_end_offset = skb_end_offset(skb); 2299 saved_truesize = skb->truesize; 2300 2301 res = pskb_expand_head(skb, 0, 0, pri); 2302 if (res) 2303 return res; 2304 2305 skb->truesize = saved_truesize; 2306 2307 if (likely(skb_end_offset(skb) == saved_end_offset)) 2308 return 0; 2309 2310 /* We can not change skb->end if the original or new value 2311 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2312 */ 2313 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2314 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2315 /* We think this path should not be taken. 2316 * Add a temporary trace to warn us just in case. 2317 */ 2318 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2319 saved_end_offset, skb_end_offset(skb)); 2320 WARN_ON_ONCE(1); 2321 return 0; 2322 } 2323 2324 shinfo = skb_shinfo(skb); 2325 2326 /* We are about to change back skb->end, 2327 * we need to move skb_shinfo() to its new location. 
2328 */ 2329 memmove(skb->head + saved_end_offset, 2330 shinfo, 2331 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2332 2333 skb_set_end_offset(skb, saved_end_offset); 2334 2335 return 0; 2336 } 2337 2338 /** 2339 * skb_expand_head - reallocate header of &sk_buff 2340 * @skb: buffer to reallocate 2341 * @headroom: needed headroom 2342 * 2343 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2344 * if possible; copies skb->sk to new skb as needed 2345 * and frees original skb in case of failures. 2346 * 2347 * It expect increased headroom and generates warning otherwise. 2348 */ 2349 2350 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2351 { 2352 int delta = headroom - skb_headroom(skb); 2353 int osize = skb_end_offset(skb); 2354 struct sock *sk = skb->sk; 2355 2356 if (WARN_ONCE(delta <= 0, 2357 "%s is expecting an increase in the headroom", __func__)) 2358 return skb; 2359 2360 delta = SKB_DATA_ALIGN(delta); 2361 /* pskb_expand_head() might crash, if skb is shared. */ 2362 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2363 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2364 2365 if (unlikely(!nskb)) 2366 goto fail; 2367 2368 if (sk) 2369 skb_set_owner_w(nskb, sk); 2370 consume_skb(skb); 2371 skb = nskb; 2372 } 2373 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2374 goto fail; 2375 2376 if (sk && is_skb_wmem(skb)) { 2377 delta = skb_end_offset(skb) - osize; 2378 refcount_add(delta, &sk->sk_wmem_alloc); 2379 skb->truesize += delta; 2380 } 2381 return skb; 2382 2383 fail: 2384 kfree_skb(skb); 2385 return NULL; 2386 } 2387 EXPORT_SYMBOL(skb_expand_head); 2388 2389 /** 2390 * skb_copy_expand - copy and expand sk_buff 2391 * @skb: buffer to copy 2392 * @newheadroom: new free bytes at head 2393 * @newtailroom: new free bytes at tail 2394 * @gfp_mask: allocation priority 2395 * 2396 * Make a copy of both an &sk_buff and its data and while doing so 2397 * allocate additional space. 2398 * 2399 * This is used when the caller wishes to modify the data and needs a 2400 * private copy of the data to alter as well as more space for new fields. 2401 * Returns %NULL on failure or the pointer to the buffer 2402 * on success. The returned buffer has a reference count of 1. 2403 * 2404 * You must pass %GFP_ATOMIC as the allocation priority if this function 2405 * is called from an interrupt. 2406 */ 2407 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2408 int newheadroom, int newtailroom, 2409 gfp_t gfp_mask) 2410 { 2411 /* 2412 * Allocate the copy buffer 2413 */ 2414 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 2415 gfp_mask, skb_alloc_rx_flag(skb), 2416 NUMA_NO_NODE); 2417 int oldheadroom = skb_headroom(skb); 2418 int head_copy_len, head_copy_off; 2419 2420 if (!n) 2421 return NULL; 2422 2423 skb_reserve(n, newheadroom); 2424 2425 /* Set the tail pointer and length */ 2426 skb_put(n, skb->len); 2427 2428 head_copy_len = oldheadroom; 2429 head_copy_off = 0; 2430 if (newheadroom <= head_copy_len) 2431 head_copy_len = newheadroom; 2432 else 2433 head_copy_off = newheadroom - head_copy_len; 2434 2435 /* Copy the linear header and data. 
*/ 2436 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2437 skb->len + head_copy_len)); 2438 2439 skb_copy_header(n, skb); 2440 2441 skb_headers_offset_update(n, newheadroom - oldheadroom); 2442 2443 return n; 2444 } 2445 EXPORT_SYMBOL(skb_copy_expand); 2446 2447 /** 2448 * __skb_pad - zero pad the tail of an skb 2449 * @skb: buffer to pad 2450 * @pad: space to pad 2451 * @free_on_error: free buffer on error 2452 * 2453 * Ensure that a buffer is followed by a padding area that is zero 2454 * filled. Used by network drivers which may DMA or transfer data 2455 * beyond the buffer end onto the wire. 2456 * 2457 * May return error in out of memory cases. The skb is freed on error 2458 * if @free_on_error is true. 2459 */ 2460 2461 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2462 { 2463 int err; 2464 int ntail; 2465 2466 /* If the skbuff is non linear tailroom is always zero.. */ 2467 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2468 memset(skb->data+skb->len, 0, pad); 2469 return 0; 2470 } 2471 2472 ntail = skb->data_len + pad - (skb->end - skb->tail); 2473 if (likely(skb_cloned(skb) || ntail > 0)) { 2474 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2475 if (unlikely(err)) 2476 goto free_skb; 2477 } 2478 2479 /* FIXME: The use of this function with non-linear skb's really needs 2480 * to be audited. 2481 */ 2482 err = skb_linearize(skb); 2483 if (unlikely(err)) 2484 goto free_skb; 2485 2486 memset(skb->data + skb->len, 0, pad); 2487 return 0; 2488 2489 free_skb: 2490 if (free_on_error) 2491 kfree_skb(skb); 2492 return err; 2493 } 2494 EXPORT_SYMBOL(__skb_pad); 2495 2496 /** 2497 * pskb_put - add data to the tail of a potentially fragmented buffer 2498 * @skb: start of the buffer to use 2499 * @tail: tail fragment of the buffer to use 2500 * @len: amount of data to add 2501 * 2502 * This function extends the used data area of the potentially 2503 * fragmented buffer. @tail must be the last fragment of @skb -- or 2504 * @skb itself. If this would exceed the total buffer size the kernel 2505 * will panic. A pointer to the first byte of the extra data is 2506 * returned. 2507 */ 2508 2509 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2510 { 2511 if (tail != skb) { 2512 skb->data_len += len; 2513 skb->len += len; 2514 } 2515 return skb_put(tail, len); 2516 } 2517 EXPORT_SYMBOL_GPL(pskb_put); 2518 2519 /** 2520 * skb_put - add data to a buffer 2521 * @skb: buffer to use 2522 * @len: amount of data to add 2523 * 2524 * This function extends the used data area of the buffer. If this would 2525 * exceed the total buffer size the kernel will panic. A pointer to the 2526 * first byte of the extra data is returned. 2527 */ 2528 void *skb_put(struct sk_buff *skb, unsigned int len) 2529 { 2530 void *tmp = skb_tail_pointer(skb); 2531 SKB_LINEAR_ASSERT(skb); 2532 skb->tail += len; 2533 skb->len += len; 2534 if (unlikely(skb->tail > skb->end)) 2535 skb_over_panic(skb, len, __builtin_return_address(0)); 2536 return tmp; 2537 } 2538 EXPORT_SYMBOL(skb_put); 2539 2540 /** 2541 * skb_push - add data to the start of a buffer 2542 * @skb: buffer to use 2543 * @len: amount of data to add 2544 * 2545 * This function extends the used data area of the buffer at the buffer 2546 * start. If this would exceed the total buffer headroom the kernel will 2547 * panic. A pointer to the first byte of the extra data is returned. 
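 *
 * Illustrative sketch (hypothetical caller): prepend an Ethernet header
 * to a buffer that was allocated with sufficient headroom:
 *
 *	struct ethhdr *eth = skb_push(skb, ETH_HLEN);
 *
 *	eth->h_proto = htons(ETH_P_IP);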
2548 */ 2549 void *skb_push(struct sk_buff *skb, unsigned int len) 2550 { 2551 skb->data -= len; 2552 skb->len += len; 2553 if (unlikely(skb->data < skb->head)) 2554 skb_under_panic(skb, len, __builtin_return_address(0)); 2555 return skb->data; 2556 } 2557 EXPORT_SYMBOL(skb_push); 2558 2559 /** 2560 * skb_pull - remove data from the start of a buffer 2561 * @skb: buffer to use 2562 * @len: amount of data to remove 2563 * 2564 * This function removes data from the start of a buffer, returning 2565 * the memory to the headroom. A pointer to the next data in the buffer 2566 * is returned. Once the data has been pulled future pushes will overwrite 2567 * the old data. 2568 */ 2569 void *skb_pull(struct sk_buff *skb, unsigned int len) 2570 { 2571 return skb_pull_inline(skb, len); 2572 } 2573 EXPORT_SYMBOL(skb_pull); 2574 2575 /** 2576 * skb_pull_data - remove data from the start of a buffer returning its 2577 * original position. 2578 * @skb: buffer to use 2579 * @len: amount of data to remove 2580 * 2581 * This function removes data from the start of a buffer, returning 2582 * the memory to the headroom. A pointer to the original data in the buffer 2583 * is returned after checking if there is enough data to pull. Once the 2584 * data has been pulled future pushes will overwrite the old data. 2585 */ 2586 void *skb_pull_data(struct sk_buff *skb, size_t len) 2587 { 2588 void *data = skb->data; 2589 2590 if (skb->len < len) 2591 return NULL; 2592 2593 skb_pull(skb, len); 2594 2595 return data; 2596 } 2597 EXPORT_SYMBOL(skb_pull_data); 2598 2599 /** 2600 * skb_trim - remove end from a buffer 2601 * @skb: buffer to alter 2602 * @len: new length 2603 * 2604 * Cut the length of a buffer down by removing data from the tail. If 2605 * the buffer is already under the length specified it is not modified. 2606 * The skb must be linear. 2607 */ 2608 void skb_trim(struct sk_buff *skb, unsigned int len) 2609 { 2610 if (skb->len > len) 2611 __skb_trim(skb, len); 2612 } 2613 EXPORT_SYMBOL(skb_trim); 2614 2615 /* Trims skb to length len. It can change skb pointers. 
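 *
 * Most callers use the pskb_trim() wrapper rather than calling this
 * directly; an illustrative pattern (new_len being whatever length the
 * hypothetical caller wants to keep) is:
 *
 *	if (pskb_trim(skb, new_len))
 *		goto drop;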
2616 */ 2617 2618 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2619 { 2620 struct sk_buff **fragp; 2621 struct sk_buff *frag; 2622 int offset = skb_headlen(skb); 2623 int nfrags = skb_shinfo(skb)->nr_frags; 2624 int i; 2625 int err; 2626 2627 if (skb_cloned(skb) && 2628 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2629 return err; 2630 2631 i = 0; 2632 if (offset >= len) 2633 goto drop_pages; 2634 2635 for (; i < nfrags; i++) { 2636 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2637 2638 if (end < len) { 2639 offset = end; 2640 continue; 2641 } 2642 2643 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2644 2645 drop_pages: 2646 skb_shinfo(skb)->nr_frags = i; 2647 2648 for (; i < nfrags; i++) 2649 skb_frag_unref(skb, i); 2650 2651 if (skb_has_frag_list(skb)) 2652 skb_drop_fraglist(skb); 2653 goto done; 2654 } 2655 2656 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2657 fragp = &frag->next) { 2658 int end = offset + frag->len; 2659 2660 if (skb_shared(frag)) { 2661 struct sk_buff *nfrag; 2662 2663 nfrag = skb_clone(frag, GFP_ATOMIC); 2664 if (unlikely(!nfrag)) 2665 return -ENOMEM; 2666 2667 nfrag->next = frag->next; 2668 consume_skb(frag); 2669 frag = nfrag; 2670 *fragp = frag; 2671 } 2672 2673 if (end < len) { 2674 offset = end; 2675 continue; 2676 } 2677 2678 if (end > len && 2679 unlikely((err = pskb_trim(frag, len - offset)))) 2680 return err; 2681 2682 if (frag->next) 2683 skb_drop_list(&frag->next); 2684 break; 2685 } 2686 2687 done: 2688 if (len > skb_headlen(skb)) { 2689 skb->data_len -= skb->len - len; 2690 skb->len = len; 2691 } else { 2692 skb->len = len; 2693 skb->data_len = 0; 2694 skb_set_tail_pointer(skb, len); 2695 } 2696 2697 if (!skb->sk || skb->destructor == sock_edemux) 2698 skb_condense(skb); 2699 return 0; 2700 } 2701 EXPORT_SYMBOL(___pskb_trim); 2702 2703 /* Note : use pskb_trim_rcsum() instead of calling this directly 2704 */ 2705 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2706 { 2707 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2708 int delta = skb->len - len; 2709 2710 skb->csum = csum_block_sub(skb->csum, 2711 skb_checksum(skb, len, delta, 0), 2712 len); 2713 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2714 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; 2715 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2716 2717 if (offset + sizeof(__sum16) > hdlen) 2718 return -EINVAL; 2719 } 2720 return __pskb_trim(skb, len); 2721 } 2722 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2723 2724 /** 2725 * __pskb_pull_tail - advance tail of skb header 2726 * @skb: buffer to reallocate 2727 * @delta: number of bytes to advance tail 2728 * 2729 * The function makes a sense only on a fragmented &sk_buff, 2730 * it expands header moving its tail forward and copying necessary 2731 * data from fragmented part. 2732 * 2733 * &sk_buff MUST have reference count of 1. 2734 * 2735 * Returns %NULL (and &sk_buff does not change) if pull failed 2736 * or value of new tail of skb in the case of success. 2737 * 2738 * All the pointers pointing into skb header may change and must be 2739 * reloaded after call to this function. 2740 */ 2741 2742 /* Moves tail of skb head forward, copying data from fragmented part, 2743 * when it is necessary. 2744 * 1. It may fail due to malloc failure. 2745 * 2. It may change skb pointers. 2746 * 2747 * It is pretty complicated. Luckily, it is called only in exceptional cases. 
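 *
 * Callers normally reach it through pskb_may_pull(); an illustrative
 * header-parsing pattern (hypothetical caller, iph being a struct iphdr
 * pointer) is:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);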
2748 */ 2749 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2750 { 2751 /* If skb has not enough free space at tail, get new one 2752 * plus 128 bytes for future expansions. If we have enough 2753 * room at tail, reallocate without expansion only if skb is cloned. 2754 */ 2755 int i, k, eat = (skb->tail + delta) - skb->end; 2756 2757 if (eat > 0 || skb_cloned(skb)) { 2758 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2759 GFP_ATOMIC)) 2760 return NULL; 2761 } 2762 2763 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2764 skb_tail_pointer(skb), delta)); 2765 2766 /* Optimization: no fragments, no reasons to preestimate 2767 * size of pulled pages. Superb. 2768 */ 2769 if (!skb_has_frag_list(skb)) 2770 goto pull_pages; 2771 2772 /* Estimate size of pulled pages. */ 2773 eat = delta; 2774 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2775 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2776 2777 if (size >= eat) 2778 goto pull_pages; 2779 eat -= size; 2780 } 2781 2782 /* If we need update frag list, we are in troubles. 2783 * Certainly, it is possible to add an offset to skb data, 2784 * but taking into account that pulling is expected to 2785 * be very rare operation, it is worth to fight against 2786 * further bloating skb head and crucify ourselves here instead. 2787 * Pure masohism, indeed. 8)8) 2788 */ 2789 if (eat) { 2790 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2791 struct sk_buff *clone = NULL; 2792 struct sk_buff *insp = NULL; 2793 2794 do { 2795 if (list->len <= eat) { 2796 /* Eaten as whole. */ 2797 eat -= list->len; 2798 list = list->next; 2799 insp = list; 2800 } else { 2801 /* Eaten partially. */ 2802 if (skb_is_gso(skb) && !list->head_frag && 2803 skb_headlen(list)) 2804 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2805 2806 if (skb_shared(list)) { 2807 /* Sucks! We need to fork list. :-( */ 2808 clone = skb_clone(list, GFP_ATOMIC); 2809 if (!clone) 2810 return NULL; 2811 insp = list->next; 2812 list = clone; 2813 } else { 2814 /* This may be pulled without 2815 * problems. */ 2816 insp = list; 2817 } 2818 if (!pskb_pull(list, eat)) { 2819 kfree_skb(clone); 2820 return NULL; 2821 } 2822 break; 2823 } 2824 } while (eat); 2825 2826 /* Free pulled out fragments. */ 2827 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2828 skb_shinfo(skb)->frag_list = list->next; 2829 consume_skb(list); 2830 } 2831 /* And insert new clone at head. */ 2832 if (clone) { 2833 clone->next = list; 2834 skb_shinfo(skb)->frag_list = clone; 2835 } 2836 } 2837 /* Success! Now we may commit changes to skb data. 
*/ 2838 2839 pull_pages: 2840 eat = delta; 2841 k = 0; 2842 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2843 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2844 2845 if (size <= eat) { 2846 skb_frag_unref(skb, i); 2847 eat -= size; 2848 } else { 2849 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2850 2851 *frag = skb_shinfo(skb)->frags[i]; 2852 if (eat) { 2853 skb_frag_off_add(frag, eat); 2854 skb_frag_size_sub(frag, eat); 2855 if (!i) 2856 goto end; 2857 eat = 0; 2858 } 2859 k++; 2860 } 2861 } 2862 skb_shinfo(skb)->nr_frags = k; 2863 2864 end: 2865 skb->tail += delta; 2866 skb->data_len -= delta; 2867 2868 if (!skb->data_len) 2869 skb_zcopy_clear(skb, false); 2870 2871 return skb_tail_pointer(skb); 2872 } 2873 EXPORT_SYMBOL(__pskb_pull_tail); 2874 2875 /** 2876 * skb_copy_bits - copy bits from skb to kernel buffer 2877 * @skb: source skb 2878 * @offset: offset in source 2879 * @to: destination buffer 2880 * @len: number of bytes to copy 2881 * 2882 * Copy the specified number of bytes from the source skb to the 2883 * destination buffer. 2884 * 2885 * CAUTION ! : 2886 * If its prototype is ever changed, 2887 * check arch/{*}/net/{*}.S files, 2888 * since it is called from BPF assembly code. 2889 */ 2890 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2891 { 2892 int start = skb_headlen(skb); 2893 struct sk_buff *frag_iter; 2894 int i, copy; 2895 2896 if (offset > (int)skb->len - len) 2897 goto fault; 2898 2899 /* Copy header. */ 2900 if ((copy = start - offset) > 0) { 2901 if (copy > len) 2902 copy = len; 2903 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2904 if ((len -= copy) == 0) 2905 return 0; 2906 offset += copy; 2907 to += copy; 2908 } 2909 2910 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2911 int end; 2912 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2913 2914 WARN_ON(start > offset + len); 2915 2916 end = start + skb_frag_size(f); 2917 if ((copy = end - offset) > 0) { 2918 u32 p_off, p_len, copied; 2919 struct page *p; 2920 u8 *vaddr; 2921 2922 if (copy > len) 2923 copy = len; 2924 2925 skb_frag_foreach_page(f, 2926 skb_frag_off(f) + offset - start, 2927 copy, p, p_off, p_len, copied) { 2928 vaddr = kmap_atomic(p); 2929 memcpy(to + copied, vaddr + p_off, p_len); 2930 kunmap_atomic(vaddr); 2931 } 2932 2933 if ((len -= copy) == 0) 2934 return 0; 2935 offset += copy; 2936 to += copy; 2937 } 2938 start = end; 2939 } 2940 2941 skb_walk_frags(skb, frag_iter) { 2942 int end; 2943 2944 WARN_ON(start > offset + len); 2945 2946 end = start + frag_iter->len; 2947 if ((copy = end - offset) > 0) { 2948 if (copy > len) 2949 copy = len; 2950 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2951 goto fault; 2952 if ((len -= copy) == 0) 2953 return 0; 2954 offset += copy; 2955 to += copy; 2956 } 2957 start = end; 2958 } 2959 2960 if (!len) 2961 return 0; 2962 2963 fault: 2964 return -EFAULT; 2965 } 2966 EXPORT_SYMBOL(skb_copy_bits); 2967 2968 /* 2969 * Callback from splice_to_pipe(), if we need to release some pages 2970 * at the end of the spd in case we error'ed out in filling the pipe. 
2971 */ 2972 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 2973 { 2974 put_page(spd->pages[i]); 2975 } 2976 2977 static struct page *linear_to_page(struct page *page, unsigned int *len, 2978 unsigned int *offset, 2979 struct sock *sk) 2980 { 2981 struct page_frag *pfrag = sk_page_frag(sk); 2982 2983 if (!sk_page_frag_refill(sk, pfrag)) 2984 return NULL; 2985 2986 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 2987 2988 memcpy(page_address(pfrag->page) + pfrag->offset, 2989 page_address(page) + *offset, *len); 2990 *offset = pfrag->offset; 2991 pfrag->offset += *len; 2992 2993 return pfrag->page; 2994 } 2995 2996 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 2997 struct page *page, 2998 unsigned int offset) 2999 { 3000 return spd->nr_pages && 3001 spd->pages[spd->nr_pages - 1] == page && 3002 (spd->partial[spd->nr_pages - 1].offset + 3003 spd->partial[spd->nr_pages - 1].len == offset); 3004 } 3005 3006 /* 3007 * Fill page/offset/length into spd, if it can hold more pages. 3008 */ 3009 static bool spd_fill_page(struct splice_pipe_desc *spd, 3010 struct pipe_inode_info *pipe, struct page *page, 3011 unsigned int *len, unsigned int offset, 3012 bool linear, 3013 struct sock *sk) 3014 { 3015 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3016 return true; 3017 3018 if (linear) { 3019 page = linear_to_page(page, len, &offset, sk); 3020 if (!page) 3021 return true; 3022 } 3023 if (spd_can_coalesce(spd, page, offset)) { 3024 spd->partial[spd->nr_pages - 1].len += *len; 3025 return false; 3026 } 3027 get_page(page); 3028 spd->pages[spd->nr_pages] = page; 3029 spd->partial[spd->nr_pages].len = *len; 3030 spd->partial[spd->nr_pages].offset = offset; 3031 spd->nr_pages++; 3032 3033 return false; 3034 } 3035 3036 static bool __splice_segment(struct page *page, unsigned int poff, 3037 unsigned int plen, unsigned int *off, 3038 unsigned int *len, 3039 struct splice_pipe_desc *spd, bool linear, 3040 struct sock *sk, 3041 struct pipe_inode_info *pipe) 3042 { 3043 if (!*len) 3044 return true; 3045 3046 /* skip this segment if already processed */ 3047 if (*off >= plen) { 3048 *off -= plen; 3049 return false; 3050 } 3051 3052 /* ignore any bits we already processed */ 3053 poff += *off; 3054 plen -= *off; 3055 *off = 0; 3056 3057 do { 3058 unsigned int flen = min(*len, plen); 3059 3060 if (spd_fill_page(spd, pipe, page, &flen, poff, 3061 linear, sk)) 3062 return true; 3063 poff += flen; 3064 plen -= flen; 3065 *len -= flen; 3066 } while (*len && plen); 3067 3068 return false; 3069 } 3070 3071 /* 3072 * Map linear and fragment data from the skb to spd. It reports true if the 3073 * pipe is full or if we already spliced the requested length. 3074 */ 3075 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3076 unsigned int *offset, unsigned int *len, 3077 struct splice_pipe_desc *spd, struct sock *sk) 3078 { 3079 int seg; 3080 struct sk_buff *iter; 3081 3082 /* map the linear part : 3083 * If skb->head_frag is set, this 'linear' part is backed by a 3084 * fragment, and if the head is not shared with any clones then 3085 * we can avoid a copy since we own the head portion of this page. 
3086 */ 3087 if (__splice_segment(virt_to_page(skb->data), 3088 (unsigned long) skb->data & (PAGE_SIZE - 1), 3089 skb_headlen(skb), 3090 offset, len, spd, 3091 skb_head_is_locked(skb), 3092 sk, pipe)) 3093 return true; 3094 3095 /* 3096 * then map the fragments 3097 */ 3098 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3099 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3100 3101 if (__splice_segment(skb_frag_page(f), 3102 skb_frag_off(f), skb_frag_size(f), 3103 offset, len, spd, false, sk, pipe)) 3104 return true; 3105 } 3106 3107 skb_walk_frags(skb, iter) { 3108 if (*offset >= iter->len) { 3109 *offset -= iter->len; 3110 continue; 3111 } 3112 /* __skb_splice_bits() only fails if the output has no room 3113 * left, so no point in going over the frag_list for the error 3114 * case. 3115 */ 3116 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3117 return true; 3118 } 3119 3120 return false; 3121 } 3122 3123 /* 3124 * Map data from the skb to a pipe. Should handle both the linear part, 3125 * the fragments, and the frag list. 3126 */ 3127 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3128 struct pipe_inode_info *pipe, unsigned int tlen, 3129 unsigned int flags) 3130 { 3131 struct partial_page partial[MAX_SKB_FRAGS]; 3132 struct page *pages[MAX_SKB_FRAGS]; 3133 struct splice_pipe_desc spd = { 3134 .pages = pages, 3135 .partial = partial, 3136 .nr_pages_max = MAX_SKB_FRAGS, 3137 .ops = &nosteal_pipe_buf_ops, 3138 .spd_release = sock_spd_release, 3139 }; 3140 int ret = 0; 3141 3142 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3143 3144 if (spd.nr_pages) 3145 ret = splice_to_pipe(pipe, &spd); 3146 3147 return ret; 3148 } 3149 EXPORT_SYMBOL_GPL(skb_splice_bits); 3150 3151 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3152 { 3153 struct socket *sock = sk->sk_socket; 3154 size_t size = msg_data_left(msg); 3155 3156 if (!sock) 3157 return -EINVAL; 3158 3159 if (!sock->ops->sendmsg_locked) 3160 return sock_no_sendmsg_locked(sk, msg, size); 3161 3162 return sock->ops->sendmsg_locked(sk, msg, size); 3163 } 3164 3165 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3166 { 3167 struct socket *sock = sk->sk_socket; 3168 3169 if (!sock) 3170 return -EINVAL; 3171 return sock_sendmsg(sock, msg); 3172 } 3173 3174 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3175 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3176 int len, sendmsg_func sendmsg) 3177 { 3178 unsigned int orig_len = len; 3179 struct sk_buff *head = skb; 3180 unsigned short fragidx; 3181 int slen, ret; 3182 3183 do_frag_list: 3184 3185 /* Deal with head data */ 3186 while (offset < skb_headlen(skb) && len) { 3187 struct kvec kv; 3188 struct msghdr msg; 3189 3190 slen = min_t(int, len, skb_headlen(skb) - offset); 3191 kv.iov_base = skb->data + offset; 3192 kv.iov_len = slen; 3193 memset(&msg, 0, sizeof(msg)); 3194 msg.msg_flags = MSG_DONTWAIT; 3195 3196 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3197 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3198 sendmsg_unlocked, sk, &msg); 3199 if (ret <= 0) 3200 goto error; 3201 3202 offset += ret; 3203 len -= ret; 3204 } 3205 3206 /* All the data was skb head? 
*/ 3207 if (!len) 3208 goto out; 3209 3210 /* Make offset relative to start of frags */ 3211 offset -= skb_headlen(skb); 3212 3213 /* Find where we are in frag list */ 3214 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3215 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3216 3217 if (offset < skb_frag_size(frag)) 3218 break; 3219 3220 offset -= skb_frag_size(frag); 3221 } 3222 3223 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3224 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3225 3226 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3227 3228 while (slen) { 3229 struct bio_vec bvec; 3230 struct msghdr msg = { 3231 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3232 }; 3233 3234 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3235 skb_frag_off(frag) + offset); 3236 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3237 slen); 3238 3239 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3240 sendmsg_unlocked, sk, &msg); 3241 if (ret <= 0) 3242 goto error; 3243 3244 len -= ret; 3245 offset += ret; 3246 slen -= ret; 3247 } 3248 3249 offset = 0; 3250 } 3251 3252 if (len) { 3253 /* Process any frag lists */ 3254 3255 if (skb == head) { 3256 if (skb_has_frag_list(skb)) { 3257 skb = skb_shinfo(skb)->frag_list; 3258 goto do_frag_list; 3259 } 3260 } else if (skb->next) { 3261 skb = skb->next; 3262 goto do_frag_list; 3263 } 3264 } 3265 3266 out: 3267 return orig_len - len; 3268 3269 error: 3270 return orig_len == len ? ret : orig_len - len; 3271 } 3272 3273 /* Send skb data on a socket. Socket must be locked. */ 3274 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3275 int len) 3276 { 3277 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3278 } 3279 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3280 3281 /* Send skb data on a socket. Socket must be unlocked. */ 3282 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3283 { 3284 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3285 } 3286 3287 /** 3288 * skb_store_bits - store bits from kernel buffer to skb 3289 * @skb: destination buffer 3290 * @offset: offset in destination 3291 * @from: source buffer 3292 * @len: number of bytes to copy 3293 * 3294 * Copy the specified number of bytes from the source buffer to the 3295 * destination skb. This function handles all the messy bits of 3296 * traversing fragment lists and such. 
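 *
 * Illustrative sketch (hypothetical caller): overwrite @len bytes at
 * @offset in a possibly fragmented skb from a kernel buffer @from,
 * bailing out if the skb geometry cannot hold them:
 *
 *	if (skb_store_bits(skb, offset, from, len))
 *		return -EFAULT;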
3297 */ 3298 3299 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3300 { 3301 int start = skb_headlen(skb); 3302 struct sk_buff *frag_iter; 3303 int i, copy; 3304 3305 if (offset > (int)skb->len - len) 3306 goto fault; 3307 3308 if ((copy = start - offset) > 0) { 3309 if (copy > len) 3310 copy = len; 3311 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3312 if ((len -= copy) == 0) 3313 return 0; 3314 offset += copy; 3315 from += copy; 3316 } 3317 3318 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3319 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3320 int end; 3321 3322 WARN_ON(start > offset + len); 3323 3324 end = start + skb_frag_size(frag); 3325 if ((copy = end - offset) > 0) { 3326 u32 p_off, p_len, copied; 3327 struct page *p; 3328 u8 *vaddr; 3329 3330 if (copy > len) 3331 copy = len; 3332 3333 skb_frag_foreach_page(frag, 3334 skb_frag_off(frag) + offset - start, 3335 copy, p, p_off, p_len, copied) { 3336 vaddr = kmap_atomic(p); 3337 memcpy(vaddr + p_off, from + copied, p_len); 3338 kunmap_atomic(vaddr); 3339 } 3340 3341 if ((len -= copy) == 0) 3342 return 0; 3343 offset += copy; 3344 from += copy; 3345 } 3346 start = end; 3347 } 3348 3349 skb_walk_frags(skb, frag_iter) { 3350 int end; 3351 3352 WARN_ON(start > offset + len); 3353 3354 end = start + frag_iter->len; 3355 if ((copy = end - offset) > 0) { 3356 if (copy > len) 3357 copy = len; 3358 if (skb_store_bits(frag_iter, offset - start, 3359 from, copy)) 3360 goto fault; 3361 if ((len -= copy) == 0) 3362 return 0; 3363 offset += copy; 3364 from += copy; 3365 } 3366 start = end; 3367 } 3368 if (!len) 3369 return 0; 3370 3371 fault: 3372 return -EFAULT; 3373 } 3374 EXPORT_SYMBOL(skb_store_bits); 3375 3376 /* Checksum skb data. */ 3377 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3378 __wsum csum, const struct skb_checksum_ops *ops) 3379 { 3380 int start = skb_headlen(skb); 3381 int i, copy = start - offset; 3382 struct sk_buff *frag_iter; 3383 int pos = 0; 3384 3385 /* Checksum header. 
*/ 3386 if (copy > 0) { 3387 if (copy > len) 3388 copy = len; 3389 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3390 skb->data + offset, copy, csum); 3391 if ((len -= copy) == 0) 3392 return csum; 3393 offset += copy; 3394 pos = copy; 3395 } 3396 3397 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3398 int end; 3399 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3400 3401 WARN_ON(start > offset + len); 3402 3403 end = start + skb_frag_size(frag); 3404 if ((copy = end - offset) > 0) { 3405 u32 p_off, p_len, copied; 3406 struct page *p; 3407 __wsum csum2; 3408 u8 *vaddr; 3409 3410 if (copy > len) 3411 copy = len; 3412 3413 skb_frag_foreach_page(frag, 3414 skb_frag_off(frag) + offset - start, 3415 copy, p, p_off, p_len, copied) { 3416 vaddr = kmap_atomic(p); 3417 csum2 = INDIRECT_CALL_1(ops->update, 3418 csum_partial_ext, 3419 vaddr + p_off, p_len, 0); 3420 kunmap_atomic(vaddr); 3421 csum = INDIRECT_CALL_1(ops->combine, 3422 csum_block_add_ext, csum, 3423 csum2, pos, p_len); 3424 pos += p_len; 3425 } 3426 3427 if (!(len -= copy)) 3428 return csum; 3429 offset += copy; 3430 } 3431 start = end; 3432 } 3433 3434 skb_walk_frags(skb, frag_iter) { 3435 int end; 3436 3437 WARN_ON(start > offset + len); 3438 3439 end = start + frag_iter->len; 3440 if ((copy = end - offset) > 0) { 3441 __wsum csum2; 3442 if (copy > len) 3443 copy = len; 3444 csum2 = __skb_checksum(frag_iter, offset - start, 3445 copy, 0, ops); 3446 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3447 csum, csum2, pos, copy); 3448 if ((len -= copy) == 0) 3449 return csum; 3450 offset += copy; 3451 pos += copy; 3452 } 3453 start = end; 3454 } 3455 BUG_ON(len); 3456 3457 return csum; 3458 } 3459 EXPORT_SYMBOL(__skb_checksum); 3460 3461 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3462 int len, __wsum csum) 3463 { 3464 const struct skb_checksum_ops ops = { 3465 .update = csum_partial_ext, 3466 .combine = csum_block_add_ext, 3467 }; 3468 3469 return __skb_checksum(skb, offset, len, csum, &ops); 3470 } 3471 EXPORT_SYMBOL(skb_checksum); 3472 3473 /* Both of above in one bottle. */ 3474 3475 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3476 u8 *to, int len) 3477 { 3478 int start = skb_headlen(skb); 3479 int i, copy = start - offset; 3480 struct sk_buff *frag_iter; 3481 int pos = 0; 3482 __wsum csum = 0; 3483 3484 /* Copy header. 
*/ 3485 if (copy > 0) { 3486 if (copy > len) 3487 copy = len; 3488 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3489 copy); 3490 if ((len -= copy) == 0) 3491 return csum; 3492 offset += copy; 3493 to += copy; 3494 pos = copy; 3495 } 3496 3497 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3498 int end; 3499 3500 WARN_ON(start > offset + len); 3501 3502 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3503 if ((copy = end - offset) > 0) { 3504 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3505 u32 p_off, p_len, copied; 3506 struct page *p; 3507 __wsum csum2; 3508 u8 *vaddr; 3509 3510 if (copy > len) 3511 copy = len; 3512 3513 skb_frag_foreach_page(frag, 3514 skb_frag_off(frag) + offset - start, 3515 copy, p, p_off, p_len, copied) { 3516 vaddr = kmap_atomic(p); 3517 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3518 to + copied, 3519 p_len); 3520 kunmap_atomic(vaddr); 3521 csum = csum_block_add(csum, csum2, pos); 3522 pos += p_len; 3523 } 3524 3525 if (!(len -= copy)) 3526 return csum; 3527 offset += copy; 3528 to += copy; 3529 } 3530 start = end; 3531 } 3532 3533 skb_walk_frags(skb, frag_iter) { 3534 __wsum csum2; 3535 int end; 3536 3537 WARN_ON(start > offset + len); 3538 3539 end = start + frag_iter->len; 3540 if ((copy = end - offset) > 0) { 3541 if (copy > len) 3542 copy = len; 3543 csum2 = skb_copy_and_csum_bits(frag_iter, 3544 offset - start, 3545 to, copy); 3546 csum = csum_block_add(csum, csum2, pos); 3547 if ((len -= copy) == 0) 3548 return csum; 3549 offset += copy; 3550 to += copy; 3551 pos += copy; 3552 } 3553 start = end; 3554 } 3555 BUG_ON(len); 3556 return csum; 3557 } 3558 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3559 3560 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3561 { 3562 __sum16 sum; 3563 3564 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3565 /* See comments in __skb_checksum_complete(). */ 3566 if (likely(!sum)) { 3567 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3568 !skb->csum_complete_sw) 3569 netdev_rx_csum_fault(skb->dev, skb); 3570 } 3571 if (!skb_shared(skb)) 3572 skb->csum_valid = !sum; 3573 return sum; 3574 } 3575 EXPORT_SYMBOL(__skb_checksum_complete_head); 3576 3577 /* This function assumes skb->csum already holds pseudo header's checksum, 3578 * which has been changed from the hardware checksum, for example, by 3579 * __skb_checksum_validate_complete(). And, the original skb->csum must 3580 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3581 * 3582 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3583 * zero. The new checksum is stored back into skb->csum unless the skb is 3584 * shared. 3585 */ 3586 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3587 { 3588 __wsum csum; 3589 __sum16 sum; 3590 3591 csum = skb_checksum(skb, 0, skb->len, 0); 3592 3593 sum = csum_fold(csum_add(skb->csum, csum)); 3594 /* This check is inverted, because we already knew the hardware 3595 * checksum is invalid before calling this function. So, if the 3596 * re-computed checksum is valid instead, then we have a mismatch 3597 * between the original skb->csum and skb_checksum(). This means either 3598 * the original hardware checksum is incorrect or we screw up skb->csum 3599 * when moving skb->data around. 
3600 */ 3601 if (likely(!sum)) { 3602 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3603 !skb->csum_complete_sw) 3604 netdev_rx_csum_fault(skb->dev, skb); 3605 } 3606 3607 if (!skb_shared(skb)) { 3608 /* Save full packet checksum */ 3609 skb->csum = csum; 3610 skb->ip_summed = CHECKSUM_COMPLETE; 3611 skb->csum_complete_sw = 1; 3612 skb->csum_valid = !sum; 3613 } 3614 3615 return sum; 3616 } 3617 EXPORT_SYMBOL(__skb_checksum_complete); 3618 3619 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3620 { 3621 net_warn_ratelimited( 3622 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3623 __func__); 3624 return 0; 3625 } 3626 3627 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3628 int offset, int len) 3629 { 3630 net_warn_ratelimited( 3631 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3632 __func__); 3633 return 0; 3634 } 3635 3636 static const struct skb_checksum_ops default_crc32c_ops = { 3637 .update = warn_crc32c_csum_update, 3638 .combine = warn_crc32c_csum_combine, 3639 }; 3640 3641 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3642 &default_crc32c_ops; 3643 EXPORT_SYMBOL(crc32c_csum_stub); 3644 3645 /** 3646 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3647 * @from: source buffer 3648 * 3649 * Calculates the amount of linear headroom needed in the 'to' skb passed 3650 * into skb_zerocopy(). 3651 */ 3652 unsigned int 3653 skb_zerocopy_headlen(const struct sk_buff *from) 3654 { 3655 unsigned int hlen = 0; 3656 3657 if (!from->head_frag || 3658 skb_headlen(from) < L1_CACHE_BYTES || 3659 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3660 hlen = skb_headlen(from); 3661 if (!hlen) 3662 hlen = from->len; 3663 } 3664 3665 if (skb_has_frag_list(from)) 3666 hlen = from->len; 3667 3668 return hlen; 3669 } 3670 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3671 3672 /** 3673 * skb_zerocopy - Zero copy skb to skb 3674 * @to: destination buffer 3675 * @from: source buffer 3676 * @len: number of bytes to copy from source buffer 3677 * @hlen: size of linear headroom in destination buffer 3678 * 3679 * Copies up to `len` bytes from `from` to `to` by creating references 3680 * to the frags in the source buffer. 3681 * 3682 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3683 * headroom in the `to` buffer. 
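 *
 * A typical (illustrative) call pairs this with skb_zerocopy_headlen()
 * to size the destination's linear area; real callers size the
 * allocation to their own needs:
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (!to)
 *		return -ENOMEM;
 *	err = skb_zerocopy(to, from, from->len, hlen);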
3684 * 3685 * Return value: 3686 * 0: everything is OK 3687 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3688 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3689 */ 3690 int 3691 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3692 { 3693 int i, j = 0; 3694 int plen = 0; /* length of skb->head fragment */ 3695 int ret; 3696 struct page *page; 3697 unsigned int offset; 3698 3699 BUG_ON(!from->head_frag && !hlen); 3700 3701 /* dont bother with small payloads */ 3702 if (len <= skb_tailroom(to)) 3703 return skb_copy_bits(from, 0, skb_put(to, len), len); 3704 3705 if (hlen) { 3706 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3707 if (unlikely(ret)) 3708 return ret; 3709 len -= hlen; 3710 } else { 3711 plen = min_t(int, skb_headlen(from), len); 3712 if (plen) { 3713 page = virt_to_head_page(from->head); 3714 offset = from->data - (unsigned char *)page_address(page); 3715 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3716 offset, plen); 3717 get_page(page); 3718 j = 1; 3719 len -= plen; 3720 } 3721 } 3722 3723 skb_len_add(to, len + plen); 3724 3725 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3726 skb_tx_error(from); 3727 return -ENOMEM; 3728 } 3729 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3730 3731 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3732 int size; 3733 3734 if (!len) 3735 break; 3736 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3737 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3738 len); 3739 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3740 len -= size; 3741 skb_frag_ref(to, j); 3742 j++; 3743 } 3744 skb_shinfo(to)->nr_frags = j; 3745 3746 return 0; 3747 } 3748 EXPORT_SYMBOL_GPL(skb_zerocopy); 3749 3750 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3751 { 3752 __wsum csum; 3753 long csstart; 3754 3755 if (skb->ip_summed == CHECKSUM_PARTIAL) 3756 csstart = skb_checksum_start_offset(skb); 3757 else 3758 csstart = skb_headlen(skb); 3759 3760 BUG_ON(csstart > skb_headlen(skb)); 3761 3762 skb_copy_from_linear_data(skb, to, csstart); 3763 3764 csum = 0; 3765 if (csstart != skb->len) 3766 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3767 skb->len - csstart); 3768 3769 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3770 long csstuff = csstart + skb->csum_offset; 3771 3772 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3773 } 3774 } 3775 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3776 3777 /** 3778 * skb_dequeue - remove from the head of the queue 3779 * @list: list to dequeue from 3780 * 3781 * Remove the head of the list. The list lock is taken so the function 3782 * may be used safely with other locking list functions. The head item is 3783 * returned or %NULL if the list is empty. 3784 */ 3785 3786 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3787 { 3788 unsigned long flags; 3789 struct sk_buff *result; 3790 3791 spin_lock_irqsave(&list->lock, flags); 3792 result = __skb_dequeue(list); 3793 spin_unlock_irqrestore(&list->lock, flags); 3794 return result; 3795 } 3796 EXPORT_SYMBOL(skb_dequeue); 3797 3798 /** 3799 * skb_dequeue_tail - remove from the tail of the queue 3800 * @list: list to dequeue from 3801 * 3802 * Remove the tail of the list. The list lock is taken so the function 3803 * may be used safely with other locking list functions. The tail item is 3804 * returned or %NULL if the list is empty. 
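 *
 * Illustrative drain loop (hypothetical caller):
 *
 *	while ((skb = skb_dequeue_tail(&sk->sk_receive_queue)) != NULL)
 *		kfree_skb(skb);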
3805 */ 3806 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3807 { 3808 unsigned long flags; 3809 struct sk_buff *result; 3810 3811 spin_lock_irqsave(&list->lock, flags); 3812 result = __skb_dequeue_tail(list); 3813 spin_unlock_irqrestore(&list->lock, flags); 3814 return result; 3815 } 3816 EXPORT_SYMBOL(skb_dequeue_tail); 3817 3818 /** 3819 * skb_queue_purge_reason - empty a list 3820 * @list: list to empty 3821 * @reason: drop reason 3822 * 3823 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3824 * the list and one reference dropped. This function takes the list 3825 * lock and is atomic with respect to other list locking functions. 3826 */ 3827 void skb_queue_purge_reason(struct sk_buff_head *list, 3828 enum skb_drop_reason reason) 3829 { 3830 struct sk_buff_head tmp; 3831 unsigned long flags; 3832 3833 if (skb_queue_empty_lockless(list)) 3834 return; 3835 3836 __skb_queue_head_init(&tmp); 3837 3838 spin_lock_irqsave(&list->lock, flags); 3839 skb_queue_splice_init(list, &tmp); 3840 spin_unlock_irqrestore(&list->lock, flags); 3841 3842 __skb_queue_purge_reason(&tmp, reason); 3843 } 3844 EXPORT_SYMBOL(skb_queue_purge_reason); 3845 3846 /** 3847 * skb_rbtree_purge - empty a skb rbtree 3848 * @root: root of the rbtree to empty 3849 * Return value: the sum of truesizes of all purged skbs. 3850 * 3851 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3852 * the list and one reference dropped. This function does not take 3853 * any lock. Synchronization should be handled by the caller (e.g., TCP 3854 * out-of-order queue is protected by the socket lock). 3855 */ 3856 unsigned int skb_rbtree_purge(struct rb_root *root) 3857 { 3858 struct rb_node *p = rb_first(root); 3859 unsigned int sum = 0; 3860 3861 while (p) { 3862 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3863 3864 p = rb_next(p); 3865 rb_erase(&skb->rbnode, root); 3866 sum += skb->truesize; 3867 kfree_skb(skb); 3868 } 3869 return sum; 3870 } 3871 3872 void skb_errqueue_purge(struct sk_buff_head *list) 3873 { 3874 struct sk_buff *skb, *next; 3875 struct sk_buff_head kill; 3876 unsigned long flags; 3877 3878 __skb_queue_head_init(&kill); 3879 3880 spin_lock_irqsave(&list->lock, flags); 3881 skb_queue_walk_safe(list, skb, next) { 3882 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3883 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3884 continue; 3885 __skb_unlink(skb, list); 3886 __skb_queue_tail(&kill, skb); 3887 } 3888 spin_unlock_irqrestore(&list->lock, flags); 3889 __skb_queue_purge(&kill); 3890 } 3891 EXPORT_SYMBOL(skb_errqueue_purge); 3892 3893 /** 3894 * skb_queue_head - queue a buffer at the list head 3895 * @list: list to use 3896 * @newsk: buffer to queue 3897 * 3898 * Queue a buffer at the start of the list. This function takes the 3899 * list lock and can be used safely with other locking &sk_buff functions 3900 * safely. 3901 * 3902 * A buffer cannot be placed on two lists at the same time. 3903 */ 3904 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3905 { 3906 unsigned long flags; 3907 3908 spin_lock_irqsave(&list->lock, flags); 3909 __skb_queue_head(list, newsk); 3910 spin_unlock_irqrestore(&list->lock, flags); 3911 } 3912 EXPORT_SYMBOL(skb_queue_head); 3913 3914 /** 3915 * skb_queue_tail - queue a buffer at the list tail 3916 * @list: list to use 3917 * @newsk: buffer to queue 3918 * 3919 * Queue a buffer at the tail of the list. 
This function takes the 3920 * list lock and can be used safely with other locking &sk_buff functions 3921 * safely. 3922 * 3923 * A buffer cannot be placed on two lists at the same time. 3924 */ 3925 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3926 { 3927 unsigned long flags; 3928 3929 spin_lock_irqsave(&list->lock, flags); 3930 __skb_queue_tail(list, newsk); 3931 spin_unlock_irqrestore(&list->lock, flags); 3932 } 3933 EXPORT_SYMBOL(skb_queue_tail); 3934 3935 /** 3936 * skb_unlink - remove a buffer from a list 3937 * @skb: buffer to remove 3938 * @list: list to use 3939 * 3940 * Remove a packet from a list. The list locks are taken and this 3941 * function is atomic with respect to other list locked calls 3942 * 3943 * You must know what list the SKB is on. 3944 */ 3945 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3946 { 3947 unsigned long flags; 3948 3949 spin_lock_irqsave(&list->lock, flags); 3950 __skb_unlink(skb, list); 3951 spin_unlock_irqrestore(&list->lock, flags); 3952 } 3953 EXPORT_SYMBOL(skb_unlink); 3954 3955 /** 3956 * skb_append - append a buffer 3957 * @old: buffer to insert after 3958 * @newsk: buffer to insert 3959 * @list: list to use 3960 * 3961 * Place a packet after a given packet in a list. The list locks are taken 3962 * and this function is atomic with respect to other list locked calls. 3963 * A buffer cannot be placed on two lists at the same time. 3964 */ 3965 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 3966 { 3967 unsigned long flags; 3968 3969 spin_lock_irqsave(&list->lock, flags); 3970 __skb_queue_after(list, old, newsk); 3971 spin_unlock_irqrestore(&list->lock, flags); 3972 } 3973 EXPORT_SYMBOL(skb_append); 3974 3975 static inline void skb_split_inside_header(struct sk_buff *skb, 3976 struct sk_buff* skb1, 3977 const u32 len, const int pos) 3978 { 3979 int i; 3980 3981 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3982 pos - len); 3983 /* And move data appendix as is. */ 3984 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 3985 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 3986 3987 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 3988 skb_shinfo(skb)->nr_frags = 0; 3989 skb1->data_len = skb->data_len; 3990 skb1->len += skb1->data_len; 3991 skb->data_len = 0; 3992 skb->len = len; 3993 skb_set_tail_pointer(skb, len); 3994 } 3995 3996 static inline void skb_split_no_header(struct sk_buff *skb, 3997 struct sk_buff* skb1, 3998 const u32 len, int pos) 3999 { 4000 int i, k = 0; 4001 const int nfrags = skb_shinfo(skb)->nr_frags; 4002 4003 skb_shinfo(skb)->nr_frags = 0; 4004 skb1->len = skb1->data_len = skb->len - len; 4005 skb->len = len; 4006 skb->data_len = len - pos; 4007 4008 for (i = 0; i < nfrags; i++) { 4009 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4010 4011 if (pos + size > len) { 4012 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 4013 4014 if (pos < len) { 4015 /* Split frag. 4016 * We have two variants in this case: 4017 * 1. Move all the frag to the second 4018 * part, if it is possible. F.e. 4019 * this approach is mandatory for TUX, 4020 * where splitting is expensive. 4021 * 2. Split is accurately. We make this. 
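 *    That is, split exactly at the requested offset, which is what
 *    the code below does.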
4022 */ 4023 skb_frag_ref(skb, i); 4024 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 4025 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 4026 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 4027 skb_shinfo(skb)->nr_frags++; 4028 } 4029 k++; 4030 } else 4031 skb_shinfo(skb)->nr_frags++; 4032 pos += size; 4033 } 4034 skb_shinfo(skb1)->nr_frags = k; 4035 } 4036 4037 /** 4038 * skb_split - Split fragmented skb to two parts at length len. 4039 * @skb: the buffer to split 4040 * @skb1: the buffer to receive the second part 4041 * @len: new length for skb 4042 */ 4043 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 4044 { 4045 int pos = skb_headlen(skb); 4046 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; 4047 4048 skb_zcopy_downgrade_managed(skb); 4049 4050 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; 4051 skb_zerocopy_clone(skb1, skb, 0); 4052 if (len < pos) /* Split line is inside header. */ 4053 skb_split_inside_header(skb, skb1, len, pos); 4054 else /* Second chunk has no header, nothing to copy. */ 4055 skb_split_no_header(skb, skb1, len, pos); 4056 } 4057 EXPORT_SYMBOL(skb_split); 4058 4059 /* Shifting from/to a cloned skb is a no-go. 4060 * 4061 * Caller cannot keep skb_shinfo related pointers past calling here! 4062 */ 4063 static int skb_prepare_for_shift(struct sk_buff *skb) 4064 { 4065 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); 4066 } 4067 4068 /** 4069 * skb_shift - Shifts paged data partially from skb to another 4070 * @tgt: buffer into which tail data gets added 4071 * @skb: buffer from which the paged data comes from 4072 * @shiftlen: shift up to this many bytes 4073 * 4074 * Attempts to shift up to shiftlen worth of bytes, which may be less than 4075 * the length of the skb, from skb to tgt. Returns number bytes shifted. 4076 * It's up to caller to free skb if everything was shifted. 4077 * 4078 * If @tgt runs out of frags, the whole operation is aborted. 4079 * 4080 * Skb cannot include anything else but paged data while tgt is allowed 4081 * to have non-paged data as well. 4082 * 4083 * TODO: full sized shift could be optimized but that would need 4084 * specialized skb free'er to handle frags without up-to-date nr_frags. 4085 */ 4086 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 4087 { 4088 int from, to, merge, todo; 4089 skb_frag_t *fragfrom, *fragto; 4090 4091 BUG_ON(shiftlen > skb->len); 4092 4093 if (skb_headlen(skb)) 4094 return 0; 4095 if (skb_zcopy(tgt) || skb_zcopy(skb)) 4096 return 0; 4097 4098 todo = shiftlen; 4099 from = 0; 4100 to = skb_shinfo(tgt)->nr_frags; 4101 fragfrom = &skb_shinfo(skb)->frags[from]; 4102 4103 /* Actual merge is delayed until the point when we know we can 4104 * commit all, so that we don't have to undo partial changes 4105 */ 4106 if (!to || 4107 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 4108 skb_frag_off(fragfrom))) { 4109 merge = -1; 4110 } else { 4111 merge = to - 1; 4112 4113 todo -= skb_frag_size(fragfrom); 4114 if (todo < 0) { 4115 if (skb_prepare_for_shift(skb) || 4116 skb_prepare_for_shift(tgt)) 4117 return 0; 4118 4119 /* All previous frag pointers might be stale! 
*/ 4120 fragfrom = &skb_shinfo(skb)->frags[from]; 4121 fragto = &skb_shinfo(tgt)->frags[merge]; 4122 4123 skb_frag_size_add(fragto, shiftlen); 4124 skb_frag_size_sub(fragfrom, shiftlen); 4125 skb_frag_off_add(fragfrom, shiftlen); 4126 4127 goto onlymerged; 4128 } 4129 4130 from++; 4131 } 4132 4133 /* Skip full, not-fitting skb to avoid expensive operations */ 4134 if ((shiftlen == skb->len) && 4135 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4136 return 0; 4137 4138 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4139 return 0; 4140 4141 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4142 if (to == MAX_SKB_FRAGS) 4143 return 0; 4144 4145 fragfrom = &skb_shinfo(skb)->frags[from]; 4146 fragto = &skb_shinfo(tgt)->frags[to]; 4147 4148 if (todo >= skb_frag_size(fragfrom)) { 4149 *fragto = *fragfrom; 4150 todo -= skb_frag_size(fragfrom); 4151 from++; 4152 to++; 4153 4154 } else { 4155 __skb_frag_ref(fragfrom, skb->pp_recycle); 4156 skb_frag_page_copy(fragto, fragfrom); 4157 skb_frag_off_copy(fragto, fragfrom); 4158 skb_frag_size_set(fragto, todo); 4159 4160 skb_frag_off_add(fragfrom, todo); 4161 skb_frag_size_sub(fragfrom, todo); 4162 todo = 0; 4163 4164 to++; 4165 break; 4166 } 4167 } 4168 4169 /* Ready to "commit" this state change to tgt */ 4170 skb_shinfo(tgt)->nr_frags = to; 4171 4172 if (merge >= 0) { 4173 fragfrom = &skb_shinfo(skb)->frags[0]; 4174 fragto = &skb_shinfo(tgt)->frags[merge]; 4175 4176 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4177 __skb_frag_unref(fragfrom, skb->pp_recycle); 4178 } 4179 4180 /* Reposition in the original skb */ 4181 to = 0; 4182 while (from < skb_shinfo(skb)->nr_frags) 4183 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4184 skb_shinfo(skb)->nr_frags = to; 4185 4186 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4187 4188 onlymerged: 4189 /* Most likely the tgt won't ever need its checksum anymore, skb on 4190 * the other hand might need it if it needs to be resent 4191 */ 4192 tgt->ip_summed = CHECKSUM_PARTIAL; 4193 skb->ip_summed = CHECKSUM_PARTIAL; 4194 4195 skb_len_add(skb, -shiftlen); 4196 skb_len_add(tgt, shiftlen); 4197 4198 return shiftlen; 4199 } 4200 4201 /** 4202 * skb_prepare_seq_read - Prepare a sequential read of skb data 4203 * @skb: the buffer to read 4204 * @from: lower offset of data to be read 4205 * @to: upper offset of data to be read 4206 * @st: state variable 4207 * 4208 * Initializes the specified state variable. Must be called before 4209 * invoking skb_seq_read() for the first time. 4210 */ 4211 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4212 unsigned int to, struct skb_seq_state *st) 4213 { 4214 st->lower_offset = from; 4215 st->upper_offset = to; 4216 st->root_skb = st->cur_skb = skb; 4217 st->frag_idx = st->stepped_offset = 0; 4218 st->frag_data = NULL; 4219 st->frag_off = 0; 4220 } 4221 EXPORT_SYMBOL(skb_prepare_seq_read); 4222 4223 /** 4224 * skb_seq_read - Sequentially read skb data 4225 * @consumed: number of bytes consumed by the caller so far 4226 * @data: destination pointer for data to be returned 4227 * @st: state variable 4228 * 4229 * Reads a block of skb data at @consumed relative to the 4230 * lower offset specified to skb_prepare_seq_read(). Assigns 4231 * the head of the data block to @data and returns the length 4232 * of the block or 0 if the end of the skb data or the upper 4233 * offset has been reached. 4234 * 4235 * The caller is not required to consume all of the data 4236 * returned, i.e. 
@consumed is typically set to the number 4237 * of bytes already consumed and the next call to 4238 * skb_seq_read() will return the remaining part of the block. 4239 * 4240 * Note 1: The size of each block of data returned can be arbitrary, 4241 * this limitation is the cost for zerocopy sequential 4242 * reads of potentially non linear data. 4243 * 4244 * Note 2: Fragment lists within fragments are not implemented 4245 * at the moment, state->root_skb could be replaced with 4246 * a stack for this purpose. 4247 */ 4248 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4249 struct skb_seq_state *st) 4250 { 4251 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4252 skb_frag_t *frag; 4253 4254 if (unlikely(abs_offset >= st->upper_offset)) { 4255 if (st->frag_data) { 4256 kunmap_atomic(st->frag_data); 4257 st->frag_data = NULL; 4258 } 4259 return 0; 4260 } 4261 4262 next_skb: 4263 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4264 4265 if (abs_offset < block_limit && !st->frag_data) { 4266 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4267 return block_limit - abs_offset; 4268 } 4269 4270 if (st->frag_idx == 0 && !st->frag_data) 4271 st->stepped_offset += skb_headlen(st->cur_skb); 4272 4273 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4274 unsigned int pg_idx, pg_off, pg_sz; 4275 4276 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4277 4278 pg_idx = 0; 4279 pg_off = skb_frag_off(frag); 4280 pg_sz = skb_frag_size(frag); 4281 4282 if (skb_frag_must_loop(skb_frag_page(frag))) { 4283 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4284 pg_off = offset_in_page(pg_off + st->frag_off); 4285 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4286 PAGE_SIZE - pg_off); 4287 } 4288 4289 block_limit = pg_sz + st->stepped_offset; 4290 if (abs_offset < block_limit) { 4291 if (!st->frag_data) 4292 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4293 4294 *data = (u8 *)st->frag_data + pg_off + 4295 (abs_offset - st->stepped_offset); 4296 4297 return block_limit - abs_offset; 4298 } 4299 4300 if (st->frag_data) { 4301 kunmap_atomic(st->frag_data); 4302 st->frag_data = NULL; 4303 } 4304 4305 st->stepped_offset += pg_sz; 4306 st->frag_off += pg_sz; 4307 if (st->frag_off == skb_frag_size(frag)) { 4308 st->frag_off = 0; 4309 st->frag_idx++; 4310 } 4311 } 4312 4313 if (st->frag_data) { 4314 kunmap_atomic(st->frag_data); 4315 st->frag_data = NULL; 4316 } 4317 4318 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4319 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4320 st->frag_idx = 0; 4321 goto next_skb; 4322 } else if (st->cur_skb->next) { 4323 st->cur_skb = st->cur_skb->next; 4324 st->frag_idx = 0; 4325 goto next_skb; 4326 } 4327 4328 return 0; 4329 } 4330 EXPORT_SYMBOL(skb_seq_read); 4331 4332 /** 4333 * skb_abort_seq_read - Abort a sequential read of skb data 4334 * @st: state variable 4335 * 4336 * Must be called if skb_seq_read() was not called until it 4337 * returned 0. 
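 *
 * A minimal caller sketch (illustrative only; process_block() and the
 * local variables are assumptions, not part of this file). A read that
 * is driven until skb_seq_read() returns 0 cleans up after itself, so
 * the abort is only needed on the early-exit path:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (!process_block(data, len)) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}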
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config)
{
	unsigned int patlen = config->ops->get_pattern_len(config);
	struct ts_state state;
	unsigned int ret;

	BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));

	ret = textsearch_find(config, &state);
	return (ret + patlen <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size, size_t max_frags)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, offset)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else if (i < max_frags) {
		skb_zcopy_downgrade_managed(skb);
		get_page(page);
		skb_fill_page_desc_noacc(skb, i, page, offset, size);
	} else {
		return -EMSGSIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
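 *
 * A hedged usage sketch (illustrative; hdr_len is an assumed local, e.g.
 * the length of an encapsulation header being stripped on receive):
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdr_len);
 *	skb_reset_network_header(skb);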
4420 */ 4421 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4422 { 4423 unsigned char *data = skb->data; 4424 4425 BUG_ON(len > skb->len); 4426 __skb_pull(skb, len); 4427 skb_postpull_rcsum(skb, data, len); 4428 return skb->data; 4429 } 4430 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4431 4432 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4433 { 4434 skb_frag_t head_frag; 4435 struct page *page; 4436 4437 page = virt_to_head_page(frag_skb->head); 4438 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4439 (unsigned char *)page_address(page), 4440 skb_headlen(frag_skb)); 4441 return head_frag; 4442 } 4443 4444 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4445 netdev_features_t features, 4446 unsigned int offset) 4447 { 4448 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4449 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4450 unsigned int delta_truesize = 0; 4451 unsigned int delta_len = 0; 4452 struct sk_buff *tail = NULL; 4453 struct sk_buff *nskb, *tmp; 4454 int len_diff, err; 4455 4456 skb_push(skb, -skb_network_offset(skb) + offset); 4457 4458 /* Ensure the head is writeable before touching the shared info */ 4459 err = skb_unclone(skb, GFP_ATOMIC); 4460 if (err) 4461 goto err_linearize; 4462 4463 skb_shinfo(skb)->frag_list = NULL; 4464 4465 while (list_skb) { 4466 nskb = list_skb; 4467 list_skb = list_skb->next; 4468 4469 err = 0; 4470 delta_truesize += nskb->truesize; 4471 if (skb_shared(nskb)) { 4472 tmp = skb_clone(nskb, GFP_ATOMIC); 4473 if (tmp) { 4474 consume_skb(nskb); 4475 nskb = tmp; 4476 err = skb_unclone(nskb, GFP_ATOMIC); 4477 } else { 4478 err = -ENOMEM; 4479 } 4480 } 4481 4482 if (!tail) 4483 skb->next = nskb; 4484 else 4485 tail->next = nskb; 4486 4487 if (unlikely(err)) { 4488 nskb->next = list_skb; 4489 goto err_linearize; 4490 } 4491 4492 tail = nskb; 4493 4494 delta_len += nskb->len; 4495 4496 skb_push(nskb, -skb_network_offset(nskb) + offset); 4497 4498 skb_release_head_state(nskb); 4499 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4500 __copy_skb_header(nskb, skb); 4501 4502 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4503 nskb->transport_header += len_diff; 4504 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4505 nskb->data - tnl_hlen, 4506 offset + tnl_hlen); 4507 4508 if (skb_needs_linearize(nskb, features) && 4509 __skb_linearize(nskb)) 4510 goto err_linearize; 4511 } 4512 4513 skb->truesize = skb->truesize - delta_truesize; 4514 skb->data_len = skb->data_len - delta_len; 4515 skb->len = skb->len - delta_len; 4516 4517 skb_gso_reset(skb); 4518 4519 skb->prev = tail; 4520 4521 if (skb_needs_linearize(skb, features) && 4522 __skb_linearize(skb)) 4523 goto err_linearize; 4524 4525 skb_get(skb); 4526 4527 return skb; 4528 4529 err_linearize: 4530 kfree_skb_list(skb->next); 4531 skb->next = NULL; 4532 return ERR_PTR(-ENOMEM); 4533 } 4534 EXPORT_SYMBOL_GPL(skb_segment_list); 4535 4536 /** 4537 * skb_segment - Perform protocol segmentation on skb. 4538 * @head_skb: buffer to segment 4539 * @features: features for the output path (see dev->features) 4540 * 4541 * This function performs segmentation on the given skb. It returns 4542 * a pointer to the first in a list of new skbs for the segments. 4543 * In case of error it returns ERR_PTR(err). 
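 *
 * A minimal caller sketch (illustrative only; the features mask and the
 * handling of each segment are the caller's business):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	(walk segs via ->next and hand each segment to the transmit path)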
4544 */ 4545 struct sk_buff *skb_segment(struct sk_buff *head_skb, 4546 netdev_features_t features) 4547 { 4548 struct sk_buff *segs = NULL; 4549 struct sk_buff *tail = NULL; 4550 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 4551 unsigned int mss = skb_shinfo(head_skb)->gso_size; 4552 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 4553 unsigned int offset = doffset; 4554 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 4555 unsigned int partial_segs = 0; 4556 unsigned int headroom; 4557 unsigned int len = head_skb->len; 4558 struct sk_buff *frag_skb; 4559 skb_frag_t *frag; 4560 __be16 proto; 4561 bool csum, sg; 4562 int err = -ENOMEM; 4563 int i = 0; 4564 int nfrags, pos; 4565 4566 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && 4567 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { 4568 struct sk_buff *check_skb; 4569 4570 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { 4571 if (skb_headlen(check_skb) && !check_skb->head_frag) { 4572 /* gso_size is untrusted, and we have a frag_list with 4573 * a linear non head_frag item. 4574 * 4575 * If head_skb's headlen does not fit requested gso_size, 4576 * it means that the frag_list members do NOT terminate 4577 * on exact gso_size boundaries. Hence we cannot perform 4578 * skb_frag_t page sharing. Therefore we must fallback to 4579 * copying the frag_list skbs; we do so by disabling SG. 4580 */ 4581 features &= ~NETIF_F_SG; 4582 break; 4583 } 4584 } 4585 } 4586 4587 __skb_push(head_skb, doffset); 4588 proto = skb_network_protocol(head_skb, NULL); 4589 if (unlikely(!proto)) 4590 return ERR_PTR(-EINVAL); 4591 4592 sg = !!(features & NETIF_F_SG); 4593 csum = !!can_checksum_protocol(features, proto); 4594 4595 if (sg && csum && (mss != GSO_BY_FRAGS)) { 4596 if (!(features & NETIF_F_GSO_PARTIAL)) { 4597 struct sk_buff *iter; 4598 unsigned int frag_len; 4599 4600 if (!list_skb || 4601 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 4602 goto normal; 4603 4604 /* If we get here then all the required 4605 * GSO features except frag_list are supported. 4606 * Try to split the SKB to multiple GSO SKBs 4607 * with no frag_list. 4608 * Currently we can do that only when the buffers don't 4609 * have a linear part and all the buffers except 4610 * the last are of the same length. 4611 */ 4612 frag_len = list_skb->len; 4613 skb_walk_frags(head_skb, iter) { 4614 if (frag_len != iter->len && iter->next) 4615 goto normal; 4616 if (skb_headlen(iter) && !iter->head_frag) 4617 goto normal; 4618 4619 len -= iter->len; 4620 } 4621 4622 if (len != frag_len) 4623 goto normal; 4624 } 4625 4626 /* GSO partial only requires that we trim off any excess that 4627 * doesn't fit into an MSS sized block, so take care of that 4628 * now. 4629 * Cap len to not accidentally hit GSO_BY_FRAGS. 
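	 *
	 * Illustrative arithmetic (the numbers are assumptions): with
	 * len = 65000 and mss = 1448, partial_segs = 65000 / 1448 = 44,
	 * so the effective mss below becomes 44 * 1448 = 63712 and the
	 * remainder ends up in the final, shorter segment.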
4630 */ 4631 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4632 if (partial_segs > 1) 4633 mss *= partial_segs; 4634 else 4635 partial_segs = 0; 4636 } 4637 4638 normal: 4639 headroom = skb_headroom(head_skb); 4640 pos = skb_headlen(head_skb); 4641 4642 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4643 return ERR_PTR(-ENOMEM); 4644 4645 nfrags = skb_shinfo(head_skb)->nr_frags; 4646 frag = skb_shinfo(head_skb)->frags; 4647 frag_skb = head_skb; 4648 4649 do { 4650 struct sk_buff *nskb; 4651 skb_frag_t *nskb_frag; 4652 int hsize; 4653 int size; 4654 4655 if (unlikely(mss == GSO_BY_FRAGS)) { 4656 len = list_skb->len; 4657 } else { 4658 len = head_skb->len - offset; 4659 if (len > mss) 4660 len = mss; 4661 } 4662 4663 hsize = skb_headlen(head_skb) - offset; 4664 4665 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4666 (skb_headlen(list_skb) == len || sg)) { 4667 BUG_ON(skb_headlen(list_skb) > len); 4668 4669 nskb = skb_clone(list_skb, GFP_ATOMIC); 4670 if (unlikely(!nskb)) 4671 goto err; 4672 4673 i = 0; 4674 nfrags = skb_shinfo(list_skb)->nr_frags; 4675 frag = skb_shinfo(list_skb)->frags; 4676 frag_skb = list_skb; 4677 pos += skb_headlen(list_skb); 4678 4679 while (pos < offset + len) { 4680 BUG_ON(i >= nfrags); 4681 4682 size = skb_frag_size(frag); 4683 if (pos + size > offset + len) 4684 break; 4685 4686 i++; 4687 pos += size; 4688 frag++; 4689 } 4690 4691 list_skb = list_skb->next; 4692 4693 if (unlikely(pskb_trim(nskb, len))) { 4694 kfree_skb(nskb); 4695 goto err; 4696 } 4697 4698 hsize = skb_end_offset(nskb); 4699 if (skb_cow_head(nskb, doffset + headroom)) { 4700 kfree_skb(nskb); 4701 goto err; 4702 } 4703 4704 nskb->truesize += skb_end_offset(nskb) - hsize; 4705 skb_release_head_state(nskb); 4706 __skb_push(nskb, doffset); 4707 } else { 4708 if (hsize < 0) 4709 hsize = 0; 4710 if (hsize > len || !sg) 4711 hsize = len; 4712 4713 nskb = __alloc_skb(hsize + doffset + headroom, 4714 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4715 NUMA_NO_NODE); 4716 4717 if (unlikely(!nskb)) 4718 goto err; 4719 4720 skb_reserve(nskb, headroom); 4721 __skb_put(nskb, doffset); 4722 } 4723 4724 if (segs) 4725 tail->next = nskb; 4726 else 4727 segs = nskb; 4728 tail = nskb; 4729 4730 __copy_skb_header(nskb, head_skb); 4731 4732 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4733 skb_reset_mac_len(nskb); 4734 4735 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4736 nskb->data - tnl_hlen, 4737 doffset + tnl_hlen); 4738 4739 if (nskb->len == len + doffset) 4740 goto perform_csum_check; 4741 4742 if (!sg) { 4743 if (!csum) { 4744 if (!nskb->remcsum_offload) 4745 nskb->ip_summed = CHECKSUM_NONE; 4746 SKB_GSO_CB(nskb)->csum = 4747 skb_copy_and_csum_bits(head_skb, offset, 4748 skb_put(nskb, 4749 len), 4750 len); 4751 SKB_GSO_CB(nskb)->csum_start = 4752 skb_headroom(nskb) + doffset; 4753 } else { 4754 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4755 goto err; 4756 } 4757 continue; 4758 } 4759 4760 nskb_frag = skb_shinfo(nskb)->frags; 4761 4762 skb_copy_from_linear_data_offset(head_skb, offset, 4763 skb_put(nskb, hsize), hsize); 4764 4765 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 4766 SKBFL_SHARED_FRAG; 4767 4768 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4769 goto err; 4770 4771 while (pos < offset + len) { 4772 if (i >= nfrags) { 4773 if (skb_orphan_frags(list_skb, GFP_ATOMIC) || 4774 skb_zerocopy_clone(nskb, list_skb, 4775 GFP_ATOMIC)) 4776 goto err; 4777 4778 i = 0; 4779 nfrags = skb_shinfo(list_skb)->nr_frags; 4780 frag = 
skb_shinfo(list_skb)->frags; 4781 frag_skb = list_skb; 4782 if (!skb_headlen(list_skb)) { 4783 BUG_ON(!nfrags); 4784 } else { 4785 BUG_ON(!list_skb->head_frag); 4786 4787 /* to make room for head_frag. */ 4788 i--; 4789 frag--; 4790 } 4791 4792 list_skb = list_skb->next; 4793 } 4794 4795 if (unlikely(skb_shinfo(nskb)->nr_frags >= 4796 MAX_SKB_FRAGS)) { 4797 net_warn_ratelimited( 4798 "skb_segment: too many frags: %u %u\n", 4799 pos, mss); 4800 err = -EINVAL; 4801 goto err; 4802 } 4803 4804 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 4805 __skb_frag_ref(nskb_frag, nskb->pp_recycle); 4806 size = skb_frag_size(nskb_frag); 4807 4808 if (pos < offset) { 4809 skb_frag_off_add(nskb_frag, offset - pos); 4810 skb_frag_size_sub(nskb_frag, offset - pos); 4811 } 4812 4813 skb_shinfo(nskb)->nr_frags++; 4814 4815 if (pos + size <= offset + len) { 4816 i++; 4817 frag++; 4818 pos += size; 4819 } else { 4820 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4821 goto skip_fraglist; 4822 } 4823 4824 nskb_frag++; 4825 } 4826 4827 skip_fraglist: 4828 nskb->data_len = len - hsize; 4829 nskb->len += nskb->data_len; 4830 nskb->truesize += nskb->data_len; 4831 4832 perform_csum_check: 4833 if (!csum) { 4834 if (skb_has_shared_frag(nskb) && 4835 __skb_linearize(nskb)) 4836 goto err; 4837 4838 if (!nskb->remcsum_offload) 4839 nskb->ip_summed = CHECKSUM_NONE; 4840 SKB_GSO_CB(nskb)->csum = 4841 skb_checksum(nskb, doffset, 4842 nskb->len - doffset, 0); 4843 SKB_GSO_CB(nskb)->csum_start = 4844 skb_headroom(nskb) + doffset; 4845 } 4846 } while ((offset += len) < head_skb->len); 4847 4848 /* Some callers want to get the end of the list. 4849 * Put it in segs->prev to avoid walking the list. 4850 * (see validate_xmit_skb_list() for example) 4851 */ 4852 segs->prev = tail; 4853 4854 if (partial_segs) { 4855 struct sk_buff *iter; 4856 int type = skb_shinfo(head_skb)->gso_type; 4857 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4858 4859 /* Update type to add partial and then remove dodgy if set */ 4860 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4861 type &= ~SKB_GSO_DODGY; 4862 4863 /* Update GSO info and prepare to start updating headers on 4864 * our way back down the stack of protocols. 4865 */ 4866 for (iter = segs; iter; iter = iter->next) { 4867 skb_shinfo(iter)->gso_size = gso_size; 4868 skb_shinfo(iter)->gso_segs = partial_segs; 4869 skb_shinfo(iter)->gso_type = type; 4870 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4871 } 4872 4873 if (tail->len - doffset <= gso_size) 4874 skb_shinfo(tail)->gso_size = 0; 4875 else if (tail != segs) 4876 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4877 } 4878 4879 /* Following permits correct backpressure, for protocols 4880 * using skb_set_owner_w(). 4881 * Idea is to tranfert ownership from head_skb to last segment. 
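	 * Concretely, the swap below moves the sock_wfree destructor, the
	 * socket pointer and the truesize accounting from head_skb to the
	 * tail segment, so the send buffer is only credited back once the
	 * last segment is freed.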
4882 */ 4883 if (head_skb->destructor == sock_wfree) { 4884 swap(tail->truesize, head_skb->truesize); 4885 swap(tail->destructor, head_skb->destructor); 4886 swap(tail->sk, head_skb->sk); 4887 } 4888 return segs; 4889 4890 err: 4891 kfree_skb_list(segs); 4892 return ERR_PTR(err); 4893 } 4894 EXPORT_SYMBOL_GPL(skb_segment); 4895 4896 #ifdef CONFIG_SKB_EXTENSIONS 4897 #define SKB_EXT_ALIGN_VALUE 8 4898 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4899 4900 static const u8 skb_ext_type_len[] = { 4901 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4902 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4903 #endif 4904 #ifdef CONFIG_XFRM 4905 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4906 #endif 4907 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4908 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4909 #endif 4910 #if IS_ENABLED(CONFIG_MPTCP) 4911 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4912 #endif 4913 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4914 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 4915 #endif 4916 }; 4917 4918 static __always_inline unsigned int skb_ext_total_length(void) 4919 { 4920 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 4921 int i; 4922 4923 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 4924 l += skb_ext_type_len[i]; 4925 4926 return l; 4927 } 4928 4929 static void skb_extensions_init(void) 4930 { 4931 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4932 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 4933 BUILD_BUG_ON(skb_ext_total_length() > 255); 4934 #endif 4935 4936 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4937 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4938 0, 4939 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4940 NULL); 4941 } 4942 #else 4943 static void skb_extensions_init(void) {} 4944 #endif 4945 4946 /* The SKB kmem_cache slab is critical for network performance. Never 4947 * merge/alias the slab with similar sized objects. This avoids fragmentation 4948 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 4949 */ 4950 #ifndef CONFIG_SLUB_TINY 4951 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 4952 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 4953 #define FLAG_SKB_NO_MERGE 0 4954 #endif 4955 4956 void __init skb_init(void) 4957 { 4958 net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 4959 sizeof(struct sk_buff), 4960 0, 4961 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 4962 FLAG_SKB_NO_MERGE, 4963 offsetof(struct sk_buff, cb), 4964 sizeof_field(struct sk_buff, cb), 4965 NULL); 4966 net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4967 sizeof(struct sk_buff_fclones), 4968 0, 4969 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4970 NULL); 4971 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 4972 * struct skb_shared_info is located at the end of skb->head, 4973 * and should not be copied to/from user. 
4974 */ 4975 net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", 4976 SKB_SMALL_HEAD_CACHE_SIZE, 4977 0, 4978 SLAB_HWCACHE_ALIGN | SLAB_PANIC, 4979 0, 4980 SKB_SMALL_HEAD_HEADROOM, 4981 NULL); 4982 skb_extensions_init(); 4983 } 4984 4985 static int 4986 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 4987 unsigned int recursion_level) 4988 { 4989 int start = skb_headlen(skb); 4990 int i, copy = start - offset; 4991 struct sk_buff *frag_iter; 4992 int elt = 0; 4993 4994 if (unlikely(recursion_level >= 24)) 4995 return -EMSGSIZE; 4996 4997 if (copy > 0) { 4998 if (copy > len) 4999 copy = len; 5000 sg_set_buf(sg, skb->data + offset, copy); 5001 elt++; 5002 if ((len -= copy) == 0) 5003 return elt; 5004 offset += copy; 5005 } 5006 5007 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5008 int end; 5009 5010 WARN_ON(start > offset + len); 5011 5012 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 5013 if ((copy = end - offset) > 0) { 5014 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5015 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5016 return -EMSGSIZE; 5017 5018 if (copy > len) 5019 copy = len; 5020 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 5021 skb_frag_off(frag) + offset - start); 5022 elt++; 5023 if (!(len -= copy)) 5024 return elt; 5025 offset += copy; 5026 } 5027 start = end; 5028 } 5029 5030 skb_walk_frags(skb, frag_iter) { 5031 int end, ret; 5032 5033 WARN_ON(start > offset + len); 5034 5035 end = start + frag_iter->len; 5036 if ((copy = end - offset) > 0) { 5037 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 5038 return -EMSGSIZE; 5039 5040 if (copy > len) 5041 copy = len; 5042 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 5043 copy, recursion_level + 1); 5044 if (unlikely(ret < 0)) 5045 return ret; 5046 elt += ret; 5047 if ((len -= copy) == 0) 5048 return elt; 5049 offset += copy; 5050 } 5051 start = end; 5052 } 5053 BUG_ON(len); 5054 return elt; 5055 } 5056 5057 /** 5058 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 5059 * @skb: Socket buffer containing the buffers to be mapped 5060 * @sg: The scatter-gather list to map into 5061 * @offset: The offset into the buffer's contents to start mapping 5062 * @len: Length of buffer space to be mapped 5063 * 5064 * Fill the specified scatter-gather list with mappings/pointers into a 5065 * region of the buffer space attached to a socket buffer. Returns either 5066 * the number of scatterlist items used, or -EMSGSIZE if the contents 5067 * could not fit. 5068 */ 5069 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 5070 { 5071 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 5072 5073 if (nsg <= 0) 5074 return nsg; 5075 5076 sg_mark_end(&sg[nsg - 1]); 5077 5078 return nsg; 5079 } 5080 EXPORT_SYMBOL_GPL(skb_to_sgvec); 5081 5082 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 5083 * sglist without mark the sg which contain last skb data as the end. 5084 * So the caller can mannipulate sg list as will when padding new data after 5085 * the first call without calling sg_unmark_end to expend sg list. 5086 * 5087 * Scenario to use skb_to_sgvec_nomark: 5088 * 1. sg_init_table 5089 * 2. skb_to_sgvec_nomark(payload1) 5090 * 3. skb_to_sgvec_nomark(payload2) 5091 * 5092 * This is equivalent to: 5093 * 1. sg_init_table 5094 * 2. skb_to_sgvec(payload1) 5095 * 3. sg_unmark_end 5096 * 4. 
skb_to_sgvec(payload2) 5097 * 5098 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 5099 * is more preferable. 5100 */ 5101 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 5102 int offset, int len) 5103 { 5104 return __skb_to_sgvec(skb, sg, offset, len, 0); 5105 } 5106 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 5107 5108 5109 5110 /** 5111 * skb_cow_data - Check that a socket buffer's data buffers are writable 5112 * @skb: The socket buffer to check. 5113 * @tailbits: Amount of trailing space to be added 5114 * @trailer: Returned pointer to the skb where the @tailbits space begins 5115 * 5116 * Make sure that the data buffers attached to a socket buffer are 5117 * writable. If they are not, private copies are made of the data buffers 5118 * and the socket buffer is set to use these instead. 5119 * 5120 * If @tailbits is given, make sure that there is space to write @tailbits 5121 * bytes of data beyond current end of socket buffer. @trailer will be 5122 * set to point to the skb in which this space begins. 5123 * 5124 * The number of scatterlist elements required to completely map the 5125 * COW'd and extended socket buffer will be returned. 5126 */ 5127 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 5128 { 5129 int copyflag; 5130 int elt; 5131 struct sk_buff *skb1, **skb_p; 5132 5133 /* If skb is cloned or its head is paged, reallocate 5134 * head pulling out all the pages (pages are considered not writable 5135 * at the moment even if they are anonymous). 5136 */ 5137 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 5138 !__pskb_pull_tail(skb, __skb_pagelen(skb))) 5139 return -ENOMEM; 5140 5141 /* Easy case. Most of packets will go this way. */ 5142 if (!skb_has_frag_list(skb)) { 5143 /* A little of trouble, not enough of space for trailer. 5144 * This should not happen, when stack is tuned to generate 5145 * good frames. OK, on miss we reallocate and reserve even more 5146 * space, 128 bytes is fair. */ 5147 5148 if (skb_tailroom(skb) < tailbits && 5149 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 5150 return -ENOMEM; 5151 5152 /* Voila! */ 5153 *trailer = skb; 5154 return 1; 5155 } 5156 5157 /* Misery. We are in troubles, going to mincer fragments... */ 5158 5159 elt = 1; 5160 skb_p = &skb_shinfo(skb)->frag_list; 5161 copyflag = 0; 5162 5163 while ((skb1 = *skb_p) != NULL) { 5164 int ntail = 0; 5165 5166 /* The fragment is partially pulled by someone, 5167 * this can happen on input. Copy it and everything 5168 * after it. */ 5169 5170 if (skb_shared(skb1)) 5171 copyflag = 1; 5172 5173 /* If the skb is the last, worry about trailer. */ 5174 5175 if (skb1->next == NULL && tailbits) { 5176 if (skb_shinfo(skb1)->nr_frags || 5177 skb_has_frag_list(skb1) || 5178 skb_tailroom(skb1) < tailbits) 5179 ntail = tailbits + 128; 5180 } 5181 5182 if (copyflag || 5183 skb_cloned(skb1) || 5184 ntail || 5185 skb_shinfo(skb1)->nr_frags || 5186 skb_has_frag_list(skb1)) { 5187 struct sk_buff *skb2; 5188 5189 /* Fuck, we are miserable poor guys... */ 5190 if (ntail == 0) 5191 skb2 = skb_copy(skb1, GFP_ATOMIC); 5192 else 5193 skb2 = skb_copy_expand(skb1, 5194 skb_headroom(skb1), 5195 ntail, 5196 GFP_ATOMIC); 5197 if (unlikely(skb2 == NULL)) 5198 return -ENOMEM; 5199 5200 if (skb1->sk) 5201 skb_set_owner_w(skb2, skb1->sk); 5202 5203 /* Looking around. Are we still alive? 
5204 * OK, link new skb, drop old one */ 5205 5206 skb2->next = skb1->next; 5207 *skb_p = skb2; 5208 kfree_skb(skb1); 5209 skb1 = skb2; 5210 } 5211 elt++; 5212 *trailer = skb1; 5213 skb_p = &skb1->next; 5214 } 5215 5216 return elt; 5217 } 5218 EXPORT_SYMBOL_GPL(skb_cow_data); 5219 5220 static void sock_rmem_free(struct sk_buff *skb) 5221 { 5222 struct sock *sk = skb->sk; 5223 5224 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 5225 } 5226 5227 static void skb_set_err_queue(struct sk_buff *skb) 5228 { 5229 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 5230 * So, it is safe to (mis)use it to mark skbs on the error queue. 5231 */ 5232 skb->pkt_type = PACKET_OUTGOING; 5233 BUILD_BUG_ON(PACKET_OUTGOING == 0); 5234 } 5235 5236 /* 5237 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 5238 */ 5239 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 5240 { 5241 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 5242 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 5243 return -ENOMEM; 5244 5245 skb_orphan(skb); 5246 skb->sk = sk; 5247 skb->destructor = sock_rmem_free; 5248 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 5249 skb_set_err_queue(skb); 5250 5251 /* before exiting rcu section, make sure dst is refcounted */ 5252 skb_dst_force(skb); 5253 5254 skb_queue_tail(&sk->sk_error_queue, skb); 5255 if (!sock_flag(sk, SOCK_DEAD)) 5256 sk_error_report(sk); 5257 return 0; 5258 } 5259 EXPORT_SYMBOL(sock_queue_err_skb); 5260 5261 static bool is_icmp_err_skb(const struct sk_buff *skb) 5262 { 5263 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 5264 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 5265 } 5266 5267 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 5268 { 5269 struct sk_buff_head *q = &sk->sk_error_queue; 5270 struct sk_buff *skb, *skb_next = NULL; 5271 bool icmp_next = false; 5272 unsigned long flags; 5273 5274 if (skb_queue_empty_lockless(q)) 5275 return NULL; 5276 5277 spin_lock_irqsave(&q->lock, flags); 5278 skb = __skb_dequeue(q); 5279 if (skb && (skb_next = skb_peek(q))) { 5280 icmp_next = is_icmp_err_skb(skb_next); 5281 if (icmp_next) 5282 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 5283 } 5284 spin_unlock_irqrestore(&q->lock, flags); 5285 5286 if (is_icmp_err_skb(skb) && !icmp_next) 5287 sk->sk_err = 0; 5288 5289 if (skb_next) 5290 sk_error_report(sk); 5291 5292 return skb; 5293 } 5294 EXPORT_SYMBOL(sock_dequeue_err_skb); 5295 5296 /** 5297 * skb_clone_sk - create clone of skb, and take reference to socket 5298 * @skb: the skb to clone 5299 * 5300 * This function creates a clone of a buffer that holds a reference on 5301 * sk_refcnt. Buffers created via this function are meant to be 5302 * returned using sock_queue_err_skb, or free via kfree_skb. 5303 * 5304 * When passing buffers allocated with this function to sock_queue_err_skb 5305 * it is necessary to wrap the call with sock_hold/sock_put in order to 5306 * prevent the socket from being released prior to being enqueued on 5307 * the sk_error_queue. 
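 *
 * A minimal sketch of that pattern (illustrative only; sk is assumed to
 * be the socket the original buffer belongs to):
 *
 *	clone = skb_clone_sk(skb);
 *	if (clone) {
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}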
 */
struct sk_buff *skb_clone_sk(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sk_buff *clone;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone) {
		sock_put(sk);
		return NULL;
	}

	clone->sk = sk;
	clone->destructor = sock_efree;

	return clone;
}
EXPORT_SYMBOL(skb_clone_sk);

static void __skb_complete_tx_timestamp(struct sk_buff *skb,
					struct sock *sk,
					int tstype,
					bool opt_stats)
{
	struct sock_exterr_skb *serr;
	int err;

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	serr->ee.ee_info = tstype;
	serr->opt_stats = opt_stats;
	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
	if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
		serr->ee.ee_data = skb_shinfo(skb)->tskey;
		if (sk_is_tcp(sk))
			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
	}

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}

static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
	bool ret;

	if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
		return true;

	read_lock_bh(&sk->sk_callback_lock);
	ret = sk->sk_socket && sk->sk_socket->file &&
	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
	read_unlock_bh(&sk->sk_callback_lock);
	return ret;
}

void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = skb->sk;

	if (!skb_may_tx_timestamp(sk, false))
		goto err;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
5383 */ 5384 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5385 *skb_hwtstamps(skb) = *hwtstamps; 5386 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5387 sock_put(sk); 5388 return; 5389 } 5390 5391 err: 5392 kfree_skb(skb); 5393 } 5394 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5395 5396 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5397 const struct sk_buff *ack_skb, 5398 struct skb_shared_hwtstamps *hwtstamps, 5399 struct sock *sk, int tstype) 5400 { 5401 struct sk_buff *skb; 5402 bool tsonly, opt_stats = false; 5403 u32 tsflags; 5404 5405 if (!sk) 5406 return; 5407 5408 tsflags = READ_ONCE(sk->sk_tsflags); 5409 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5410 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5411 return; 5412 5413 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5414 if (!skb_may_tx_timestamp(sk, tsonly)) 5415 return; 5416 5417 if (tsonly) { 5418 #ifdef CONFIG_INET 5419 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5420 sk_is_tcp(sk)) { 5421 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5422 ack_skb); 5423 opt_stats = true; 5424 } else 5425 #endif 5426 skb = alloc_skb(0, GFP_ATOMIC); 5427 } else { 5428 skb = skb_clone(orig_skb, GFP_ATOMIC); 5429 5430 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5431 kfree_skb(skb); 5432 return; 5433 } 5434 } 5435 if (!skb) 5436 return; 5437 5438 if (tsonly) { 5439 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5440 SKBTX_ANY_TSTAMP; 5441 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5442 } 5443 5444 if (hwtstamps) 5445 *skb_hwtstamps(skb) = *hwtstamps; 5446 else 5447 __net_timestamp(skb); 5448 5449 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5450 } 5451 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5452 5453 void skb_tstamp_tx(struct sk_buff *orig_skb, 5454 struct skb_shared_hwtstamps *hwtstamps) 5455 { 5456 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5457 SCM_TSTAMP_SND); 5458 } 5459 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5460 5461 #ifdef CONFIG_WIRELESS 5462 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5463 { 5464 struct sock *sk = skb->sk; 5465 struct sock_exterr_skb *serr; 5466 int err = 1; 5467 5468 skb->wifi_acked_valid = 1; 5469 skb->wifi_acked = acked; 5470 5471 serr = SKB_EXT_ERR(skb); 5472 memset(serr, 0, sizeof(*serr)); 5473 serr->ee.ee_errno = ENOMSG; 5474 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5475 5476 /* Take a reference to prevent skb_orphan() from freeing the socket, 5477 * but only if the socket refcount is not zero. 5478 */ 5479 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5480 err = sock_queue_err_skb(sk, skb); 5481 sock_put(sk); 5482 } 5483 if (err) 5484 kfree_skb(skb); 5485 } 5486 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5487 #endif /* CONFIG_WIRELESS */ 5488 5489 /** 5490 * skb_partial_csum_set - set up and verify partial csum values for packet 5491 * @skb: the skb to set 5492 * @start: the number of bytes after skb->data to start checksumming. 5493 * @off: the offset from start to place the checksum. 5494 * 5495 * For untrusted partially-checksummed packets, we need to make sure the values 5496 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5497 * 5498 * This function checks and sets those values and skb->ip_summed: if this 5499 * returns false you should drop the packet. 
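 *
 * A hedged example (illustrative; "hdr" stands for a device-provided
 * metadata header in the style of virtio-net, not something defined in
 * this file):
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
 *		goto drop;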
5500 */ 5501 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5502 { 5503 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5504 u32 csum_start = skb_headroom(skb) + (u32)start; 5505 5506 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5507 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5508 start, off, skb_headroom(skb), skb_headlen(skb)); 5509 return false; 5510 } 5511 skb->ip_summed = CHECKSUM_PARTIAL; 5512 skb->csum_start = csum_start; 5513 skb->csum_offset = off; 5514 skb->transport_header = csum_start; 5515 return true; 5516 } 5517 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5518 5519 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5520 unsigned int max) 5521 { 5522 if (skb_headlen(skb) >= len) 5523 return 0; 5524 5525 /* If we need to pullup then pullup to the max, so we 5526 * won't need to do it again. 5527 */ 5528 if (max > skb->len) 5529 max = skb->len; 5530 5531 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5532 return -ENOMEM; 5533 5534 if (skb_headlen(skb) < len) 5535 return -EPROTO; 5536 5537 return 0; 5538 } 5539 5540 #define MAX_TCP_HDR_LEN (15 * 4) 5541 5542 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5543 typeof(IPPROTO_IP) proto, 5544 unsigned int off) 5545 { 5546 int err; 5547 5548 switch (proto) { 5549 case IPPROTO_TCP: 5550 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5551 off + MAX_TCP_HDR_LEN); 5552 if (!err && !skb_partial_csum_set(skb, off, 5553 offsetof(struct tcphdr, 5554 check))) 5555 err = -EPROTO; 5556 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5557 5558 case IPPROTO_UDP: 5559 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5560 off + sizeof(struct udphdr)); 5561 if (!err && !skb_partial_csum_set(skb, off, 5562 offsetof(struct udphdr, 5563 check))) 5564 err = -EPROTO; 5565 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5566 } 5567 5568 return ERR_PTR(-EPROTO); 5569 } 5570 5571 /* This value should be large enough to cover a tagged ethernet header plus 5572 * maximally sized IP and TCP or UDP headers. 5573 */ 5574 #define MAX_IP_HDR_LEN 128 5575 5576 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5577 { 5578 unsigned int off; 5579 bool fragment; 5580 __sum16 *csum; 5581 int err; 5582 5583 fragment = false; 5584 5585 err = skb_maybe_pull_tail(skb, 5586 sizeof(struct iphdr), 5587 MAX_IP_HDR_LEN); 5588 if (err < 0) 5589 goto out; 5590 5591 if (ip_is_fragment(ip_hdr(skb))) 5592 fragment = true; 5593 5594 off = ip_hdrlen(skb); 5595 5596 err = -EPROTO; 5597 5598 if (fragment) 5599 goto out; 5600 5601 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5602 if (IS_ERR(csum)) 5603 return PTR_ERR(csum); 5604 5605 if (recalculate) 5606 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5607 ip_hdr(skb)->daddr, 5608 skb->len - off, 5609 ip_hdr(skb)->protocol, 0); 5610 err = 0; 5611 5612 out: 5613 return err; 5614 } 5615 5616 /* This value should be large enough to cover a tagged ethernet header plus 5617 * an IPv6 header, all options, and a maximal TCP or UDP header. 
5618 */ 5619 #define MAX_IPV6_HDR_LEN 256 5620 5621 #define OPT_HDR(type, skb, off) \ 5622 (type *)(skb_network_header(skb) + (off)) 5623 5624 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5625 { 5626 int err; 5627 u8 nexthdr; 5628 unsigned int off; 5629 unsigned int len; 5630 bool fragment; 5631 bool done; 5632 __sum16 *csum; 5633 5634 fragment = false; 5635 done = false; 5636 5637 off = sizeof(struct ipv6hdr); 5638 5639 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5640 if (err < 0) 5641 goto out; 5642 5643 nexthdr = ipv6_hdr(skb)->nexthdr; 5644 5645 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5646 while (off <= len && !done) { 5647 switch (nexthdr) { 5648 case IPPROTO_DSTOPTS: 5649 case IPPROTO_HOPOPTS: 5650 case IPPROTO_ROUTING: { 5651 struct ipv6_opt_hdr *hp; 5652 5653 err = skb_maybe_pull_tail(skb, 5654 off + 5655 sizeof(struct ipv6_opt_hdr), 5656 MAX_IPV6_HDR_LEN); 5657 if (err < 0) 5658 goto out; 5659 5660 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5661 nexthdr = hp->nexthdr; 5662 off += ipv6_optlen(hp); 5663 break; 5664 } 5665 case IPPROTO_AH: { 5666 struct ip_auth_hdr *hp; 5667 5668 err = skb_maybe_pull_tail(skb, 5669 off + 5670 sizeof(struct ip_auth_hdr), 5671 MAX_IPV6_HDR_LEN); 5672 if (err < 0) 5673 goto out; 5674 5675 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5676 nexthdr = hp->nexthdr; 5677 off += ipv6_authlen(hp); 5678 break; 5679 } 5680 case IPPROTO_FRAGMENT: { 5681 struct frag_hdr *hp; 5682 5683 err = skb_maybe_pull_tail(skb, 5684 off + 5685 sizeof(struct frag_hdr), 5686 MAX_IPV6_HDR_LEN); 5687 if (err < 0) 5688 goto out; 5689 5690 hp = OPT_HDR(struct frag_hdr, skb, off); 5691 5692 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5693 fragment = true; 5694 5695 nexthdr = hp->nexthdr; 5696 off += sizeof(struct frag_hdr); 5697 break; 5698 } 5699 default: 5700 done = true; 5701 break; 5702 } 5703 } 5704 5705 err = -EPROTO; 5706 5707 if (!done || fragment) 5708 goto out; 5709 5710 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5711 if (IS_ERR(csum)) 5712 return PTR_ERR(csum); 5713 5714 if (recalculate) 5715 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5716 &ipv6_hdr(skb)->daddr, 5717 skb->len - off, nexthdr, 0); 5718 err = 0; 5719 5720 out: 5721 return err; 5722 } 5723 5724 /** 5725 * skb_checksum_setup - set up partial checksum offset 5726 * @skb: the skb to set up 5727 * @recalculate: if true the pseudo-header checksum will be recalculated 5728 */ 5729 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5730 { 5731 int err; 5732 5733 switch (skb->protocol) { 5734 case htons(ETH_P_IP): 5735 err = skb_checksum_setup_ipv4(skb, recalculate); 5736 break; 5737 5738 case htons(ETH_P_IPV6): 5739 err = skb_checksum_setup_ipv6(skb, recalculate); 5740 break; 5741 5742 default: 5743 err = -EPROTO; 5744 break; 5745 } 5746 5747 return err; 5748 } 5749 EXPORT_SYMBOL(skb_checksum_setup); 5750 5751 /** 5752 * skb_checksum_maybe_trim - maybe trims the given skb 5753 * @skb: the skb to check 5754 * @transport_len: the data length beyond the network header 5755 * 5756 * Checks whether the given skb has data beyond the given transport length. 5757 * If so, returns a cloned skb trimmed to this transport length. 5758 * Otherwise returns the provided skb. Returns NULL in error cases 5759 * (e.g. transport_len exceeds skb length or out-of-memory). 5760 * 5761 * Caller needs to set the skb transport header and free any returned skb if it 5762 * differs from the provided skb. 
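 *
 * Typical caller pattern (an illustrative sketch; my_csum_check is an
 * assumed checksum callback, applied via skb_checksum_trimmed() below):
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_csum_check);
 *	if (!skb_chk)
 *		goto drop;
 *	...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);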
5763 */ 5764 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5765 unsigned int transport_len) 5766 { 5767 struct sk_buff *skb_chk; 5768 unsigned int len = skb_transport_offset(skb) + transport_len; 5769 int ret; 5770 5771 if (skb->len < len) 5772 return NULL; 5773 else if (skb->len == len) 5774 return skb; 5775 5776 skb_chk = skb_clone(skb, GFP_ATOMIC); 5777 if (!skb_chk) 5778 return NULL; 5779 5780 ret = pskb_trim_rcsum(skb_chk, len); 5781 if (ret) { 5782 kfree_skb(skb_chk); 5783 return NULL; 5784 } 5785 5786 return skb_chk; 5787 } 5788 5789 /** 5790 * skb_checksum_trimmed - validate checksum of an skb 5791 * @skb: the skb to check 5792 * @transport_len: the data length beyond the network header 5793 * @skb_chkf: checksum function to use 5794 * 5795 * Applies the given checksum function skb_chkf to the provided skb. 5796 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5797 * 5798 * If the skb has data beyond the given transport length, then a 5799 * trimmed & cloned skb is checked and returned. 5800 * 5801 * Caller needs to set the skb transport header and free any returned skb if it 5802 * differs from the provided skb. 5803 */ 5804 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5805 unsigned int transport_len, 5806 __sum16(*skb_chkf)(struct sk_buff *skb)) 5807 { 5808 struct sk_buff *skb_chk; 5809 unsigned int offset = skb_transport_offset(skb); 5810 __sum16 ret; 5811 5812 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5813 if (!skb_chk) 5814 goto err; 5815 5816 if (!pskb_may_pull(skb_chk, offset)) 5817 goto err; 5818 5819 skb_pull_rcsum(skb_chk, offset); 5820 ret = skb_chkf(skb_chk); 5821 skb_push_rcsum(skb_chk, offset); 5822 5823 if (ret) 5824 goto err; 5825 5826 return skb_chk; 5827 5828 err: 5829 if (skb_chk && skb_chk != skb) 5830 kfree_skb(skb_chk); 5831 5832 return NULL; 5833 5834 } 5835 EXPORT_SYMBOL(skb_checksum_trimmed); 5836 5837 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5838 { 5839 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5840 skb->dev->name); 5841 } 5842 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5843 5844 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5845 { 5846 if (head_stolen) { 5847 skb_release_head_state(skb); 5848 kmem_cache_free(net_hotdata.skbuff_cache, skb); 5849 } else { 5850 __kfree_skb(skb); 5851 } 5852 } 5853 EXPORT_SYMBOL(kfree_skb_partial); 5854 5855 /** 5856 * skb_try_coalesce - try to merge skb to prior one 5857 * @to: prior buffer 5858 * @from: buffer to add 5859 * @fragstolen: pointer to boolean 5860 * @delta_truesize: how much more was allocated than was requested 5861 */ 5862 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5863 bool *fragstolen, int *delta_truesize) 5864 { 5865 struct skb_shared_info *to_shinfo, *from_shinfo; 5866 int i, delta, len = from->len; 5867 5868 *fragstolen = false; 5869 5870 if (skb_cloned(to)) 5871 return false; 5872 5873 /* In general, avoid mixing page_pool and non-page_pool allocated 5874 * pages within the same SKB. In theory we could take full 5875 * references if @from is cloned and !@to->pp_recycle but its 5876 * tricky (due to potential race with the clone disappearing) and 5877 * rare, so not worth dealing with. 
	 */
	if (to->pp_recycle != from->pp_recycle)
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	to_shinfo = skb_shinfo(to);
	from_shinfo = skb_shinfo(from);
	if (to_shinfo->frag_list || from_shinfo->frag_list)
		return false;
	if (skb_zcopy(to) || skb_zcopy(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, to_shinfo->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
	       from_shinfo->frags,
	       from_shinfo->nr_frags * sizeof(skb_frag_t));
	to_shinfo->nr_frags += from_shinfo->nr_frags;

	if (!skb_cloned(from))
		from_shinfo->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < from_shinfo->nr_frags; i++)
		__skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
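 *
 * Illustrative call site (the device variables are assumptions): after
 * decapsulation, @xnet is true only if the packet really changes
 * namespace:
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(in_dev), dev_net(out_dev)));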
5960 */ 5961 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5962 { 5963 skb->pkt_type = PACKET_HOST; 5964 skb->skb_iif = 0; 5965 skb->ignore_df = 0; 5966 skb_dst_drop(skb); 5967 skb_ext_reset(skb); 5968 nf_reset_ct(skb); 5969 nf_reset_trace(skb); 5970 5971 #ifdef CONFIG_NET_SWITCHDEV 5972 skb->offload_fwd_mark = 0; 5973 skb->offload_l3_fwd_mark = 0; 5974 #endif 5975 5976 if (!xnet) 5977 return; 5978 5979 ipvs_reset(skb); 5980 skb->mark = 0; 5981 skb_clear_tstamp(skb); 5982 } 5983 EXPORT_SYMBOL_GPL(skb_scrub_packet); 5984 5985 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5986 { 5987 int mac_len, meta_len; 5988 void *meta; 5989 5990 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5991 kfree_skb(skb); 5992 return NULL; 5993 } 5994 5995 mac_len = skb->data - skb_mac_header(skb); 5996 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 5997 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 5998 mac_len - VLAN_HLEN - ETH_TLEN); 5999 } 6000 6001 meta_len = skb_metadata_len(skb); 6002 if (meta_len) { 6003 meta = skb_metadata_end(skb) - meta_len; 6004 memmove(meta + VLAN_HLEN, meta, meta_len); 6005 } 6006 6007 skb->mac_header += VLAN_HLEN; 6008 return skb; 6009 } 6010 6011 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6012 { 6013 struct vlan_hdr *vhdr; 6014 u16 vlan_tci; 6015 6016 if (unlikely(skb_vlan_tag_present(skb))) { 6017 /* vlan_tci is already set-up so leave this for another time */ 6018 return skb; 6019 } 6020 6021 skb = skb_share_check(skb, GFP_ATOMIC); 6022 if (unlikely(!skb)) 6023 goto err_free; 6024 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6025 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6026 goto err_free; 6027 6028 vhdr = (struct vlan_hdr *)skb->data; 6029 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6030 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6031 6032 skb_pull_rcsum(skb, VLAN_HLEN); 6033 vlan_set_encap_proto(skb, vhdr); 6034 6035 skb = skb_reorder_vlan_header(skb); 6036 if (unlikely(!skb)) 6037 goto err_free; 6038 6039 skb_reset_network_header(skb); 6040 if (!skb_transport_header_was_set(skb)) 6041 skb_reset_transport_header(skb); 6042 skb_reset_mac_len(skb); 6043 6044 return skb; 6045 6046 err_free: 6047 kfree_skb(skb); 6048 return NULL; 6049 } 6050 EXPORT_SYMBOL(skb_vlan_untag); 6051 6052 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6053 { 6054 if (!pskb_may_pull(skb, write_len)) 6055 return -ENOMEM; 6056 6057 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6058 return 0; 6059 6060 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6061 } 6062 EXPORT_SYMBOL(skb_ensure_writable); 6063 6064 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6065 { 6066 int needed_headroom = dev->needed_headroom; 6067 int needed_tailroom = dev->needed_tailroom; 6068 6069 /* For tail taggers, we need to pad short frames ourselves, to ensure 6070 * that the tail tag does not fail at its role of being at the end of 6071 * the packet, once the conduit interface pads the frame. Account for 6072 * that pad length here, and pad later. 6073 */ 6074 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6075 needed_tailroom += ETH_ZLEN - skb->len; 6076 /* skb_headroom() returns unsigned int... 
*/ 6077 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); 6078 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); 6079 6080 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) 6081 /* No reallocation needed, yay! */ 6082 return 0; 6083 6084 return pskb_expand_head(skb, needed_headroom, needed_tailroom, 6085 GFP_ATOMIC); 6086 } 6087 EXPORT_SYMBOL(skb_ensure_writable_head_tail); 6088 6089 /* remove VLAN header from packet and update csum accordingly. 6090 * expects a non skb_vlan_tag_present skb with a vlan tag payload 6091 */ 6092 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 6093 { 6094 int offset = skb->data - skb_mac_header(skb); 6095 int err; 6096 6097 if (WARN_ONCE(offset, 6098 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 6099 offset)) { 6100 return -EINVAL; 6101 } 6102 6103 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 6104 if (unlikely(err)) 6105 return err; 6106 6107 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6108 6109 vlan_remove_tag(skb, vlan_tci); 6110 6111 skb->mac_header += VLAN_HLEN; 6112 6113 if (skb_network_offset(skb) < ETH_HLEN) 6114 skb_set_network_header(skb, ETH_HLEN); 6115 6116 skb_reset_mac_len(skb); 6117 6118 return err; 6119 } 6120 EXPORT_SYMBOL(__skb_vlan_pop); 6121 6122 /* Pop a vlan tag either from hwaccel or from payload. 6123 * Expects skb->data at mac header. 6124 */ 6125 int skb_vlan_pop(struct sk_buff *skb) 6126 { 6127 u16 vlan_tci; 6128 __be16 vlan_proto; 6129 int err; 6130 6131 if (likely(skb_vlan_tag_present(skb))) { 6132 __vlan_hwaccel_clear_tag(skb); 6133 } else { 6134 if (unlikely(!eth_type_vlan(skb->protocol))) 6135 return 0; 6136 6137 err = __skb_vlan_pop(skb, &vlan_tci); 6138 if (err) 6139 return err; 6140 } 6141 /* move next vlan tag to hw accel tag */ 6142 if (likely(!eth_type_vlan(skb->protocol))) 6143 return 0; 6144 6145 vlan_proto = skb->protocol; 6146 err = __skb_vlan_pop(skb, &vlan_tci); 6147 if (unlikely(err)) 6148 return err; 6149 6150 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6151 return 0; 6152 } 6153 EXPORT_SYMBOL(skb_vlan_pop); 6154 6155 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 6156 * Expects skb->data at mac header. 6157 */ 6158 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 6159 { 6160 if (skb_vlan_tag_present(skb)) { 6161 int offset = skb->data - skb_mac_header(skb); 6162 int err; 6163 6164 if (WARN_ONCE(offset, 6165 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 6166 offset)) { 6167 return -EINVAL; 6168 } 6169 6170 err = __vlan_insert_tag(skb, skb->vlan_proto, 6171 skb_vlan_tag_get(skb)); 6172 if (err) 6173 return err; 6174 6175 skb->protocol = skb->vlan_proto; 6176 skb->mac_len += VLAN_HLEN; 6177 6178 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 6179 } 6180 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 6181 return 0; 6182 } 6183 EXPORT_SYMBOL(skb_vlan_push); 6184 6185 /** 6186 * skb_eth_pop() - Drop the Ethernet header at the head of a packet 6187 * 6188 * @skb: Socket buffer to modify 6189 * 6190 * Drop the Ethernet header of @skb. 6191 * 6192 * Expects that skb->data points to the mac header and that no VLAN tags are 6193 * present. 6194 * 6195 * Returns 0 on success, -errno otherwise. 
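 *
 * Example (illustrative sketch; the surrounding decapsulation path and its
 * drop label are hypothetical):
 *
 *	err = skb_eth_pop(skb);
 *	if (err)
 *		goto drop;
 *
 * after which the mac header is reset to the new skb->data.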
6196 */ 6197 int skb_eth_pop(struct sk_buff *skb) 6198 { 6199 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6200 skb_network_offset(skb) < ETH_HLEN) 6201 return -EPROTO; 6202 6203 skb_pull_rcsum(skb, ETH_HLEN); 6204 skb_reset_mac_header(skb); 6205 skb_reset_mac_len(skb); 6206 6207 return 0; 6208 } 6209 EXPORT_SYMBOL(skb_eth_pop); 6210 6211 /** 6212 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6213 * 6214 * @skb: Socket buffer to modify 6215 * @dst: Destination MAC address of the new header 6216 * @src: Source MAC address of the new header 6217 * 6218 * Prepend @skb with a new Ethernet header. 6219 * 6220 * Expects that skb->data points to the mac header, which must be empty. 6221 * 6222 * Returns 0 on success, -errno otherwise. 6223 */ 6224 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6225 const unsigned char *src) 6226 { 6227 struct ethhdr *eth; 6228 int err; 6229 6230 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6231 return -EPROTO; 6232 6233 err = skb_cow_head(skb, sizeof(*eth)); 6234 if (err < 0) 6235 return err; 6236 6237 skb_push(skb, sizeof(*eth)); 6238 skb_reset_mac_header(skb); 6239 skb_reset_mac_len(skb); 6240 6241 eth = eth_hdr(skb); 6242 ether_addr_copy(eth->h_dest, dst); 6243 ether_addr_copy(eth->h_source, src); 6244 eth->h_proto = skb->protocol; 6245 6246 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6247 6248 return 0; 6249 } 6250 EXPORT_SYMBOL(skb_eth_push); 6251 6252 /* Update the ethertype of hdr and the skb csum value if required. */ 6253 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6254 __be16 ethertype) 6255 { 6256 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6257 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6258 6259 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6260 } 6261 6262 hdr->h_proto = ethertype; 6263 } 6264 6265 /** 6266 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6267 * the packet 6268 * 6269 * @skb: buffer 6270 * @mpls_lse: MPLS label stack entry to push 6271 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6272 * @mac_len: length of the MAC header 6273 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6274 * ethernet 6275 * 6276 * Expects skb->data at mac header. 6277 * 6278 * Returns 0 on success, -errno otherwise. 6279 */ 6280 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6281 int mac_len, bool ethernet) 6282 { 6283 struct mpls_shim_hdr *lse; 6284 int err; 6285 6286 if (unlikely(!eth_p_mpls(mpls_proto))) 6287 return -EINVAL; 6288 6289 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
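	 * Bail out on skbs already marked as encapsulated rather than set up
	 * GSO state the stack cannot segment.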
*/ 6290 if (skb->encapsulation) 6291 return -EINVAL; 6292 6293 err = skb_cow_head(skb, MPLS_HLEN); 6294 if (unlikely(err)) 6295 return err; 6296 6297 if (!skb->inner_protocol) { 6298 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6299 skb_set_inner_protocol(skb, skb->protocol); 6300 } 6301 6302 skb_push(skb, MPLS_HLEN); 6303 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6304 mac_len); 6305 skb_reset_mac_header(skb); 6306 skb_set_network_header(skb, mac_len); 6307 skb_reset_mac_len(skb); 6308 6309 lse = mpls_hdr(skb); 6310 lse->label_stack_entry = mpls_lse; 6311 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6312 6313 if (ethernet && mac_len >= ETH_HLEN) 6314 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6315 skb->protocol = mpls_proto; 6316 6317 return 0; 6318 } 6319 EXPORT_SYMBOL_GPL(skb_mpls_push); 6320 6321 /** 6322 * skb_mpls_pop() - pop the outermost MPLS header 6323 * 6324 * @skb: buffer 6325 * @next_proto: ethertype of header after popped MPLS header 6326 * @mac_len: length of the MAC header 6327 * @ethernet: flag to indicate if the packet is ethernet 6328 * 6329 * Expects skb->data at mac header. 6330 * 6331 * Returns 0 on success, -errno otherwise. 6332 */ 6333 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6334 bool ethernet) 6335 { 6336 int err; 6337 6338 if (unlikely(!eth_p_mpls(skb->protocol))) 6339 return 0; 6340 6341 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6342 if (unlikely(err)) 6343 return err; 6344 6345 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6346 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6347 mac_len); 6348 6349 __skb_pull(skb, MPLS_HLEN); 6350 skb_reset_mac_header(skb); 6351 skb_set_network_header(skb, mac_len); 6352 6353 if (ethernet && mac_len >= ETH_HLEN) { 6354 struct ethhdr *hdr; 6355 6356 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6357 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6358 skb_mod_eth_type(skb, hdr, next_proto); 6359 } 6360 skb->protocol = next_proto; 6361 6362 return 0; 6363 } 6364 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6365 6366 /** 6367 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6368 * 6369 * @skb: buffer 6370 * @mpls_lse: new MPLS label stack entry to update to 6371 * 6372 * Expects skb->data at mac header. 6373 * 6374 * Returns 0 on success, -errno otherwise. 6375 */ 6376 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6377 { 6378 int err; 6379 6380 if (unlikely(!eth_p_mpls(skb->protocol))) 6381 return -EINVAL; 6382 6383 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6384 if (unlikely(err)) 6385 return err; 6386 6387 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6388 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6389 6390 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6391 } 6392 6393 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6394 6395 return 0; 6396 } 6397 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6398 6399 /** 6400 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6401 * 6402 * @skb: buffer 6403 * 6404 * Expects skb->data at mac header. 6405 * 6406 * Returns 0 on success, -errno otherwise. 
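 *
 * Example (illustrative sketch of a hypothetical MPLS forwarding action):
 *
 *	err = skb_mpls_dec_ttl(skb);
 *	if (err)
 *		goto drop;
 *
 * where -EINVAL covers both a non-MPLS ethertype and a TTL that would reach
 * zero after the decrement.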
6407 */ 6408 int skb_mpls_dec_ttl(struct sk_buff *skb) 6409 { 6410 u32 lse; 6411 u8 ttl; 6412 6413 if (unlikely(!eth_p_mpls(skb->protocol))) 6414 return -EINVAL; 6415 6416 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6417 return -ENOMEM; 6418 6419 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6420 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6421 if (!--ttl) 6422 return -EINVAL; 6423 6424 lse &= ~MPLS_LS_TTL_MASK; 6425 lse |= ttl << MPLS_LS_TTL_SHIFT; 6426 6427 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6428 } 6429 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6430 6431 /** 6432 * alloc_skb_with_frags - allocate skb with page frags 6433 * 6434 * @header_len: size of linear part 6435 * @data_len: needed length in frags 6436 * @order: max page order desired. 6437 * @errcode: pointer to error code if any 6438 * @gfp_mask: allocation mask 6439 * 6440 * This can be used to allocate a paged skb, given a maximal order for frags. 6441 */ 6442 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6443 unsigned long data_len, 6444 int order, 6445 int *errcode, 6446 gfp_t gfp_mask) 6447 { 6448 unsigned long chunk; 6449 struct sk_buff *skb; 6450 struct page *page; 6451 int nr_frags = 0; 6452 6453 *errcode = -EMSGSIZE; 6454 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6455 return NULL; 6456 6457 *errcode = -ENOBUFS; 6458 skb = alloc_skb(header_len, gfp_mask); 6459 if (!skb) 6460 return NULL; 6461 6462 while (data_len) { 6463 if (nr_frags == MAX_SKB_FRAGS - 1) 6464 goto failure; 6465 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6466 order--; 6467 6468 if (order) { 6469 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6470 __GFP_COMP | 6471 __GFP_NOWARN, 6472 order); 6473 if (!page) { 6474 order--; 6475 continue; 6476 } 6477 } else { 6478 page = alloc_page(gfp_mask); 6479 if (!page) 6480 goto failure; 6481 } 6482 chunk = min_t(unsigned long, data_len, 6483 PAGE_SIZE << order); 6484 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6485 nr_frags++; 6486 skb->truesize += (PAGE_SIZE << order); 6487 data_len -= chunk; 6488 } 6489 return skb; 6490 6491 failure: 6492 kfree_skb(skb); 6493 return NULL; 6494 } 6495 EXPORT_SYMBOL(alloc_skb_with_frags); 6496 6497 /* carve out the first off bytes from skb when off < headlen */ 6498 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6499 const int headlen, gfp_t gfp_mask) 6500 { 6501 int i; 6502 unsigned int size = skb_end_offset(skb); 6503 int new_hlen = headlen - off; 6504 u8 *data; 6505 6506 if (skb_pfmemalloc(skb)) 6507 gfp_mask |= __GFP_MEMALLOC; 6508 6509 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6510 if (!data) 6511 return -ENOMEM; 6512 size = SKB_WITH_OVERHEAD(size); 6513 6514 /* Copy real data, and all frags */ 6515 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6516 skb->len -= off; 6517 6518 memcpy((struct skb_shared_info *)(data + size), 6519 skb_shinfo(skb), 6520 offsetof(struct skb_shared_info, 6521 frags[skb_shinfo(skb)->nr_frags])); 6522 if (skb_cloned(skb)) { 6523 /* drop the old head gracefully */ 6524 if (skb_orphan_frags(skb, gfp_mask)) { 6525 skb_kfree_head(data, size); 6526 return -ENOMEM; 6527 } 6528 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6529 skb_frag_ref(skb, i); 6530 if (skb_has_frag_list(skb)) 6531 skb_clone_fraglist(skb); 6532 skb_release_data(skb, SKB_CONSUMED); 6533 } else { 6534 /* we can reuse existing recount- all we did was 6535 * relocate values 6536 */ 6537 skb_free_head(skb); 6538 } 6539 
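	/* Point the skb at the freshly allocated head and resync its
	 * bookkeeping (end offset, tail, header offsets, dataref).
	 */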
6540 skb->head = data; 6541 skb->data = data; 6542 skb->head_frag = 0; 6543 skb_set_end_offset(skb, size); 6544 skb_set_tail_pointer(skb, skb_headlen(skb)); 6545 skb_headers_offset_update(skb, 0); 6546 skb->cloned = 0; 6547 skb->hdr_len = 0; 6548 skb->nohdr = 0; 6549 atomic_set(&skb_shinfo(skb)->dataref, 1); 6550 6551 return 0; 6552 } 6553 6554 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6555 6556 /* carve out the first eat bytes from skb's frag_list. May recurse into 6557 * pskb_carve() 6558 */ 6559 static int pskb_carve_frag_list(struct sk_buff *skb, 6560 struct skb_shared_info *shinfo, int eat, 6561 gfp_t gfp_mask) 6562 { 6563 struct sk_buff *list = shinfo->frag_list; 6564 struct sk_buff *clone = NULL; 6565 struct sk_buff *insp = NULL; 6566 6567 do { 6568 if (!list) { 6569 pr_err("Not enough bytes to eat. Want %d\n", eat); 6570 return -EFAULT; 6571 } 6572 if (list->len <= eat) { 6573 /* Eaten as whole. */ 6574 eat -= list->len; 6575 list = list->next; 6576 insp = list; 6577 } else { 6578 /* Eaten partially. */ 6579 if (skb_shared(list)) { 6580 clone = skb_clone(list, gfp_mask); 6581 if (!clone) 6582 return -ENOMEM; 6583 insp = list->next; 6584 list = clone; 6585 } else { 6586 /* This may be pulled without problems. */ 6587 insp = list; 6588 } 6589 if (pskb_carve(list, eat, gfp_mask) < 0) { 6590 kfree_skb(clone); 6591 return -ENOMEM; 6592 } 6593 break; 6594 } 6595 } while (eat); 6596 6597 /* Free pulled out fragments. */ 6598 while ((list = shinfo->frag_list) != insp) { 6599 shinfo->frag_list = list->next; 6600 consume_skb(list); 6601 } 6602 /* And insert new clone at head. */ 6603 if (clone) { 6604 clone->next = list; 6605 shinfo->frag_list = clone; 6606 } 6607 return 0; 6608 } 6609 6610 /* carve off first len bytes from skb. Split line (off) is in the 6611 * non-linear part of skb 6612 */ 6613 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6614 int pos, gfp_t gfp_mask) 6615 { 6616 int i, k = 0; 6617 unsigned int size = skb_end_offset(skb); 6618 u8 *data; 6619 const int nfrags = skb_shinfo(skb)->nr_frags; 6620 struct skb_shared_info *shinfo; 6621 6622 if (skb_pfmemalloc(skb)) 6623 gfp_mask |= __GFP_MEMALLOC; 6624 6625 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6626 if (!data) 6627 return -ENOMEM; 6628 size = SKB_WITH_OVERHEAD(size); 6629 6630 memcpy((struct skb_shared_info *)(data + size), 6631 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6632 if (skb_orphan_frags(skb, gfp_mask)) { 6633 skb_kfree_head(data, size); 6634 return -ENOMEM; 6635 } 6636 shinfo = (struct skb_shared_info *)(data + size); 6637 for (i = 0; i < nfrags; i++) { 6638 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6639 6640 if (pos + fsize > off) { 6641 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6642 6643 if (pos < off) { 6644 /* Split frag. 6645 * We have two variants in this case: 6646 * 1. Move all the frag to the second 6647 * part, if it is possible. F.e. 6648 * this approach is mandatory for TUX, 6649 * where splitting is expensive. 6650 * 2. Split is accurately. We make this. 
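				 * Only the copied frag descriptor is adjusted
				 * (offset advanced, size reduced); the page it
				 * points at stays shared and gets an extra
				 * reference below.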
6651 				 */
6652 				skb_frag_off_add(&shinfo->frags[0], off - pos);
6653 				skb_frag_size_sub(&shinfo->frags[0], off - pos);
6654 			}
6655 			skb_frag_ref(skb, i);
6656 			k++;
6657 		}
6658 		pos += fsize;
6659 	}
6660 	shinfo->nr_frags = k;
6661 	if (skb_has_frag_list(skb))
6662 		skb_clone_fraglist(skb);
6663 
6664 	/* split line is in frag list */
6665 	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6666 		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6667 		if (skb_has_frag_list(skb))
6668 			kfree_skb_list(skb_shinfo(skb)->frag_list);
6669 		skb_kfree_head(data, size);
6670 		return -ENOMEM;
6671 	}
6672 	skb_release_data(skb, SKB_CONSUMED);
6673 
6674 	skb->head = data;
6675 	skb->head_frag = 0;
6676 	skb->data = data;
6677 	skb_set_end_offset(skb, size);
6678 	skb_reset_tail_pointer(skb);
6679 	skb_headers_offset_update(skb, 0);
6680 	skb->cloned = 0;
6681 	skb->hdr_len = 0;
6682 	skb->nohdr = 0;
6683 	skb->len -= off;
6684 	skb->data_len = skb->len;
6685 	atomic_set(&skb_shinfo(skb)->dataref, 1);
6686 	return 0;
6687 }
6688 
6689 /* remove len bytes from the beginning of the skb */
6690 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6691 {
6692 	int headlen = skb_headlen(skb);
6693 
6694 	if (len < headlen)
6695 		return pskb_carve_inside_header(skb, len, headlen, gfp);
6696 	else
6697 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6698 }
6699 
6700 /* Extract to_copy bytes starting at off from skb, and return this in
6701  * a new skb
6702  */
6703 struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6704 			     int to_copy, gfp_t gfp)
6705 {
6706 	struct sk_buff *clone = skb_clone(skb, gfp);
6707 
6708 	if (!clone)
6709 		return NULL;
6710 
6711 	if (pskb_carve(clone, off, gfp) < 0 ||
6712 	    pskb_trim(clone, to_copy)) {
6713 		kfree_skb(clone);
6714 		return NULL;
6715 	}
6716 	return clone;
6717 }
6718 EXPORT_SYMBOL(pskb_extract);
6719 
6720 /**
6721  * skb_condense - try to get rid of fragments/frag_list if possible
6722  * @skb: buffer
6723  *
6724  * Can be used to save memory before skb is added to a busy queue.
6725  * If packet has bytes in frags and enough tail room in skb->head,
6726  * pull all of them, so that we can free the frags right now and adjust
6727  * truesize.
6728  * Notes:
6729  *	We do not reallocate skb->head thus cannot fail.
6730  *	Caller must re-evaluate skb->truesize if needed.
6731  */
6732 void skb_condense(struct sk_buff *skb)
6733 {
6734 	if (skb->data_len) {
6735 		if (skb->data_len > skb->end - skb->tail ||
6736 		    skb_cloned(skb))
6737 			return;
6738 
6739 		/* Nice, we can free page frag(s) right now */
6740 		__pskb_pull_tail(skb, skb->data_len);
6741 	}
6742 	/* At this point, skb->truesize might be overestimated,
6743 	 * because skb had a fragment, and fragments do not tell
6744 	 * their truesize.
6745 	 * When we pulled its content into skb->head, the fragment
6746 	 * was freed, but __pskb_pull_tail() could not possibly
6747 	 * adjust skb->truesize, not knowing the frag truesize.
6748 	 */
6749 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6750 }
6751 EXPORT_SYMBOL(skb_condense);
6752 
6753 #ifdef CONFIG_SKB_EXTENSIONS
6754 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6755 {
6756 	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6757 }
6758 
6759 /**
6760  * __skb_ext_alloc - allocate a new skb extensions storage
6761  *
6762  * @flags: See kmalloc().
6763  *
6764  * Returns the newly allocated pointer. The pointer can later be attached to an
6765  * skb via __skb_ext_set().
6766  * Note: caller must handle the skb_ext as opaque data.
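 *
 * Example (illustrative sketch; SKB_EXT_SEC_PATH needs CONFIG_XFRM and the
 * surrounding caller, including the sp variable, is hypothetical):
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_ATOMIC);
 *
 *	if (ext)
 *		sp = __skb_ext_set(skb, SKB_EXT_SEC_PATH, ext);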
6767 */ 6768 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6769 { 6770 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6771 6772 if (new) { 6773 memset(new->offset, 0, sizeof(new->offset)); 6774 refcount_set(&new->refcnt, 1); 6775 } 6776 6777 return new; 6778 } 6779 6780 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6781 unsigned int old_active) 6782 { 6783 struct skb_ext *new; 6784 6785 if (refcount_read(&old->refcnt) == 1) 6786 return old; 6787 6788 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6789 if (!new) 6790 return NULL; 6791 6792 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6793 refcount_set(&new->refcnt, 1); 6794 6795 #ifdef CONFIG_XFRM 6796 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6797 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6798 unsigned int i; 6799 6800 for (i = 0; i < sp->len; i++) 6801 xfrm_state_hold(sp->xvec[i]); 6802 } 6803 #endif 6804 #ifdef CONFIG_MCTP_FLOWS 6805 if (old_active & (1 << SKB_EXT_MCTP)) { 6806 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6807 6808 if (flow->key) 6809 refcount_inc(&flow->key->refs); 6810 } 6811 #endif 6812 __skb_ext_put(old); 6813 return new; 6814 } 6815 6816 /** 6817 * __skb_ext_set - attach the specified extension storage to this skb 6818 * @skb: buffer 6819 * @id: extension id 6820 * @ext: extension storage previously allocated via __skb_ext_alloc() 6821 * 6822 * Existing extensions, if any, are cleared. 6823 * 6824 * Returns the pointer to the extension. 6825 */ 6826 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6827 struct skb_ext *ext) 6828 { 6829 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6830 6831 skb_ext_put(skb); 6832 newlen = newoff + skb_ext_type_len[id]; 6833 ext->chunks = newlen; 6834 ext->offset[id] = newoff; 6835 skb->extensions = ext; 6836 skb->active_extensions = 1 << id; 6837 return skb_ext_get_ptr(ext, id); 6838 } 6839 6840 /** 6841 * skb_ext_add - allocate space for given extension, COW if needed 6842 * @skb: buffer 6843 * @id: extension to allocate space for 6844 * 6845 * Allocates enough space for the given extension. 6846 * If the extension is already present, a pointer to that extension 6847 * is returned. 6848 * 6849 * If the skb was cloned, COW applies and the returned memory can be 6850 * modified without changing the extension space of clones buffers. 6851 * 6852 * Returns pointer to the extension or NULL on allocation failure. 
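 *
 * Example (illustrative sketch in the style of the MPTCP callers; error
 * handling around it is elided):
 *
 *	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
 *
 *	if (!mpext)
 *		return -ENOMEM;
 *	memset(mpext, 0, sizeof(*mpext));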
6853 */ 6854 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6855 { 6856 struct skb_ext *new, *old = NULL; 6857 unsigned int newlen, newoff; 6858 6859 if (skb->active_extensions) { 6860 old = skb->extensions; 6861 6862 new = skb_ext_maybe_cow(old, skb->active_extensions); 6863 if (!new) 6864 return NULL; 6865 6866 if (__skb_ext_exist(new, id)) 6867 goto set_active; 6868 6869 newoff = new->chunks; 6870 } else { 6871 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6872 6873 new = __skb_ext_alloc(GFP_ATOMIC); 6874 if (!new) 6875 return NULL; 6876 } 6877 6878 newlen = newoff + skb_ext_type_len[id]; 6879 new->chunks = newlen; 6880 new->offset[id] = newoff; 6881 set_active: 6882 skb->slow_gro = 1; 6883 skb->extensions = new; 6884 skb->active_extensions |= 1 << id; 6885 return skb_ext_get_ptr(new, id); 6886 } 6887 EXPORT_SYMBOL(skb_ext_add); 6888 6889 #ifdef CONFIG_XFRM 6890 static void skb_ext_put_sp(struct sec_path *sp) 6891 { 6892 unsigned int i; 6893 6894 for (i = 0; i < sp->len; i++) 6895 xfrm_state_put(sp->xvec[i]); 6896 } 6897 #endif 6898 6899 #ifdef CONFIG_MCTP_FLOWS 6900 static void skb_ext_put_mctp(struct mctp_flow *flow) 6901 { 6902 if (flow->key) 6903 mctp_key_unref(flow->key); 6904 } 6905 #endif 6906 6907 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6908 { 6909 struct skb_ext *ext = skb->extensions; 6910 6911 skb->active_extensions &= ~(1 << id); 6912 if (skb->active_extensions == 0) { 6913 skb->extensions = NULL; 6914 __skb_ext_put(ext); 6915 #ifdef CONFIG_XFRM 6916 } else if (id == SKB_EXT_SEC_PATH && 6917 refcount_read(&ext->refcnt) == 1) { 6918 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6919 6920 skb_ext_put_sp(sp); 6921 sp->len = 0; 6922 #endif 6923 } 6924 } 6925 EXPORT_SYMBOL(__skb_ext_del); 6926 6927 void __skb_ext_put(struct skb_ext *ext) 6928 { 6929 /* If this is last clone, nothing can increment 6930 * it after check passes. Avoids one atomic op. 6931 */ 6932 if (refcount_read(&ext->refcnt) == 1) 6933 goto free_now; 6934 6935 if (!refcount_dec_and_test(&ext->refcnt)) 6936 return; 6937 free_now: 6938 #ifdef CONFIG_XFRM 6939 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6940 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6941 #endif 6942 #ifdef CONFIG_MCTP_FLOWS 6943 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 6944 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 6945 #endif 6946 6947 kmem_cache_free(skbuff_ext_cache, ext); 6948 } 6949 EXPORT_SYMBOL(__skb_ext_put); 6950 #endif /* CONFIG_SKB_EXTENSIONS */ 6951 6952 static void kfree_skb_napi_cache(struct sk_buff *skb) 6953 { 6954 /* if SKB is a clone, don't handle this case */ 6955 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 6956 __kfree_skb(skb); 6957 return; 6958 } 6959 6960 local_bh_disable(); 6961 __napi_kfree_skb(skb, SKB_CONSUMED); 6962 local_bh_enable(); 6963 } 6964 6965 /** 6966 * skb_attempt_defer_free - queue skb for remote freeing 6967 * @skb: buffer 6968 * 6969 * Put @skb in a per-cpu list, using the cpu which 6970 * allocated the skb/pages to reduce false sharing 6971 * and memory zone spinlock contention. 
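 *
 * Example (illustrative sketch of a receive-path consumer; callers are
 * expected to have dropped the dst and cleared the destructor first):
 *
 *	skb_dst_drop(skb);
 *	if (skb->destructor)
 *		consume_skb(skb);
 *	else
 *		skb_attempt_defer_free(skb);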
6972 */ 6973 void skb_attempt_defer_free(struct sk_buff *skb) 6974 { 6975 int cpu = skb->alloc_cpu; 6976 struct softnet_data *sd; 6977 unsigned int defer_max; 6978 bool kick; 6979 6980 if (cpu == raw_smp_processor_id() || 6981 WARN_ON_ONCE(cpu >= nr_cpu_ids) || 6982 !cpu_online(cpu)) { 6983 nodefer: kfree_skb_napi_cache(skb); 6984 return; 6985 } 6986 6987 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 6988 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 6989 6990 sd = &per_cpu(softnet_data, cpu); 6991 defer_max = READ_ONCE(sysctl_skb_defer_max); 6992 if (READ_ONCE(sd->defer_count) >= defer_max) 6993 goto nodefer; 6994 6995 spin_lock_bh(&sd->defer_lock); 6996 /* Send an IPI every time queue reaches half capacity. */ 6997 kick = sd->defer_count == (defer_max >> 1); 6998 /* Paired with the READ_ONCE() few lines above */ 6999 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 7000 7001 skb->next = sd->defer_list; 7002 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 7003 WRITE_ONCE(sd->defer_list, skb); 7004 spin_unlock_bh(&sd->defer_lock); 7005 7006 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 7007 * if we are unlucky enough (this seems very unlikely). 7008 */ 7009 if (unlikely(kick)) 7010 kick_defer_list_purge(sd, cpu); 7011 } 7012 7013 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 7014 size_t offset, size_t len) 7015 { 7016 const char *kaddr; 7017 __wsum csum; 7018 7019 kaddr = kmap_local_page(page); 7020 csum = csum_partial(kaddr + offset, len, 0); 7021 kunmap_local(kaddr); 7022 skb->csum = csum_block_add(skb->csum, csum, skb->len); 7023 } 7024 7025 /** 7026 * skb_splice_from_iter - Splice (or copy) pages to skbuff 7027 * @skb: The buffer to add pages to 7028 * @iter: Iterator representing the pages to be added 7029 * @maxsize: Maximum amount of pages to be added 7030 * @gfp: Allocation flags 7031 * 7032 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 7033 * extracts pages from an iterator and adds them to the socket buffer if 7034 * possible, copying them to fragments if not possible (such as if they're slab 7035 * pages). 7036 * 7037 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 7038 * insufficient space in the buffer to transfer anything. 
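 *
 * Example (illustrative sketch of a sendmsg() implementation; copy and the
 * error label are hypothetical):
 *
 *	if (msg->msg_flags & MSG_SPLICE_PAGES) {
 *		ret = skb_splice_from_iter(skb, &msg->msg_iter, copy,
 *					   sk->sk_allocation);
 *		if (ret < 0)
 *			goto error;
 *		copy = ret;
 *	}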
7039 */ 7040 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 7041 ssize_t maxsize, gfp_t gfp) 7042 { 7043 size_t frag_limit = READ_ONCE(sysctl_max_skb_frags); 7044 struct page *pages[8], **ppages = pages; 7045 ssize_t spliced = 0, ret = 0; 7046 unsigned int i; 7047 7048 while (iter->count > 0) { 7049 ssize_t space, nr, len; 7050 size_t off; 7051 7052 ret = -EMSGSIZE; 7053 space = frag_limit - skb_shinfo(skb)->nr_frags; 7054 if (space < 0) 7055 break; 7056 7057 /* We might be able to coalesce without increasing nr_frags */ 7058 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 7059 7060 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 7061 if (len <= 0) { 7062 ret = len ?: -EIO; 7063 break; 7064 } 7065 7066 i = 0; 7067 do { 7068 struct page *page = pages[i++]; 7069 size_t part = min_t(size_t, PAGE_SIZE - off, len); 7070 7071 ret = -EIO; 7072 if (WARN_ON_ONCE(!sendpage_ok(page))) 7073 goto out; 7074 7075 ret = skb_append_pagefrags(skb, page, off, part, 7076 frag_limit); 7077 if (ret < 0) { 7078 iov_iter_revert(iter, len); 7079 goto out; 7080 } 7081 7082 if (skb->ip_summed == CHECKSUM_NONE) 7083 skb_splice_csum_page(skb, page, off, part); 7084 7085 off = 0; 7086 spliced += part; 7087 maxsize -= part; 7088 len -= part; 7089 } while (len > 0); 7090 7091 if (maxsize <= 0) 7092 break; 7093 } 7094 7095 out: 7096 skb_len_add(skb, spliced); 7097 return spliced ?: ret; 7098 } 7099 EXPORT_SYMBOL(skb_splice_from_iter); 7100 7101 static __always_inline 7102 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7103 size_t len, void *to, void *priv2) 7104 { 7105 __wsum *csum = priv2; 7106 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7107 7108 *csum = csum_block_add(*csum, next, progress); 7109 return 0; 7110 } 7111 7112 static __always_inline 7113 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7114 size_t len, void *to, void *priv2) 7115 { 7116 __wsum next, *csum = priv2; 7117 7118 next = csum_and_copy_from_user(iter_from, to + progress, len); 7119 *csum = csum_block_add(*csum, next, progress); 7120 return next ? 0 : len; 7121 } 7122 7123 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7124 __wsum *csum, struct iov_iter *i) 7125 { 7126 size_t copied; 7127 7128 if (WARN_ON_ONCE(!i->data_source)) 7129 return false; 7130 copied = iterate_and_advance2(i, bytes, addr, csum, 7131 copy_from_user_iter_csum, 7132 memcpy_from_iter_csum); 7133 if (likely(copied == bytes)) 7134 return true; 7135 iov_iter_revert(i, copied); 7136 return false; 7137 } 7138 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7139
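/* Example usage of csum_and_copy_from_iter_full() (illustrative sketch, not an
 * in-tree caller): copy len bytes of user data into a linear destination while
 * folding them into an skb checksum; to, len and odd are hypothetical locals.
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 *		return -EFAULT;
 *	skb->csum = csum_block_add(skb->csum, csum, odd);
 */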