// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "netmem_priv.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
					       GRO_MAX_HEAD_PAD))

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE					\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?				\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :		\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM						\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long
 * as the netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

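/* A hedged sketch (not part of this file) of the cast the asserts above
 * protect. Assuming every frag of @skb holds a real page, a caller such as
 * kcm_write_msgs() can hand the frag array straight to the iov_iter layer:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, ITER_SOURCE,
 *		      (const struct bio_vec *)skb_shinfo(skb)->frags,
 *		      skb_shinfo(skb)->nr_frags, skb->data_len);
 */
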
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);

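/* A minimal registration sketch (illustrative only, not a caller in this
 * file): a subsystem owning a slot in enum skb_drop_reason_subsys could
 * plug in its reason strings like this, assuming the named slot is its own:
 *
 *	static const char * const my_reasons[] = { "FOO", "BAR" };
 *	static const struct drop_reason_list my_list = {
 *		.reasons   = my_reasons,
 *		.n_reasons = ARRAY_SIZE(my_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
 *				     &my_list);
 */
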
/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	local_lock_t bh_lock;
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	data = __page_frag_alloc_align(&nc->page, fragsz,
				       GFP_ATOMIC | __GFP_NOWARN, align_mask);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	return data;
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		fragsz = SKB_DATA_ALIGN(fragsz);
		data = __page_frag_alloc_align(nc, fragsz,
					       GFP_ATOMIC | __GFP_NOWARN,
					       align_mask);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag_align(fragsz, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

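/* A hedged usage sketch (illustrative; buflen is an assumption): a driver
 * wanting a small percpu-cached buffer for an RX descriptor can combine the
 * frag allocator with build_skb(), defined further below:
 *
 *	unsigned int size = SKB_DATA_ALIGN(buflen) +
 *			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(size);
 *
 *	if (buf)
 *		skb = build_skb(buf, size);
 */
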
static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
						      GFP_ATOMIC | __GFP_NOWARN,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count)) {
			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
			return NULL;
		}
	}

	skb = nc->skb_cache[--nc->skb_count];
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));

	return skb;
}

/**
 * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
 * @skbs: pointer to an at least @n-sized array to fill with skb pointers
 * @n: number of entries to provide
 *
 * Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes
 * the pointers into the provided array @skbs. If there are fewer entries
 * available, tries to replenish the cache and bulk-allocates the diff from
 * the MM layer if needed.
 * The heads are being zeroed with either memset() or %__GFP_ZERO, so they are
 * ready for {,__}build_skb_around() and don't have any data buffers attached.
 * Must be called *only* from the BH context.
 *
 * Return: number of successfully allocated skbs (@n if no actual allocation
 * needed or kmem_cache_alloc_bulk() didn't fail).
 */
u32 napi_skb_cache_get_bulk(void **skbs, u32 n)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 bulk, total = n;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);

	if (nc->skb_count >= n)
		goto get;

	/* Not enough cached skbs. Try refilling the cache first */
	bulk = min(NAPI_SKB_CACHE_SIZE - nc->skb_count, NAPI_SKB_CACHE_BULK);
	nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
					       GFP_ATOMIC | __GFP_NOWARN, bulk,
					       &nc->skb_cache[nc->skb_count]);
	if (likely(nc->skb_count >= n))
		goto get;

	/* Still not enough. Bulk-allocate the missing part directly, zeroed */
	n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
				   GFP_ATOMIC | __GFP_ZERO | __GFP_NOWARN,
				   n - nc->skb_count, &skbs[nc->skb_count]);
	if (likely(nc->skb_count >= n))
		goto get;

	/* kmem_cache didn't allocate the number we need, limit the output */
	total -= n - nc->skb_count;
	n = nc->skb_count;

get:
	for (u32 base = nc->skb_count - n, i = 0; i < n; i++) {
		u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache);

		skbs[i] = nc->skb_cache[base + i];

		kasan_mempool_unpoison_object(skbs[i], cache_size);
		memset(skbs[i], 0, offsetof(struct sk_buff, tail));
	}

	nc->skb_count -= n;
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	return total;
}
EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk);

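/* A minimal sketch of the bulk API above (illustrative only), e.g. from a
 * driver's BH-context completion path:
 *
 *	void *skbs[8];
 *	u32 i, got = napi_skb_cache_get_bulk(skbs, 8);
 *
 *	for (i = 0; i < got; i++) {
 *		struct sk_buff *skb = skbs[i];
 *
 *		// each head is zeroed and ready for {,__}build_skb_around()
 *	}
 */
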
static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
					 unsigned int size)
{
	struct skb_shared_info *shinfo;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
				     unsigned int *size)
{
	void *resized;

	/* Must find the allocation size (and grow it to match). */
	*size = ksize(data);
	/* krealloc() will immediately return "data" when
	 * "ksize(data)" is requested: it is the existing upper
	 * bounds. As a result, GFP_ATOMIC will be ignored. Note
	 * that this "new" pointer needs to be passed back to the
	 * caller for use so the __alloc_size hinting will be
	 * tracked correctly.
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * Returns the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

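/* A hedged sketch of the RX pattern the notes above describe (illustrative
 * only; rx_buf_len and pkt_len are assumptions, not taken from this file):
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + rx_buf_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *data;		// page-allocator memory handed to the NIC
 *
 *	// ...after DMA completion:
 *	skb = build_skb(data, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// headroom added by the driver
 *		skb_put(skb, pkt_len);		// frame length from the descriptor
 *	}
 */
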
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller whether emergency pfmemalloc reserves are being used. If they
 * are and the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC
 * reserves may be used. Otherwise, the packet data may be discarded until
 * enough memory is free.
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
					    flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					    node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

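/* Why the pfmemalloc state is propagated: a hedged sketch of the
 * consumer-side check, modelled on sk_filter_trim_cap() (illustrative
 * only). Packets built from the emergency reserves must only reach
 * sockets entitled to them:
 *
 *	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
 *		return -ENOMEM;	// this peer may not consume the reserves
 */
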
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);

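/* A hedged usage sketch (illustrative only; hlen, plen and payload are
 * assumptions): callers normally reach __alloc_skb() through alloc_skb(),
 * then carve headroom out of the all-tailroom buffer themselves:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_KERNEL);
 *
 *	if (skb) {
 *		skb_reserve(skb, hlen);			// reserve headroom
 *		skb_put_data(skb, payload, plen);	// append the payload
 *	}
 */
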
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
	} else {
		local_bh_disable();
		local_lock_nested_bh(&napi_alloc_cache.bh_lock);

		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);

		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

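/* A hedged sketch of the headroom point made above (illustrative only;
 * frame_len is an assumption): a driver asks only for the frame length and
 * still gets NET_SKB_PAD bytes of headroom for free:
 *
 *	skb = netdev_alloc_skb(dev, frame_len);
 *	// on success, skb_headroom(skb) == NET_SKB_PAD without any extra
 *	// accounting by the caller
 */
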
/**
 *	napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	nc = this_cpu_ptr(&napi_alloc_cache);

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);

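/* A minimal poll-loop sketch (illustrative; desc_len and the loop shape are
 * assumptions): the NAPI instance passed in lets the allocation hit the
 * IRQ-free percpu cache described above:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		while (work_done < budget) {
 *			struct sk_buff *skb = napi_alloc_skb(napi, desc_len);
 *
 *			if (!skb)
 *				break;
 *			// ...fill and hand to napi_gro_receive()...
 *		}
 *		return work_done;
 *	}
 */
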
void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
			    int off, int size, unsigned int truesize)
{
	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_fill_netmem_desc(skb, i, netmem, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
		    unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	u32 size, truesize, len, max_head_size, off;
	struct sk_buff *skb = *pskb, *nskb;
	int err, i, head_off;
	void *data;

	/* XDP does not support fraglist so we need to linearize
	 * the skb.
	 */
	if (skb_has_frag_list(skb))
		return -EOPNOTSUPP;

	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
		return -ENOMEM;

	size = min_t(u32, skb->len, max_head_size);
	truesize = SKB_HEAD_ALIGN(size) + headroom;
	data = page_pool_dev_alloc_va(pool, &truesize);
	if (!data)
		return -ENOMEM;

	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_free_va(pool, data, true);
		return -ENOMEM;
	}

	skb_reserve(nskb, headroom);
	skb_copy_header(nskb, skb);
	skb_mark_for_recycle(nskb);

	err = skb_copy_bits(skb, 0, nskb->data, size);
	if (err) {
		consume_skb(nskb);
		return err;
	}
	skb_put(nskb, size);

	head_off = skb_headroom(nskb) - skb_headroom(skb);
	skb_headers_offset_update(nskb, head_off);

	off = size;
	len = skb->len - off;
	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
		struct page *page;
		u32 page_off;

		size = min_t(u32, len, PAGE_SIZE);
		truesize = size;

		page = page_pool_dev_alloc(pool, &page_off, &truesize);
		if (!page) {
			consume_skb(nskb);
			return -ENOMEM;
		}

		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
		err = skb_copy_bits(skb, off, page_address(page) + page_off,
				    size);
		if (err) {
			consume_skb(nskb);
			return err;
		}

		len -= size;
		off += size;
	}

	consume_skb(skb);
	*pskb = nskb;

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);

int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
			 const struct bpf_prog *prog)
{
	if (!prog->aux->xdp_has_frags)
		return -EINVAL;

	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);

#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
	netmem = netmem_compound_head(netmem);

	if (unlikely(!netmem_is_pp(netmem)))
		return false;

	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}

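/* A hedged driver-side sketch of page_pool recycling (illustrative only):
 * buffers drawn from a pool are handed to the stack with the recycle bit
 * set, so skb_free_head()/napi_pp_put_page() above can return them to the
 * pool instead of the page allocator:
 *
 *	struct page *page = page_pool_dev_alloc_pages(pool);
 *
 *	// ...DMA the frame into the page, build the skb around it...
 *	skb_mark_for_recycle(skb);	// sets skb->pp_recycle
 */
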
/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb: page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of an skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	netmem_ref head_netmem;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
		if (likely(netmem_is_pp(head_netmem)))
			page_pool_ref_netmem(head_netmem);
		else
			page_ref_inc(netmem_to_page(head_netmem));
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb, reason);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

static __always_inline
bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
			  enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return false;

	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
			       u32_get_bits(reason,
					    SKB_DROP_REASON_SUBSYS_MASK) >=
				SKB_DROP_REASON_SUBSYS_NUM);

	if (reason == SKB_CONSUMED)
		trace_consume_skb(skb, __builtin_return_address(0));
	else
		trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
	return true;
}

/**
 *	sk_skb_reason_drop - free an sk_buff with special reason
 *	@sk: the socket to receive @skb, or NULL if not applicable
 *	@skb: buffer to free
 *	@reason: reason why this skb is dropped
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Meanwhile, pass the receiving socket and drop reason to
 *	the 'kfree_skb' tracepoint.
 */
void __fix_address
sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (__sk_skb_reason_drop(sk, skb, reason))
		__kfree_skb(skb);
}
EXPORT_SYMBOL(sk_skb_reason_drop);

#define KFREE_SKB_BULK_SIZE	16

struct skb_free_array {
	unsigned int skb_count;
	void *skb_array[KFREE_SKB_BULK_SIZE];
};

static void kfree_skb_add_bulk(struct sk_buff *skb,
			       struct skb_free_array *sa,
			       enum skb_drop_reason reason)
{
	/* if SKB is a clone, don't handle this case */
	if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, reason);
	sa->skb_array[sa->skb_count++] = skb;

	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
		kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
				     sa->skb_array);
		sa->skb_count = 0;
	}
}

void __fix_address
kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
{
	struct skb_free_array sa;

	sa.skb_count = 0;

	while (segs) {
		struct sk_buff *next = segs->next;

		if (__sk_skb_reason_drop(NULL, segs, reason)) {
			skb_poison_list(segs);
			kfree_skb_add_bulk(segs, &sa, reason);
		}

		segs = next;
	}

	if (sa.skb_count)
		kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);

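/* A hedged caller-side sketch (illustrative only): protocol code typically
 * reaches the drop path above through kfree_skb_reason(), e.g.:
 *
 *	if (!sk) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
 *		return 0;
 *	}
 */
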
/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
	       "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
	       "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->mac_len,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
	       skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
	       skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
	       skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
	       skb->inner_network_header, skb->inner_transport_header);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		if (skb_frag_is_net_iov(frag)) {
			printk("%sskb frag %d: not readable\n", level, i);
			len -= skb_frag_size(frag);
			if (!len)
				break;
			continue;
		}

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);

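/* A hedged usage sketch (illustrative only), honouring the rate-limit rule
 * stated above:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */
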
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb) {
		skb_zcopy_downgrade_managed(skb);
		skb_zcopy_clear(skb, true);
	}
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb, __builtin_return_address(0));
	skb_release_data(skb, SKB_CONSUMED);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	if (!kasan_mempool_poison_object(skb))
		return;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_mempool_unpoison_object(nc->skb_cache[i],
						kmem_cache_size(net_hotdata.skbuff_cache));

		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
}

void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_all(skb, reason);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb, __builtin_return_address(0));

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, SKB_CONSUMED);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

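/* A hedged TX-completion sketch (illustrative; the ring helpers named here
 * are hypothetical): drivers call napi_consume_skb() from their poll
 * routine so consumed skbs can feed the percpu NAPI cache above instead of
 * going through the slab free path:
 *
 *	while (tx_ring_has_completions(ring)) {
 *		struct sk_buff *skb = tx_ring_next_completed_skb(ring);
 *
 *		napi_consume_skb(skb, budget);	// budget from the poll callback
 *	}
 */
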
/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in the headers group.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers, &old->headers, sizeof(new->headers));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
	CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst, SKB_CONSUMED);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	rlim = rlimit(RLIMIT_MEMLOCK);
	if (rlim == RLIM_INFINITY)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlim >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	old_pg = atomic_long_read(&user->locked_vm);
	do {
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

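/* A worked example of the accounting above (illustrative arithmetic,
 * assuming 4 KiB pages): pinning size = 10000 bytes charges
 * num_pg = (10000 >> 12) + 2 = 4 pages against RLIMIT_MEMLOCK, the worst
 * case for a range that straddles a page boundary at both ends.
 */
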
void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info_msgzc *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	refcount_set(&uarg->ubuf.refcnt, 1);
	sock_hold(sk);

	return &uarg->ubuf;
}

static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg)
{
	if (uarg) {
		struct ubuf_info_msgzc *uarg_zc;
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* there might be non MSG_ZEROCOPY users */
		if (uarg->ops != &msg_zerocopy_ubuf_ops)
			return NULL;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		uarg_zc = uarg_to_msgzc(uarg);
		bytelen = uarg_zc->bytelen + size;
		if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
			if (mm_account_pinned_pages(&uarg_zc->mmp, size))
				return NULL;
			uarg_zc->len++;
			uarg_zc->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				net_zcopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return msg_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	bool is_zerocopy;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;
	is_zerocopy = uarg->zerocopy;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!is_zerocopy)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}

static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
				  bool success)
{
	struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);

	uarg_zc->zerocopy = uarg_zc->zerocopy & success;

	if (refcount_dec_and_test(&uarg->refcnt))
		__msg_zerocopy_callback(uarg_zc);
}

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;

	atomic_dec(&sk->sk_zckey);
	uarg_to_msgzc(uarg)->len--;

	if (have_uref)
		msg_zerocopy_complete(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);

const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
	.complete = msg_zerocopy_complete,
};
EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);

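/* A hedged sender-side sketch of the completion flow handled above
 * (illustrative userspace pseudocode only): the application sends with
 * MSG_ZEROCOPY and later reads the notification range [ee_info, ee_data]
 * from the socket error queue:
 *
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	// ...later:
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);   // carries SO_EE_ORIGIN_ZEROCOPY
 */
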
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	int err, orig_len = skb->len;

	if (uarg->ops->link_skb) {
		err = uarg->ops->link_skb(skb, uarg);
		if (err)
			return err;
	} else {
		struct ubuf_info *orig_uarg = skb_zcopy(skb);

		/* An skb can only point to one uarg. This edge case happens
		 * when TCP appends to an skb, but zerocopy_realloc triggered
		 * a new alloc.
		 */
		if (orig_uarg && uarg != orig_uarg)
			return -EEXIST;
	}

	err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
{
	int i;

	skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_ref(skb, i);
}
EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

1943 */ 1944 order = 0; 1945 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1946 order++; 1947 psize = (PAGE_SIZE << order); 1948 1949 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1950 for (i = 0; i < new_frags; i++) { 1951 page = alloc_pages(gfp_mask | __GFP_COMP, order); 1952 if (!page) { 1953 while (head) { 1954 struct page *next = (struct page *)page_private(head); 1955 put_page(head); 1956 head = next; 1957 } 1958 return -ENOMEM; 1959 } 1960 set_page_private(page, (unsigned long)head); 1961 head = page; 1962 } 1963 1964 page = head; 1965 d_off = 0; 1966 for (i = 0; i < num_frags; i++) { 1967 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1968 u32 p_off, p_len, copied; 1969 struct page *p; 1970 u8 *vaddr; 1971 1972 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1973 p, p_off, p_len, copied) { 1974 u32 copy, done = 0; 1975 vaddr = kmap_atomic(p); 1976 1977 while (done < p_len) { 1978 if (d_off == psize) { 1979 d_off = 0; 1980 page = (struct page *)page_private(page); 1981 } 1982 copy = min_t(u32, psize - d_off, p_len - done); 1983 memcpy(page_address(page) + d_off, 1984 vaddr + p_off + done, copy); 1985 done += copy; 1986 d_off += copy; 1987 } 1988 kunmap_atomic(vaddr); 1989 } 1990 } 1991 1992 /* skb frags release userspace buffers */ 1993 for (i = 0; i < num_frags; i++) 1994 skb_frag_unref(skb, i); 1995 1996 /* skb frags point to kernel buffers */ 1997 for (i = 0; i < new_frags - 1; i++) { 1998 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); 1999 head = (struct page *)page_private(head); 2000 } 2001 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, 2002 d_off); 2003 skb_shinfo(skb)->nr_frags = new_frags; 2004 2005 release: 2006 skb_zcopy_clear(skb, false); 2007 return 0; 2008 } 2009 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 2010 2011 /** 2012 * skb_clone - duplicate an sk_buff 2013 * @skb: buffer to clone 2014 * @gfp_mask: allocation priority 2015 * 2016 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 2017 * copies share the same packet data but not structure. The new 2018 * buffer has a reference count of 1. If the allocation fails the 2019 * function returns %NULL otherwise the new buffer is returned. 2020 * 2021 * If this function is called from an interrupt gfp_mask() must be 2022 * %GFP_ATOMIC. 
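* * A minimal usage sketch (illustrative only; the variable name nskb is hypothetical): * * struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); * * if (!nskb) * return NET_RX_DROP; * * The clone shares the packet data with @skb; a caller that intends to write must still make the data private first (e.g. with pskb_expand_head()).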
2023 */ 2024 2025 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 2026 { 2027 struct sk_buff_fclones *fclones = container_of(skb, 2028 struct sk_buff_fclones, 2029 skb1); 2030 struct sk_buff *n; 2031 2032 if (skb_orphan_frags(skb, gfp_mask)) 2033 return NULL; 2034 2035 if (skb->fclone == SKB_FCLONE_ORIG && 2036 refcount_read(&fclones->fclone_ref) == 1) { 2037 n = &fclones->skb2; 2038 refcount_set(&fclones->fclone_ref, 2); 2039 n->fclone = SKB_FCLONE_CLONE; 2040 } else { 2041 if (skb_pfmemalloc(skb)) 2042 gfp_mask |= __GFP_MEMALLOC; 2043 2044 n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); 2045 if (!n) 2046 return NULL; 2047 2048 n->fclone = SKB_FCLONE_UNAVAILABLE; 2049 } 2050 2051 return __skb_clone(n, skb); 2052 } 2053 EXPORT_SYMBOL(skb_clone); 2054 2055 void skb_headers_offset_update(struct sk_buff *skb, int off) 2056 { 2057 /* Only adjust this if it actually is csum_start rather than csum */ 2058 if (skb->ip_summed == CHECKSUM_PARTIAL) 2059 skb->csum_start += off; 2060 /* {transport,network,mac}_header and tail are relative to skb->head */ 2061 skb->transport_header += off; 2062 skb->network_header += off; 2063 if (skb_mac_header_was_set(skb)) 2064 skb->mac_header += off; 2065 skb->inner_transport_header += off; 2066 skb->inner_network_header += off; 2067 skb->inner_mac_header += off; 2068 } 2069 EXPORT_SYMBOL(skb_headers_offset_update); 2070 2071 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 2072 { 2073 __copy_skb_header(new, old); 2074 2075 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 2076 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 2077 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 2078 } 2079 EXPORT_SYMBOL(skb_copy_header); 2080 2081 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 2082 { 2083 if (skb_pfmemalloc(skb)) 2084 return SKB_ALLOC_RX; 2085 return 0; 2086 } 2087 2088 /** 2089 * skb_copy - create private copy of an sk_buff 2090 * @skb: buffer to copy 2091 * @gfp_mask: allocation priority 2092 * 2093 * Make a copy of both an &sk_buff and its data. This is used when the 2094 * caller wishes to modify the data and needs a private copy of the 2095 * data to alter. Returns %NULL on failure or the pointer to the buffer 2096 * on success. The returned buffer has a reference count of 1. 2097 * 2098 * As a by-product, this function converts a non-linear &sk_buff to a linear 2099 * one, so that the &sk_buff becomes completely private and the caller is 2100 * allowed to modify all the data of the returned buffer. This means that this 2101 * function is not recommended for use in circumstances when only the 2102 * header is going to be modified. Use pskb_copy() instead.
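* * Illustrative contrast (hypothetical caller): to privately rewrite payload bytes use skb_copy(); to rewrite only headers, pskb_copy() keeps the paged data shared and is cheaper: * * nskb = skb_copy(skb, GFP_ATOMIC); (fully private copy) * nskb = pskb_copy(skb, GFP_ATOMIC); (private header only)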
2103 */ 2104 2105 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 2106 { 2107 struct sk_buff *n; 2108 unsigned int size; 2109 int headerlen; 2110 2111 if (!skb_frags_readable(skb)) 2112 return NULL; 2113 2114 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2115 return NULL; 2116 2117 headerlen = skb_headroom(skb); 2118 size = skb_end_offset(skb) + skb->data_len; 2119 n = __alloc_skb(size, gfp_mask, 2120 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 2121 if (!n) 2122 return NULL; 2123 2124 /* Set the data pointer */ 2125 skb_reserve(n, headerlen); 2126 /* Set the tail pointer and length */ 2127 skb_put(n, skb->len); 2128 2129 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 2130 2131 skb_copy_header(n, skb); 2132 return n; 2133 } 2134 EXPORT_SYMBOL(skb_copy); 2135 2136 /** 2137 * __pskb_copy_fclone - create copy of an sk_buff with private head. 2138 * @skb: buffer to copy 2139 * @headroom: headroom of new skb 2140 * @gfp_mask: allocation priority 2141 * @fclone: if true allocate the copy of the skb from the fclone 2142 * cache instead of the head cache; it is recommended to set this 2143 * to true for the cases where the copy will likely be cloned 2144 * 2145 * Make a copy of both an &sk_buff and part of its data, located 2146 * in its header. Fragmented data remains shared. This is used when 2147 * the caller wishes to modify only the header of the &sk_buff and needs 2148 * a private copy of the header to alter. Returns %NULL on failure 2149 * or the pointer to the buffer on success. 2150 * The returned buffer has a reference count of 1. 2151 */ 2152 2153 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 2154 gfp_t gfp_mask, bool fclone) 2155 { 2156 unsigned int size = skb_headlen(skb) + headroom; 2157 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 2158 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 2159 2160 if (!n) 2161 goto out; 2162 2163 /* Set the data pointer */ 2164 skb_reserve(n, headroom); 2165 /* Set the tail pointer and length */ 2166 skb_put(n, skb_headlen(skb)); 2167 /* Copy the bytes */ 2168 skb_copy_from_linear_data(skb, n->data, n->len); 2169 2170 n->truesize += skb->data_len; 2171 n->data_len = skb->data_len; 2172 n->len = skb->len; 2173 2174 if (skb_shinfo(skb)->nr_frags) { 2175 int i; 2176 2177 if (skb_orphan_frags(skb, gfp_mask) || 2178 skb_zerocopy_clone(n, skb, gfp_mask)) { 2179 kfree_skb(n); 2180 n = NULL; 2181 goto out; 2182 } 2183 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2184 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 2185 skb_frag_ref(skb, i); 2186 } 2187 skb_shinfo(n)->nr_frags = i; 2188 } 2189 2190 if (skb_has_frag_list(skb)) { 2191 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 2192 skb_clone_fraglist(n); 2193 } 2194 2195 skb_copy_header(n, skb); 2196 out: 2197 return n; 2198 } 2199 EXPORT_SYMBOL(__pskb_copy_fclone); 2200 2201 /** 2202 * pskb_expand_head - reallocate header of &sk_buff 2203 * @skb: buffer to reallocate 2204 * @nhead: room to add at head 2205 * @ntail: room to add at tail 2206 * @gfp_mask: allocation priority 2207 * 2208 * Expands (or creates an identical copy, if @nhead and @ntail are zero) 2209 * the header of @skb. The &sk_buff itself is not changed. The &sk_buff MUST 2210 * have a reference count of 1. Returns zero on success or a negative error 2211 * code if expansion failed. In the latter case, the &sk_buff is not changed.
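* * An illustrative call pattern (hypothetical caller; NET_SKB_PAD is just an example amount of extra headroom): * * if (pskb_expand_head(skb, NET_SKB_PAD, 0, GFP_ATOMIC)) * return -ENOMEM;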
2212 * 2213 * All the pointers pointing into skb header may change and must be 2214 * reloaded after call to this function. 2215 */ 2216 2217 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 2218 gfp_t gfp_mask) 2219 { 2220 unsigned int osize = skb_end_offset(skb); 2221 unsigned int size = osize + nhead + ntail; 2222 long off; 2223 u8 *data; 2224 int i; 2225 2226 BUG_ON(nhead < 0); 2227 2228 BUG_ON(skb_shared(skb)); 2229 2230 skb_zcopy_downgrade_managed(skb); 2231 2232 if (skb_pfmemalloc(skb)) 2233 gfp_mask |= __GFP_MEMALLOC; 2234 2235 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 2236 if (!data) 2237 goto nodata; 2238 size = SKB_WITH_OVERHEAD(size); 2239 2240 /* Copy only real data... and, alas, header. This should be 2241 * optimized for the cases when header is void. 2242 */ 2243 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 2244 2245 memcpy((struct skb_shared_info *)(data + size), 2246 skb_shinfo(skb), 2247 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 2248 2249 /* 2250 * if shinfo is shared we must drop the old head gracefully, but if it 2251 * is not we can just drop the old head and let the existing refcount 2252 * be since all we did is relocate the values 2253 */ 2254 if (skb_cloned(skb)) { 2255 if (skb_orphan_frags(skb, gfp_mask)) 2256 goto nofrags; 2257 if (skb_zcopy(skb)) 2258 refcount_inc(&skb_uarg(skb)->refcnt); 2259 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2260 skb_frag_ref(skb, i); 2261 2262 if (skb_has_frag_list(skb)) 2263 skb_clone_fraglist(skb); 2264 2265 skb_release_data(skb, SKB_CONSUMED); 2266 } else { 2267 skb_free_head(skb); 2268 } 2269 off = (data + nhead) - skb->head; 2270 2271 skb->head = data; 2272 skb->head_frag = 0; 2273 skb->data += off; 2274 2275 skb_set_end_offset(skb, size); 2276 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2277 off = nhead; 2278 #endif 2279 skb->tail += off; 2280 skb_headers_offset_update(skb, nhead); 2281 skb->cloned = 0; 2282 skb->hdr_len = 0; 2283 skb->nohdr = 0; 2284 atomic_set(&skb_shinfo(skb)->dataref, 1); 2285 2286 skb_metadata_clear(skb); 2287 2288 /* It is not generally safe to change skb->truesize. 2289 * For the moment, we really care of rx path, or 2290 * when skb is orphaned (not attached to a socket). 
2291 */ 2292 if (!skb->sk || skb->destructor == sock_edemux) 2293 skb->truesize += size - osize; 2294 2295 return 0; 2296 2297 nofrags: 2298 skb_kfree_head(data, size); 2299 nodata: 2300 return -ENOMEM; 2301 } 2302 EXPORT_SYMBOL(pskb_expand_head); 2303 2304 /* Make private copy of skb with writable head and some headroom */ 2305 2306 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 2307 { 2308 struct sk_buff *skb2; 2309 int delta = headroom - skb_headroom(skb); 2310 2311 if (delta <= 0) 2312 skb2 = pskb_copy(skb, GFP_ATOMIC); 2313 else { 2314 skb2 = skb_clone(skb, GFP_ATOMIC); 2315 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 2316 GFP_ATOMIC)) { 2317 kfree_skb(skb2); 2318 skb2 = NULL; 2319 } 2320 } 2321 return skb2; 2322 } 2323 EXPORT_SYMBOL(skb_realloc_headroom); 2324 2325 /* Note: We plan to rework this in linux-6.4 */ 2326 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) 2327 { 2328 unsigned int saved_end_offset, saved_truesize; 2329 struct skb_shared_info *shinfo; 2330 int res; 2331 2332 saved_end_offset = skb_end_offset(skb); 2333 saved_truesize = skb->truesize; 2334 2335 res = pskb_expand_head(skb, 0, 0, pri); 2336 if (res) 2337 return res; 2338 2339 skb->truesize = saved_truesize; 2340 2341 if (likely(skb_end_offset(skb) == saved_end_offset)) 2342 return 0; 2343 2344 /* We can not change skb->end if the original or new value 2345 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). 2346 */ 2347 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || 2348 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { 2349 /* We think this path should not be taken. 2350 * Add a temporary trace to warn us just in case. 2351 */ 2352 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", 2353 saved_end_offset, skb_end_offset(skb)); 2354 WARN_ON_ONCE(1); 2355 return 0; 2356 } 2357 2358 shinfo = skb_shinfo(skb); 2359 2360 /* We are about to change back skb->end, 2361 * we need to move skb_shinfo() to its new location. 2362 */ 2363 memmove(skb->head + saved_end_offset, 2364 shinfo, 2365 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); 2366 2367 skb_set_end_offset(skb, saved_end_offset); 2368 2369 return 0; 2370 } 2371 2372 /** 2373 * skb_expand_head - reallocate header of &sk_buff 2374 * @skb: buffer to reallocate 2375 * @headroom: needed headroom 2376 * 2377 * Unlike skb_realloc_headroom, this one does not allocate a new skb 2378 * if possible; copies skb->sk to new skb as needed 2379 * and frees original skb in case of failures. 2380 * 2381 * It expect increased headroom and generates warning otherwise. 2382 */ 2383 2384 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) 2385 { 2386 int delta = headroom - skb_headroom(skb); 2387 int osize = skb_end_offset(skb); 2388 struct sock *sk = skb->sk; 2389 2390 if (WARN_ONCE(delta <= 0, 2391 "%s is expecting an increase in the headroom", __func__)) 2392 return skb; 2393 2394 delta = SKB_DATA_ALIGN(delta); 2395 /* pskb_expand_head() might crash, if skb is shared. 
*/ 2396 if (skb_shared(skb) || !is_skb_wmem(skb)) { 2397 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2398 2399 if (unlikely(!nskb)) 2400 goto fail; 2401 2402 if (sk) 2403 skb_set_owner_w(nskb, sk); 2404 consume_skb(skb); 2405 skb = nskb; 2406 } 2407 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) 2408 goto fail; 2409 2410 if (sk && is_skb_wmem(skb)) { 2411 delta = skb_end_offset(skb) - osize; 2412 refcount_add(delta, &sk->sk_wmem_alloc); 2413 skb->truesize += delta; 2414 } 2415 return skb; 2416 2417 fail: 2418 kfree_skb(skb); 2419 return NULL; 2420 } 2421 EXPORT_SYMBOL(skb_expand_head); 2422 2423 /** 2424 * skb_copy_expand - copy and expand sk_buff 2425 * @skb: buffer to copy 2426 * @newheadroom: new free bytes at head 2427 * @newtailroom: new free bytes at tail 2428 * @gfp_mask: allocation priority 2429 * 2430 * Make a copy of both an &sk_buff and its data and, while doing so, 2431 * allocate additional space. 2432 * 2433 * This is used when the caller wishes to modify the data and needs a 2434 * private copy of the data to alter as well as more space for new fields. 2435 * Returns %NULL on failure or the pointer to the buffer 2436 * on success. The returned buffer has a reference count of 1. 2437 * 2438 * You must pass %GFP_ATOMIC as the allocation priority if this function 2439 * is called from an interrupt. 2440 */ 2441 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 2442 int newheadroom, int newtailroom, 2443 gfp_t gfp_mask) 2444 { 2445 /* 2446 * Allocate the copy buffer 2447 */ 2448 int head_copy_len, head_copy_off; 2449 struct sk_buff *n; 2450 int oldheadroom; 2451 2452 if (!skb_frags_readable(skb)) 2453 return NULL; 2454 2455 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) 2456 return NULL; 2457 2458 oldheadroom = skb_headroom(skb); 2459 n = __alloc_skb(newheadroom + skb->len + newtailroom, 2460 gfp_mask, skb_alloc_rx_flag(skb), 2461 NUMA_NO_NODE); 2462 if (!n) 2463 return NULL; 2464 2465 skb_reserve(n, newheadroom); 2466 2467 /* Set the tail pointer and length */ 2468 skb_put(n, skb->len); 2469 2470 head_copy_len = oldheadroom; 2471 head_copy_off = 0; 2472 if (newheadroom <= head_copy_len) 2473 head_copy_len = newheadroom; 2474 else 2475 head_copy_off = newheadroom - head_copy_len; 2476 2477 /* Copy the linear header and data. */ 2478 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 2479 skb->len + head_copy_len)); 2480 2481 skb_copy_header(n, skb); 2482 2483 skb_headers_offset_update(n, newheadroom - oldheadroom); 2484 2485 return n; 2486 } 2487 EXPORT_SYMBOL(skb_copy_expand); 2488 2489 /** 2490 * __skb_pad - zero pad the tail of an skb 2491 * @skb: buffer to pad 2492 * @pad: space to pad 2493 * @free_on_error: free buffer on error 2494 * 2495 * Ensure that a buffer is followed by a padding area that is zero 2496 * filled. Used by network drivers which may DMA or transfer data 2497 * beyond the buffer end onto the wire. 2498 * 2499 * May return an error in out-of-memory cases. The skb is freed on error 2500 * if @free_on_error is true. 2501 */ 2502 2503 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 2504 { 2505 int err; 2506 int ntail; 2507 2508 /* If the skbuff is non-linear, tailroom is always zero.
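* (skb_tailroom() only accounts for the linear head, so non-linear buffers fall through to the expand/linearize path below.)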
*/ 2509 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 2510 memset(skb->data+skb->len, 0, pad); 2511 return 0; 2512 } 2513 2514 ntail = skb->data_len + pad - (skb->end - skb->tail); 2515 if (likely(skb_cloned(skb) || ntail > 0)) { 2516 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 2517 if (unlikely(err)) 2518 goto free_skb; 2519 } 2520 2521 /* FIXME: The use of this function with non-linear skb's really needs 2522 * to be audited. 2523 */ 2524 err = skb_linearize(skb); 2525 if (unlikely(err)) 2526 goto free_skb; 2527 2528 memset(skb->data + skb->len, 0, pad); 2529 return 0; 2530 2531 free_skb: 2532 if (free_on_error) 2533 kfree_skb(skb); 2534 return err; 2535 } 2536 EXPORT_SYMBOL(__skb_pad); 2537 2538 /** 2539 * pskb_put - add data to the tail of a potentially fragmented buffer 2540 * @skb: start of the buffer to use 2541 * @tail: tail fragment of the buffer to use 2542 * @len: amount of data to add 2543 * 2544 * This function extends the used data area of the potentially 2545 * fragmented buffer. @tail must be the last fragment of @skb -- or 2546 * @skb itself. If this would exceed the total buffer size the kernel 2547 * will panic. A pointer to the first byte of the extra data is 2548 * returned. 2549 */ 2550 2551 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 2552 { 2553 if (tail != skb) { 2554 skb->data_len += len; 2555 skb->len += len; 2556 } 2557 return skb_put(tail, len); 2558 } 2559 EXPORT_SYMBOL_GPL(pskb_put); 2560 2561 /** 2562 * skb_put - add data to a buffer 2563 * @skb: buffer to use 2564 * @len: amount of data to add 2565 * 2566 * This function extends the used data area of the buffer. If this would 2567 * exceed the total buffer size the kernel will panic. A pointer to the 2568 * first byte of the extra data is returned. 2569 */ 2570 void *skb_put(struct sk_buff *skb, unsigned int len) 2571 { 2572 void *tmp = skb_tail_pointer(skb); 2573 SKB_LINEAR_ASSERT(skb); 2574 skb->tail += len; 2575 skb->len += len; 2576 if (unlikely(skb->tail > skb->end)) 2577 skb_over_panic(skb, len, __builtin_return_address(0)); 2578 return tmp; 2579 } 2580 EXPORT_SYMBOL(skb_put); 2581 2582 /** 2583 * skb_push - add data to the start of a buffer 2584 * @skb: buffer to use 2585 * @len: amount of data to add 2586 * 2587 * This function extends the used data area of the buffer at the buffer 2588 * start. If this would exceed the total buffer headroom the kernel will 2589 * panic. A pointer to the first byte of the extra data is returned. 2590 */ 2591 void *skb_push(struct sk_buff *skb, unsigned int len) 2592 { 2593 skb->data -= len; 2594 skb->len += len; 2595 if (unlikely(skb->data < skb->head)) 2596 skb_under_panic(skb, len, __builtin_return_address(0)); 2597 return skb->data; 2598 } 2599 EXPORT_SYMBOL(skb_push); 2600 2601 /** 2602 * skb_pull - remove data from the start of a buffer 2603 * @skb: buffer to use 2604 * @len: amount of data to remove 2605 * 2606 * This function removes data from the start of a buffer, returning 2607 * the memory to the headroom. A pointer to the next data in the buffer 2608 * is returned. Once the data has been pulled future pushes will overwrite 2609 * the old data. 2610 */ 2611 void *skb_pull(struct sk_buff *skb, unsigned int len) 2612 { 2613 return skb_pull_inline(skb, len); 2614 } 2615 EXPORT_SYMBOL(skb_pull); 2616 2617 /** 2618 * skb_pull_data - remove data from the start of a buffer returning its 2619 * original position. 
2620 * @skb: buffer to use 2621 * @len: amount of data to remove 2622 * 2623 * This function removes data from the start of a buffer, returning 2624 * the memory to the headroom. A pointer to the original data in the buffer 2625 * is returned after checking if there is enough data to pull. Once the 2626 * data has been pulled future pushes will overwrite the old data. 2627 */ 2628 void *skb_pull_data(struct sk_buff *skb, size_t len) 2629 { 2630 void *data = skb->data; 2631 2632 if (skb->len < len) 2633 return NULL; 2634 2635 skb_pull(skb, len); 2636 2637 return data; 2638 } 2639 EXPORT_SYMBOL(skb_pull_data); 2640 2641 /** 2642 * skb_trim - remove end from a buffer 2643 * @skb: buffer to alter 2644 * @len: new length 2645 * 2646 * Cut the length of a buffer down by removing data from the tail. If 2647 * the buffer is already under the length specified it is not modified. 2648 * The skb must be linear. 2649 */ 2650 void skb_trim(struct sk_buff *skb, unsigned int len) 2651 { 2652 if (skb->len > len) 2653 __skb_trim(skb, len); 2654 } 2655 EXPORT_SYMBOL(skb_trim); 2656 2657 /* Trims skb to length len. It can change skb pointers. 2658 */ 2659 2660 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 2661 { 2662 struct sk_buff **fragp; 2663 struct sk_buff *frag; 2664 int offset = skb_headlen(skb); 2665 int nfrags = skb_shinfo(skb)->nr_frags; 2666 int i; 2667 int err; 2668 2669 if (skb_cloned(skb) && 2670 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 2671 return err; 2672 2673 i = 0; 2674 if (offset >= len) 2675 goto drop_pages; 2676 2677 for (; i < nfrags; i++) { 2678 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2679 2680 if (end < len) { 2681 offset = end; 2682 continue; 2683 } 2684 2685 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 2686 2687 drop_pages: 2688 skb_shinfo(skb)->nr_frags = i; 2689 2690 for (; i < nfrags; i++) 2691 skb_frag_unref(skb, i); 2692 2693 if (skb_has_frag_list(skb)) 2694 skb_drop_fraglist(skb); 2695 goto done; 2696 } 2697 2698 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 2699 fragp = &frag->next) { 2700 int end = offset + frag->len; 2701 2702 if (skb_shared(frag)) { 2703 struct sk_buff *nfrag; 2704 2705 nfrag = skb_clone(frag, GFP_ATOMIC); 2706 if (unlikely(!nfrag)) 2707 return -ENOMEM; 2708 2709 nfrag->next = frag->next; 2710 consume_skb(frag); 2711 frag = nfrag; 2712 *fragp = frag; 2713 } 2714 2715 if (end < len) { 2716 offset = end; 2717 continue; 2718 } 2719 2720 if (end > len && 2721 unlikely((err = pskb_trim(frag, len - offset)))) 2722 return err; 2723 2724 if (frag->next) 2725 skb_drop_list(&frag->next); 2726 break; 2727 } 2728 2729 done: 2730 if (len > skb_headlen(skb)) { 2731 skb->data_len -= skb->len - len; 2732 skb->len = len; 2733 } else { 2734 skb->len = len; 2735 skb->data_len = 0; 2736 skb_set_tail_pointer(skb, len); 2737 } 2738 2739 if (!skb->sk || skb->destructor == sock_edemux) 2740 skb_condense(skb); 2741 return 0; 2742 } 2743 EXPORT_SYMBOL(___pskb_trim); 2744 2745 /* Note : use pskb_trim_rcsum() instead of calling this directly 2746 */ 2747 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 2748 { 2749 if (skb->ip_summed == CHECKSUM_COMPLETE) { 2750 int delta = skb->len - len; 2751 2752 skb->csum = csum_block_sub(skb->csum, 2753 skb_checksum(skb, len, delta, 0), 2754 len); 2755 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2756 int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; 2757 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 2758 2759 if (offset + sizeof(__sum16) > hdlen) 2760 return -EINVAL; 2761 } 2762 return __pskb_trim(skb, len); 2763 } 2764 EXPORT_SYMBOL(pskb_trim_rcsum_slow); 2765 2766 /** 2767 * __pskb_pull_tail - advance tail of skb header 2768 * @skb: buffer to reallocate 2769 * @delta: number of bytes to advance tail 2770 * 2771 * This function makes sense only on a fragmented &sk_buff; 2772 * it expands the header, moving its tail forward and copying the 2773 * necessary data from the fragmented part. 2774 * 2775 * &sk_buff MUST have reference count of 1. 2776 * 2777 * Returns %NULL (and &sk_buff does not change) if pull failed 2778 * or value of new tail of skb in the case of success. 2779 * 2780 * All the pointers pointing into skb header may change and must be 2781 * reloaded after call to this function. 2782 */ 2783 2784 /* Moves the tail of the skb head forward, copying data from the fragmented 2785 * part when necessary. 2786 * 1. It may fail due to malloc failure. 2787 * 2. It may change skb pointers. 2788 * 2789 * It is pretty complicated. Luckily, it is called only in exceptional cases. 2790 */ 2791 void *__pskb_pull_tail(struct sk_buff *skb, int delta) 2792 { 2793 /* If the skb does not have enough free space at the tail, get a new one 2794 * plus 128 bytes for future expansions. If we have enough 2795 * room at the tail, reallocate without expansion only if the skb is cloned. 2796 */ 2797 int i, k, eat = (skb->tail + delta) - skb->end; 2798 2799 if (!skb_frags_readable(skb)) 2800 return NULL; 2801 2802 if (eat > 0 || skb_cloned(skb)) { 2803 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2804 GFP_ATOMIC)) 2805 return NULL; 2806 } 2807 2808 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2809 skb_tail_pointer(skb), delta)); 2810 2811 /* Optimization: no fragments, no reason to preestimate the 2812 * size of pulled pages. Superb. 2813 */ 2814 if (!skb_has_frag_list(skb)) 2815 goto pull_pages; 2816 2817 /* Estimate size of pulled pages. */ 2818 eat = delta; 2819 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2820 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2821 2822 if (size >= eat) 2823 goto pull_pages; 2824 eat -= size; 2825 } 2826 2827 /* If we need to update the frag list, we are in trouble. 2828 * Certainly, it is possible to add an offset to the skb data, 2829 * but taking into account that pulling is expected to 2830 * be a very rare operation, it is worth fighting against 2831 * further bloating of the skb head and crucifying ourselves here instead. 2832 * Pure masochism, indeed. 8)8) 2833 */ 2834 if (eat) { 2835 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2836 struct sk_buff *clone = NULL; 2837 struct sk_buff *insp = NULL; 2838 2839 do { 2840 if (list->len <= eat) { 2841 /* Eaten as whole. */ 2842 eat -= list->len; 2843 list = list->next; 2844 insp = list; 2845 } else { 2846 /* Eaten partially. */ 2847 if (skb_is_gso(skb) && !list->head_frag && 2848 skb_headlen(list)) 2849 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2850 2851 if (skb_shared(list)) { 2852 /* Sucks! We need to fork the list. :-( */ 2853 clone = skb_clone(list, GFP_ATOMIC); 2854 if (!clone) 2855 return NULL; 2856 insp = list->next; 2857 list = clone; 2858 } else { 2859 /* This may be pulled without 2860 * problems. */ 2861 insp = list; 2862 } 2863 if (!pskb_pull(list, eat)) { 2864 kfree_skb(clone); 2865 return NULL; 2866 } 2867 break; 2868 } 2869 } while (eat); 2870 2871 /* Free pulled out fragments.
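* Everything between the old frag_list head and insp has been copied into the linear head above, so those fragments can be consumed.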
*/ 2872 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2873 skb_shinfo(skb)->frag_list = list->next; 2874 consume_skb(list); 2875 } 2876 /* And insert new clone at head. */ 2877 if (clone) { 2878 clone->next = list; 2879 skb_shinfo(skb)->frag_list = clone; 2880 } 2881 } 2882 /* Success! Now we may commit changes to skb data. */ 2883 2884 pull_pages: 2885 eat = delta; 2886 k = 0; 2887 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2888 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2889 2890 if (size <= eat) { 2891 skb_frag_unref(skb, i); 2892 eat -= size; 2893 } else { 2894 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2895 2896 *frag = skb_shinfo(skb)->frags[i]; 2897 if (eat) { 2898 skb_frag_off_add(frag, eat); 2899 skb_frag_size_sub(frag, eat); 2900 if (!i) 2901 goto end; 2902 eat = 0; 2903 } 2904 k++; 2905 } 2906 } 2907 skb_shinfo(skb)->nr_frags = k; 2908 2909 end: 2910 skb->tail += delta; 2911 skb->data_len -= delta; 2912 2913 if (!skb->data_len) 2914 skb_zcopy_clear(skb, false); 2915 2916 return skb_tail_pointer(skb); 2917 } 2918 EXPORT_SYMBOL(__pskb_pull_tail); 2919 2920 /** 2921 * skb_copy_bits - copy bits from skb to kernel buffer 2922 * @skb: source skb 2923 * @offset: offset in source 2924 * @to: destination buffer 2925 * @len: number of bytes to copy 2926 * 2927 * Copy the specified number of bytes from the source skb to the 2928 * destination buffer. 2929 * 2930 * CAUTION ! : 2931 * If its prototype is ever changed, 2932 * check arch/{*}/net/{*}.S files, 2933 * since it is called from BPF assembly code. 2934 */ 2935 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2936 { 2937 int start = skb_headlen(skb); 2938 struct sk_buff *frag_iter; 2939 int i, copy; 2940 2941 if (offset > (int)skb->len - len) 2942 goto fault; 2943 2944 /* Copy header. */ 2945 if ((copy = start - offset) > 0) { 2946 if (copy > len) 2947 copy = len; 2948 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2949 if ((len -= copy) == 0) 2950 return 0; 2951 offset += copy; 2952 to += copy; 2953 } 2954 2955 if (!skb_frags_readable(skb)) 2956 goto fault; 2957 2958 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2959 int end; 2960 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2961 2962 WARN_ON(start > offset + len); 2963 2964 end = start + skb_frag_size(f); 2965 if ((copy = end - offset) > 0) { 2966 u32 p_off, p_len, copied; 2967 struct page *p; 2968 u8 *vaddr; 2969 2970 if (copy > len) 2971 copy = len; 2972 2973 skb_frag_foreach_page(f, 2974 skb_frag_off(f) + offset - start, 2975 copy, p, p_off, p_len, copied) { 2976 vaddr = kmap_atomic(p); 2977 memcpy(to + copied, vaddr + p_off, p_len); 2978 kunmap_atomic(vaddr); 2979 } 2980 2981 if ((len -= copy) == 0) 2982 return 0; 2983 offset += copy; 2984 to += copy; 2985 } 2986 start = end; 2987 } 2988 2989 skb_walk_frags(skb, frag_iter) { 2990 int end; 2991 2992 WARN_ON(start > offset + len); 2993 2994 end = start + frag_iter->len; 2995 if ((copy = end - offset) > 0) { 2996 if (copy > len) 2997 copy = len; 2998 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2999 goto fault; 3000 if ((len -= copy) == 0) 3001 return 0; 3002 offset += copy; 3003 to += copy; 3004 } 3005 start = end; 3006 } 3007 3008 if (!len) 3009 return 0; 3010 3011 fault: 3012 return -EFAULT; 3013 } 3014 EXPORT_SYMBOL(skb_copy_bits); 3015 3016 /* 3017 * Callback from splice_to_pipe(), if we need to release some pages 3018 * at the end of the spd in case we error'ed out in filling the pipe. 
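* (spd_fill_page() took a page reference for each page it stored; this callback drops that reference.)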
3019 */ 3020 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3021 { 3022 put_page(spd->pages[i]); 3023 } 3024 3025 static struct page *linear_to_page(struct page *page, unsigned int *len, 3026 unsigned int *offset, 3027 struct sock *sk) 3028 { 3029 struct page_frag *pfrag = sk_page_frag(sk); 3030 3031 if (!sk_page_frag_refill(sk, pfrag)) 3032 return NULL; 3033 3034 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 3035 3036 memcpy(page_address(pfrag->page) + pfrag->offset, 3037 page_address(page) + *offset, *len); 3038 *offset = pfrag->offset; 3039 pfrag->offset += *len; 3040 3041 return pfrag->page; 3042 } 3043 3044 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 3045 struct page *page, 3046 unsigned int offset) 3047 { 3048 return spd->nr_pages && 3049 spd->pages[spd->nr_pages - 1] == page && 3050 (spd->partial[spd->nr_pages - 1].offset + 3051 spd->partial[spd->nr_pages - 1].len == offset); 3052 } 3053 3054 /* 3055 * Fill page/offset/length into spd, if it can hold more pages. 3056 */ 3057 static bool spd_fill_page(struct splice_pipe_desc *spd, 3058 struct pipe_inode_info *pipe, struct page *page, 3059 unsigned int *len, unsigned int offset, 3060 bool linear, 3061 struct sock *sk) 3062 { 3063 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 3064 return true; 3065 3066 if (linear) { 3067 page = linear_to_page(page, len, &offset, sk); 3068 if (!page) 3069 return true; 3070 } 3071 if (spd_can_coalesce(spd, page, offset)) { 3072 spd->partial[spd->nr_pages - 1].len += *len; 3073 return false; 3074 } 3075 get_page(page); 3076 spd->pages[spd->nr_pages] = page; 3077 spd->partial[spd->nr_pages].len = *len; 3078 spd->partial[spd->nr_pages].offset = offset; 3079 spd->nr_pages++; 3080 3081 return false; 3082 } 3083 3084 static bool __splice_segment(struct page *page, unsigned int poff, 3085 unsigned int plen, unsigned int *off, 3086 unsigned int *len, 3087 struct splice_pipe_desc *spd, bool linear, 3088 struct sock *sk, 3089 struct pipe_inode_info *pipe) 3090 { 3091 if (!*len) 3092 return true; 3093 3094 /* skip this segment if already processed */ 3095 if (*off >= plen) { 3096 *off -= plen; 3097 return false; 3098 } 3099 3100 /* ignore any bits we already processed */ 3101 poff += *off; 3102 plen -= *off; 3103 *off = 0; 3104 3105 do { 3106 unsigned int flen = min(*len, plen); 3107 3108 if (spd_fill_page(spd, pipe, page, &flen, poff, 3109 linear, sk)) 3110 return true; 3111 poff += flen; 3112 plen -= flen; 3113 *len -= flen; 3114 } while (*len && plen); 3115 3116 return false; 3117 } 3118 3119 /* 3120 * Map linear and fragment data from the skb to spd. It reports true if the 3121 * pipe is full or if we already spliced the requested length. 3122 */ 3123 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 3124 unsigned int *offset, unsigned int *len, 3125 struct splice_pipe_desc *spd, struct sock *sk) 3126 { 3127 int seg; 3128 struct sk_buff *iter; 3129 3130 /* map the linear part : 3131 * If skb->head_frag is set, this 'linear' part is backed by a 3132 * fragment, and if the head is not shared with any clones then 3133 * we can avoid a copy since we own the head portion of this page. 
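* skb_head_is_locked() below tells __splice_segment() whether the head must be treated as linear and therefore copied via linear_to_page().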
3134 */ 3135 if (__splice_segment(virt_to_page(skb->data), 3136 (unsigned long) skb->data & (PAGE_SIZE - 1), 3137 skb_headlen(skb), 3138 offset, len, spd, 3139 skb_head_is_locked(skb), 3140 sk, pipe)) 3141 return true; 3142 3143 /* 3144 * then map the fragments 3145 */ 3146 if (!skb_frags_readable(skb)) 3147 return false; 3148 3149 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 3150 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 3151 3152 if (WARN_ON_ONCE(!skb_frag_page(f))) 3153 return false; 3154 3155 if (__splice_segment(skb_frag_page(f), 3156 skb_frag_off(f), skb_frag_size(f), 3157 offset, len, spd, false, sk, pipe)) 3158 return true; 3159 } 3160 3161 skb_walk_frags(skb, iter) { 3162 if (*offset >= iter->len) { 3163 *offset -= iter->len; 3164 continue; 3165 } 3166 /* __skb_splice_bits() only fails if the output has no room 3167 * left, so no point in going over the frag_list for the error 3168 * case. 3169 */ 3170 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 3171 return true; 3172 } 3173 3174 return false; 3175 } 3176 3177 /* 3178 * Map data from the skb to a pipe. Should handle both the linear part, 3179 * the fragments, and the frag list. 3180 */ 3181 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 3182 struct pipe_inode_info *pipe, unsigned int tlen, 3183 unsigned int flags) 3184 { 3185 struct partial_page partial[MAX_SKB_FRAGS]; 3186 struct page *pages[MAX_SKB_FRAGS]; 3187 struct splice_pipe_desc spd = { 3188 .pages = pages, 3189 .partial = partial, 3190 .nr_pages_max = MAX_SKB_FRAGS, 3191 .ops = &nosteal_pipe_buf_ops, 3192 .spd_release = sock_spd_release, 3193 }; 3194 int ret = 0; 3195 3196 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 3197 3198 if (spd.nr_pages) 3199 ret = splice_to_pipe(pipe, &spd); 3200 3201 return ret; 3202 } 3203 EXPORT_SYMBOL_GPL(skb_splice_bits); 3204 3205 static int sendmsg_locked(struct sock *sk, struct msghdr *msg) 3206 { 3207 struct socket *sock = sk->sk_socket; 3208 size_t size = msg_data_left(msg); 3209 3210 if (!sock) 3211 return -EINVAL; 3212 3213 if (!sock->ops->sendmsg_locked) 3214 return sock_no_sendmsg_locked(sk, msg, size); 3215 3216 return sock->ops->sendmsg_locked(sk, msg, size); 3217 } 3218 3219 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) 3220 { 3221 struct socket *sock = sk->sk_socket; 3222 3223 if (!sock) 3224 return -EINVAL; 3225 return sock_sendmsg(sock, msg); 3226 } 3227 3228 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); 3229 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 3230 int len, sendmsg_func sendmsg) 3231 { 3232 unsigned int orig_len = len; 3233 struct sk_buff *head = skb; 3234 unsigned short fragidx; 3235 int slen, ret; 3236 3237 do_frag_list: 3238 3239 /* Deal with head data */ 3240 while (offset < skb_headlen(skb) && len) { 3241 struct kvec kv; 3242 struct msghdr msg; 3243 3244 slen = min_t(int, len, skb_headlen(skb) - offset); 3245 kv.iov_base = skb->data + offset; 3246 kv.iov_len = slen; 3247 memset(&msg, 0, sizeof(msg)); 3248 msg.msg_flags = MSG_DONTWAIT; 3249 3250 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); 3251 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3252 sendmsg_unlocked, sk, &msg); 3253 if (ret <= 0) 3254 goto error; 3255 3256 offset += ret; 3257 len -= ret; 3258 } 3259 3260 /* All the data was skb head? 
*/ 3261 if (!len) 3262 goto out; 3263 3264 /* Make offset relative to start of frags */ 3265 offset -= skb_headlen(skb); 3266 3267 /* Find where we are in frag list */ 3268 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3269 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3270 3271 if (offset < skb_frag_size(frag)) 3272 break; 3273 3274 offset -= skb_frag_size(frag); 3275 } 3276 3277 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 3278 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 3279 3280 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 3281 3282 while (slen) { 3283 struct bio_vec bvec; 3284 struct msghdr msg = { 3285 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, 3286 }; 3287 3288 bvec_set_page(&bvec, skb_frag_page(frag), slen, 3289 skb_frag_off(frag) + offset); 3290 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, 3291 slen); 3292 3293 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, 3294 sendmsg_unlocked, sk, &msg); 3295 if (ret <= 0) 3296 goto error; 3297 3298 len -= ret; 3299 offset += ret; 3300 slen -= ret; 3301 } 3302 3303 offset = 0; 3304 } 3305 3306 if (len) { 3307 /* Process any frag lists */ 3308 3309 if (skb == head) { 3310 if (skb_has_frag_list(skb)) { 3311 skb = skb_shinfo(skb)->frag_list; 3312 goto do_frag_list; 3313 } 3314 } else if (skb->next) { 3315 skb = skb->next; 3316 goto do_frag_list; 3317 } 3318 } 3319 3320 out: 3321 return orig_len - len; 3322 3323 error: 3324 return orig_len == len ? ret : orig_len - len; 3325 } 3326 3327 /* Send skb data on a socket. Socket must be locked. */ 3328 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 3329 int len) 3330 { 3331 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); 3332 } 3333 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 3334 3335 /* Send skb data on a socket. Socket must be unlocked. */ 3336 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 3337 { 3338 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); 3339 } 3340 3341 /** 3342 * skb_store_bits - store bits from kernel buffer to skb 3343 * @skb: destination buffer 3344 * @offset: offset in destination 3345 * @from: source buffer 3346 * @len: number of bytes to copy 3347 * 3348 * Copy the specified number of bytes from the source buffer to the 3349 * destination skb. This function handles all the messy bits of 3350 * traversing fragment lists and such. 
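* * Illustrative only (hypothetical values): overwriting two bytes at offset 14 of @skb: * * __be16 v = htons(0x0800); * * if (skb_store_bits(skb, 14, &v, sizeof(v))) * goto drop;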
3351 */ 3352 3353 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 3354 { 3355 int start = skb_headlen(skb); 3356 struct sk_buff *frag_iter; 3357 int i, copy; 3358 3359 if (offset > (int)skb->len - len) 3360 goto fault; 3361 3362 if ((copy = start - offset) > 0) { 3363 if (copy > len) 3364 copy = len; 3365 skb_copy_to_linear_data_offset(skb, offset, from, copy); 3366 if ((len -= copy) == 0) 3367 return 0; 3368 offset += copy; 3369 from += copy; 3370 } 3371 3372 if (!skb_frags_readable(skb)) 3373 goto fault; 3374 3375 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3376 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3377 int end; 3378 3379 WARN_ON(start > offset + len); 3380 3381 end = start + skb_frag_size(frag); 3382 if ((copy = end - offset) > 0) { 3383 u32 p_off, p_len, copied; 3384 struct page *p; 3385 u8 *vaddr; 3386 3387 if (copy > len) 3388 copy = len; 3389 3390 skb_frag_foreach_page(frag, 3391 skb_frag_off(frag) + offset - start, 3392 copy, p, p_off, p_len, copied) { 3393 vaddr = kmap_atomic(p); 3394 memcpy(vaddr + p_off, from + copied, p_len); 3395 kunmap_atomic(vaddr); 3396 } 3397 3398 if ((len -= copy) == 0) 3399 return 0; 3400 offset += copy; 3401 from += copy; 3402 } 3403 start = end; 3404 } 3405 3406 skb_walk_frags(skb, frag_iter) { 3407 int end; 3408 3409 WARN_ON(start > offset + len); 3410 3411 end = start + frag_iter->len; 3412 if ((copy = end - offset) > 0) { 3413 if (copy > len) 3414 copy = len; 3415 if (skb_store_bits(frag_iter, offset - start, 3416 from, copy)) 3417 goto fault; 3418 if ((len -= copy) == 0) 3419 return 0; 3420 offset += copy; 3421 from += copy; 3422 } 3423 start = end; 3424 } 3425 if (!len) 3426 return 0; 3427 3428 fault: 3429 return -EFAULT; 3430 } 3431 EXPORT_SYMBOL(skb_store_bits); 3432 3433 /* Checksum skb data. */ 3434 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3435 __wsum csum, const struct skb_checksum_ops *ops) 3436 { 3437 int start = skb_headlen(skb); 3438 int i, copy = start - offset; 3439 struct sk_buff *frag_iter; 3440 int pos = 0; 3441 3442 /* Checksum header. 
*/ 3443 if (copy > 0) { 3444 if (copy > len) 3445 copy = len; 3446 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 3447 skb->data + offset, copy, csum); 3448 if ((len -= copy) == 0) 3449 return csum; 3450 offset += copy; 3451 pos = copy; 3452 } 3453 3454 if (WARN_ON_ONCE(!skb_frags_readable(skb))) 3455 return 0; 3456 3457 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3458 int end; 3459 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3460 3461 WARN_ON(start > offset + len); 3462 3463 end = start + skb_frag_size(frag); 3464 if ((copy = end - offset) > 0) { 3465 u32 p_off, p_len, copied; 3466 struct page *p; 3467 __wsum csum2; 3468 u8 *vaddr; 3469 3470 if (copy > len) 3471 copy = len; 3472 3473 skb_frag_foreach_page(frag, 3474 skb_frag_off(frag) + offset - start, 3475 copy, p, p_off, p_len, copied) { 3476 vaddr = kmap_atomic(p); 3477 csum2 = INDIRECT_CALL_1(ops->update, 3478 csum_partial_ext, 3479 vaddr + p_off, p_len, 0); 3480 kunmap_atomic(vaddr); 3481 csum = INDIRECT_CALL_1(ops->combine, 3482 csum_block_add_ext, csum, 3483 csum2, pos, p_len); 3484 pos += p_len; 3485 } 3486 3487 if (!(len -= copy)) 3488 return csum; 3489 offset += copy; 3490 } 3491 start = end; 3492 } 3493 3494 skb_walk_frags(skb, frag_iter) { 3495 int end; 3496 3497 WARN_ON(start > offset + len); 3498 3499 end = start + frag_iter->len; 3500 if ((copy = end - offset) > 0) { 3501 __wsum csum2; 3502 if (copy > len) 3503 copy = len; 3504 csum2 = __skb_checksum(frag_iter, offset - start, 3505 copy, 0, ops); 3506 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 3507 csum, csum2, pos, copy); 3508 if ((len -= copy) == 0) 3509 return csum; 3510 offset += copy; 3511 pos += copy; 3512 } 3513 start = end; 3514 } 3515 BUG_ON(len); 3516 3517 return csum; 3518 } 3519 EXPORT_SYMBOL(__skb_checksum); 3520 3521 __wsum skb_checksum(const struct sk_buff *skb, int offset, 3522 int len, __wsum csum) 3523 { 3524 const struct skb_checksum_ops ops = { 3525 .update = csum_partial_ext, 3526 .combine = csum_block_add_ext, 3527 }; 3528 3529 return __skb_checksum(skb, offset, len, csum, &ops); 3530 } 3531 EXPORT_SYMBOL(skb_checksum); 3532 3533 /* Both of above in one bottle. */ 3534 3535 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 3536 u8 *to, int len) 3537 { 3538 int start = skb_headlen(skb); 3539 int i, copy = start - offset; 3540 struct sk_buff *frag_iter; 3541 int pos = 0; 3542 __wsum csum = 0; 3543 3544 /* Copy header. 
*/ 3545 if (copy > 0) { 3546 if (copy > len) 3547 copy = len; 3548 csum = csum_partial_copy_nocheck(skb->data + offset, to, 3549 copy); 3550 if ((len -= copy) == 0) 3551 return csum; 3552 offset += copy; 3553 to += copy; 3554 pos = copy; 3555 } 3556 3557 if (!skb_frags_readable(skb)) 3558 return 0; 3559 3560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3561 int end; 3562 3563 WARN_ON(start > offset + len); 3564 3565 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3566 if ((copy = end - offset) > 0) { 3567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3568 u32 p_off, p_len, copied; 3569 struct page *p; 3570 __wsum csum2; 3571 u8 *vaddr; 3572 3573 if (copy > len) 3574 copy = len; 3575 3576 skb_frag_foreach_page(frag, 3577 skb_frag_off(frag) + offset - start, 3578 copy, p, p_off, p_len, copied) { 3579 vaddr = kmap_atomic(p); 3580 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 3581 to + copied, 3582 p_len); 3583 kunmap_atomic(vaddr); 3584 csum = csum_block_add(csum, csum2, pos); 3585 pos += p_len; 3586 } 3587 3588 if (!(len -= copy)) 3589 return csum; 3590 offset += copy; 3591 to += copy; 3592 } 3593 start = end; 3594 } 3595 3596 skb_walk_frags(skb, frag_iter) { 3597 __wsum csum2; 3598 int end; 3599 3600 WARN_ON(start > offset + len); 3601 3602 end = start + frag_iter->len; 3603 if ((copy = end - offset) > 0) { 3604 if (copy > len) 3605 copy = len; 3606 csum2 = skb_copy_and_csum_bits(frag_iter, 3607 offset - start, 3608 to, copy); 3609 csum = csum_block_add(csum, csum2, pos); 3610 if ((len -= copy) == 0) 3611 return csum; 3612 offset += copy; 3613 to += copy; 3614 pos += copy; 3615 } 3616 start = end; 3617 } 3618 BUG_ON(len); 3619 return csum; 3620 } 3621 EXPORT_SYMBOL(skb_copy_and_csum_bits); 3622 3623 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 3624 { 3625 __sum16 sum; 3626 3627 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 3628 /* See comments in __skb_checksum_complete(). */ 3629 if (likely(!sum)) { 3630 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3631 !skb->csum_complete_sw) 3632 netdev_rx_csum_fault(skb->dev, skb); 3633 } 3634 if (!skb_shared(skb)) 3635 skb->csum_valid = !sum; 3636 return sum; 3637 } 3638 EXPORT_SYMBOL(__skb_checksum_complete_head); 3639 3640 /* This function assumes skb->csum already holds pseudo header's checksum, 3641 * which has been changed from the hardware checksum, for example, by 3642 * __skb_checksum_validate_complete(). And, the original skb->csum must 3643 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 3644 * 3645 * It returns non-zero if the recomputed checksum is still invalid, otherwise 3646 * zero. The new checksum is stored back into skb->csum unless the skb is 3647 * shared. 3648 */ 3649 __sum16 __skb_checksum_complete(struct sk_buff *skb) 3650 { 3651 __wsum csum; 3652 __sum16 sum; 3653 3654 csum = skb_checksum(skb, 0, skb->len, 0); 3655 3656 sum = csum_fold(csum_add(skb->csum, csum)); 3657 /* This check is inverted, because we already knew the hardware 3658 * checksum is invalid before calling this function. So, if the 3659 * re-computed checksum is valid instead, then we have a mismatch 3660 * between the original skb->csum and skb_checksum(). This means either 3661 * the original hardware checksum is incorrect or we screw up skb->csum 3662 * when moving skb->data around. 
3663 */ 3664 if (likely(!sum)) { 3665 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 3666 !skb->csum_complete_sw) 3667 netdev_rx_csum_fault(skb->dev, skb); 3668 } 3669 3670 if (!skb_shared(skb)) { 3671 /* Save full packet checksum */ 3672 skb->csum = csum; 3673 skb->ip_summed = CHECKSUM_COMPLETE; 3674 skb->csum_complete_sw = 1; 3675 skb->csum_valid = !sum; 3676 } 3677 3678 return sum; 3679 } 3680 EXPORT_SYMBOL(__skb_checksum_complete); 3681 3682 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 3683 { 3684 net_warn_ratelimited( 3685 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3686 __func__); 3687 return 0; 3688 } 3689 3690 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 3691 int offset, int len) 3692 { 3693 net_warn_ratelimited( 3694 "%s: attempt to compute crc32c without libcrc32c.ko\n", 3695 __func__); 3696 return 0; 3697 } 3698 3699 static const struct skb_checksum_ops default_crc32c_ops = { 3700 .update = warn_crc32c_csum_update, 3701 .combine = warn_crc32c_csum_combine, 3702 }; 3703 3704 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 3705 &default_crc32c_ops; 3706 EXPORT_SYMBOL(crc32c_csum_stub); 3707 3708 /** 3709 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3710 * @from: source buffer 3711 * 3712 * Calculates the amount of linear headroom needed in the 'to' skb passed 3713 * into skb_zerocopy(). 3714 */ 3715 unsigned int 3716 skb_zerocopy_headlen(const struct sk_buff *from) 3717 { 3718 unsigned int hlen = 0; 3719 3720 if (!from->head_frag || 3721 skb_headlen(from) < L1_CACHE_BYTES || 3722 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3723 hlen = skb_headlen(from); 3724 if (!hlen) 3725 hlen = from->len; 3726 } 3727 3728 if (skb_has_frag_list(from)) 3729 hlen = from->len; 3730 3731 return hlen; 3732 } 3733 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3734 3735 /** 3736 * skb_zerocopy - Zero copy skb to skb 3737 * @to: destination buffer 3738 * @from: source buffer 3739 * @len: number of bytes to copy from source buffer 3740 * @hlen: size of linear headroom in destination buffer 3741 * 3742 * Copies up to `len` bytes from `from` to `to` by creating references 3743 * to the frags in the source buffer. 3744 * 3745 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 3746 * headroom in the `to` buffer. 
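* * A sketch of the expected calling sequence (illustrative; allocation of the destination skb is elided): * * hlen = skb_zerocopy_headlen(from); * ...allocate 'to' with at least hlen of linear space... * err = skb_zerocopy(to, from, len, hlen);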
3747 * 3748 * Return value: 3749 * 0: everything is OK 3750 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 3751 * -EFAULT: skb_copy_bits() found some problem with skb geometry 3752 */ 3753 int 3754 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3755 { 3756 int i, j = 0; 3757 int plen = 0; /* length of skb->head fragment */ 3758 int ret; 3759 struct page *page; 3760 unsigned int offset; 3761 3762 BUG_ON(!from->head_frag && !hlen); 3763 3764 /* dont bother with small payloads */ 3765 if (len <= skb_tailroom(to)) 3766 return skb_copy_bits(from, 0, skb_put(to, len), len); 3767 3768 if (hlen) { 3769 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 3770 if (unlikely(ret)) 3771 return ret; 3772 len -= hlen; 3773 } else { 3774 plen = min_t(int, skb_headlen(from), len); 3775 if (plen) { 3776 page = virt_to_head_page(from->head); 3777 offset = from->data - (unsigned char *)page_address(page); 3778 __skb_fill_netmem_desc(to, 0, page_to_netmem(page), 3779 offset, plen); 3780 get_page(page); 3781 j = 1; 3782 len -= plen; 3783 } 3784 } 3785 3786 skb_len_add(to, len + plen); 3787 3788 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 3789 skb_tx_error(from); 3790 return -ENOMEM; 3791 } 3792 skb_zerocopy_clone(to, from, GFP_ATOMIC); 3793 3794 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3795 int size; 3796 3797 if (!len) 3798 break; 3799 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3800 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3801 len); 3802 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3803 len -= size; 3804 skb_frag_ref(to, j); 3805 j++; 3806 } 3807 skb_shinfo(to)->nr_frags = j; 3808 3809 return 0; 3810 } 3811 EXPORT_SYMBOL_GPL(skb_zerocopy); 3812 3813 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3814 { 3815 __wsum csum; 3816 long csstart; 3817 3818 if (skb->ip_summed == CHECKSUM_PARTIAL) 3819 csstart = skb_checksum_start_offset(skb); 3820 else 3821 csstart = skb_headlen(skb); 3822 3823 BUG_ON(csstart > skb_headlen(skb)); 3824 3825 skb_copy_from_linear_data(skb, to, csstart); 3826 3827 csum = 0; 3828 if (csstart != skb->len) 3829 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3830 skb->len - csstart); 3831 3832 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3833 long csstuff = csstart + skb->csum_offset; 3834 3835 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3836 } 3837 } 3838 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3839 3840 /** 3841 * skb_dequeue - remove from the head of the queue 3842 * @list: list to dequeue from 3843 * 3844 * Remove the head of the list. The list lock is taken so the function 3845 * may be used safely with other locking list functions. The head item is 3846 * returned or %NULL if the list is empty. 3847 */ 3848 3849 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3850 { 3851 unsigned long flags; 3852 struct sk_buff *result; 3853 3854 spin_lock_irqsave(&list->lock, flags); 3855 result = __skb_dequeue(list); 3856 spin_unlock_irqrestore(&list->lock, flags); 3857 return result; 3858 } 3859 EXPORT_SYMBOL(skb_dequeue); 3860 3861 /** 3862 * skb_dequeue_tail - remove from the tail of the queue 3863 * @list: list to dequeue from 3864 * 3865 * Remove the tail of the list. The list lock is taken so the function 3866 * may be used safely with other locking list functions. The tail item is 3867 * returned or %NULL if the list is empty. 
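* * Illustrative drain loop (hypothetical list): * * while ((skb = skb_dequeue_tail(&list)) != NULL) * kfree_skb(skb);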
3868 */ 3869 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3870 { 3871 unsigned long flags; 3872 struct sk_buff *result; 3873 3874 spin_lock_irqsave(&list->lock, flags); 3875 result = __skb_dequeue_tail(list); 3876 spin_unlock_irqrestore(&list->lock, flags); 3877 return result; 3878 } 3879 EXPORT_SYMBOL(skb_dequeue_tail); 3880 3881 /** 3882 * skb_queue_purge_reason - empty a list 3883 * @list: list to empty 3884 * @reason: drop reason 3885 * 3886 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3887 * the list and one reference dropped. This function takes the list 3888 * lock and is atomic with respect to other list locking functions. 3889 */ 3890 void skb_queue_purge_reason(struct sk_buff_head *list, 3891 enum skb_drop_reason reason) 3892 { 3893 struct sk_buff_head tmp; 3894 unsigned long flags; 3895 3896 if (skb_queue_empty_lockless(list)) 3897 return; 3898 3899 __skb_queue_head_init(&tmp); 3900 3901 spin_lock_irqsave(&list->lock, flags); 3902 skb_queue_splice_init(list, &tmp); 3903 spin_unlock_irqrestore(&list->lock, flags); 3904 3905 __skb_queue_purge_reason(&tmp, reason); 3906 } 3907 EXPORT_SYMBOL(skb_queue_purge_reason); 3908 3909 /** 3910 * skb_rbtree_purge - empty a skb rbtree 3911 * @root: root of the rbtree to empty 3912 * Return value: the sum of truesizes of all purged skbs. 3913 * 3914 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3915 * the list and one reference dropped. This function does not take 3916 * any lock. Synchronization should be handled by the caller (e.g., TCP 3917 * out-of-order queue is protected by the socket lock). 3918 */ 3919 unsigned int skb_rbtree_purge(struct rb_root *root) 3920 { 3921 struct rb_node *p = rb_first(root); 3922 unsigned int sum = 0; 3923 3924 while (p) { 3925 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3926 3927 p = rb_next(p); 3928 rb_erase(&skb->rbnode, root); 3929 sum += skb->truesize; 3930 kfree_skb(skb); 3931 } 3932 return sum; 3933 } 3934 3935 void skb_errqueue_purge(struct sk_buff_head *list) 3936 { 3937 struct sk_buff *skb, *next; 3938 struct sk_buff_head kill; 3939 unsigned long flags; 3940 3941 __skb_queue_head_init(&kill); 3942 3943 spin_lock_irqsave(&list->lock, flags); 3944 skb_queue_walk_safe(list, skb, next) { 3945 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || 3946 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) 3947 continue; 3948 __skb_unlink(skb, list); 3949 __skb_queue_tail(&kill, skb); 3950 } 3951 spin_unlock_irqrestore(&list->lock, flags); 3952 __skb_queue_purge(&kill); 3953 } 3954 EXPORT_SYMBOL(skb_errqueue_purge); 3955 3956 /** 3957 * skb_queue_head - queue a buffer at the list head 3958 * @list: list to use 3959 * @newsk: buffer to queue 3960 * 3961 * Queue a buffer at the start of the list. This function takes the 3962 * list lock and can be used safely with other locking &sk_buff functions 3963 * safely. 3964 * 3965 * A buffer cannot be placed on two lists at the same time. 3966 */ 3967 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3968 { 3969 unsigned long flags; 3970 3971 spin_lock_irqsave(&list->lock, flags); 3972 __skb_queue_head(list, newsk); 3973 spin_unlock_irqrestore(&list->lock, flags); 3974 } 3975 EXPORT_SYMBOL(skb_queue_head); 3976 3977 /** 3978 * skb_queue_tail - queue a buffer at the list tail 3979 * @list: list to use 3980 * @newsk: buffer to queue 3981 * 3982 * Queue a buffer at the tail of the list. 
3983 * list lock and can be used safely with other locking &sk_buff
3984 * functions.
3985 *
3986 * A buffer cannot be placed on two lists at the same time.
3987 */
3988 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3989 {
3990 unsigned long flags;
3991
3992 spin_lock_irqsave(&list->lock, flags);
3993 __skb_queue_tail(list, newsk);
3994 spin_unlock_irqrestore(&list->lock, flags);
3995 }
3996 EXPORT_SYMBOL(skb_queue_tail);
3997
3998 /**
3999 * skb_unlink - remove a buffer from a list
4000 * @skb: buffer to remove
4001 * @list: list to use
4002 *
4003 * Remove a packet from a list. The list locks are taken and this
4004 * function is atomic with respect to other list locked calls.
4005 *
4006 * You must know what list the SKB is on.
4007 */
4008 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
4009 {
4010 unsigned long flags;
4011
4012 spin_lock_irqsave(&list->lock, flags);
4013 __skb_unlink(skb, list);
4014 spin_unlock_irqrestore(&list->lock, flags);
4015 }
4016 EXPORT_SYMBOL(skb_unlink);
4017
4018 /**
4019 * skb_append - append a buffer
4020 * @old: buffer to insert after
4021 * @newsk: buffer to insert
4022 * @list: list to use
4023 *
4024 * Place a packet after a given packet in a list. The list locks are taken
4025 * and this function is atomic with respect to other list locked calls.
4026 * A buffer cannot be placed on two lists at the same time.
4027 */
4028 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
4029 {
4030 unsigned long flags;
4031
4032 spin_lock_irqsave(&list->lock, flags);
4033 __skb_queue_after(list, old, newsk);
4034 spin_unlock_irqrestore(&list->lock, flags);
4035 }
4036 EXPORT_SYMBOL(skb_append);
4037
4038 static inline void skb_split_inside_header(struct sk_buff *skb,
4039 struct sk_buff* skb1,
4040 const u32 len, const int pos)
4041 {
4042 int i;
4043
4044 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
4045 pos - len);
4046 /* And move data appendix as is. */
4047 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
4048 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
4049
4050 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
4051 skb1->unreadable = skb->unreadable;
4052 skb_shinfo(skb)->nr_frags = 0;
4053 skb1->data_len = skb->data_len;
4054 skb1->len += skb1->data_len;
4055 skb->data_len = 0;
4056 skb->len = len;
4057 skb_set_tail_pointer(skb, len);
4058 }
4059
4060 static inline void skb_split_no_header(struct sk_buff *skb,
4061 struct sk_buff* skb1,
4062 const u32 len, int pos)
4063 {
4064 int i, k = 0;
4065 const int nfrags = skb_shinfo(skb)->nr_frags;
4066
4067 skb_shinfo(skb)->nr_frags = 0;
4068 skb1->len = skb1->data_len = skb->len - len;
4069 skb->len = len;
4070 skb->data_len = len - pos;
4071
4072 for (i = 0; i < nfrags; i++) {
4073 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
4074
4075 if (pos + size > len) {
4076 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
4077
4078 if (pos < len) {
4079 /* Split frag.
4080 * We have two variants in this case:
4081 * 1. Move the whole frag to the second
4082 * part, if that is possible. E.g. this
4083 * approach is mandatory for TUX,
4084 * where splitting is expensive.
4085 * 2. Split the frag accurately. This is what we do.
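 *
 * A worked example of variant 2 (numbers illustrative only):
 * with pos == 100, len == 120 and a 50-byte frag, the original
 * skb keeps the first len - pos == 20 bytes of the frag, while
 * skb1's copy of it is advanced by 20 bytes and shrunk to the
 * remaining 30.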
4086 */
4087 skb_frag_ref(skb, i);
4088 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
4089 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
4090 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
4091 skb_shinfo(skb)->nr_frags++;
4092 }
4093 k++;
4094 } else
4095 skb_shinfo(skb)->nr_frags++;
4096 pos += size;
4097 }
4098 skb_shinfo(skb1)->nr_frags = k;
4099
4100 skb1->unreadable = skb->unreadable;
4101 }
4102
4103 /**
4104 * skb_split - Split fragmented skb to two parts at length len.
4105 * @skb: the buffer to split
4106 * @skb1: the buffer to receive the second part
4107 * @len: new length for skb
4108 */
4109 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
4110 {
4111 int pos = skb_headlen(skb);
4112 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
4113
4114 skb_zcopy_downgrade_managed(skb);
4115
4116 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
4117 skb_zerocopy_clone(skb1, skb, 0);
4118 if (len < pos) /* Split line is inside header. */
4119 skb_split_inside_header(skb, skb1, len, pos);
4120 else /* Second chunk has no header, nothing to copy. */
4121 skb_split_no_header(skb, skb1, len, pos);
4122 }
4123 EXPORT_SYMBOL(skb_split);
4124
4125 /* Shifting from/to a cloned skb is a no-go.
4126 *
4127 * Caller cannot keep skb_shinfo related pointers past calling here!
4128 */
4129 static int skb_prepare_for_shift(struct sk_buff *skb)
4130 {
4131 return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
4132 }
4133
4134 /**
4135 * skb_shift - Shifts paged data partially from skb to another
4136 * @tgt: buffer into which tail data gets added
4137 * @skb: buffer from which the paged data comes from
4138 * @shiftlen: shift up to this many bytes
4139 *
4140 * Attempts to shift up to @shiftlen worth of bytes, which may be less
4141 * than the length of the skb, from @skb to @tgt. Returns the number of
4142 * bytes shifted. It's up to the caller to free @skb if everything was shifted.
4143 *
4144 * If @tgt runs out of frags, the whole operation is aborted.
4145 *
4146 * @skb cannot include anything else but paged data, while @tgt is allowed
4147 * to have non-paged data as well.
4148 *
4149 * TODO: full sized shift could be optimized but that would need
4150 * specialized skb free'er to handle frags without up-to-date nr_frags.
4151 */
4152 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
4153 {
4154 int from, to, merge, todo;
4155 skb_frag_t *fragfrom, *fragto;
4156
4157 BUG_ON(shiftlen > skb->len);
4158
4159 if (skb_headlen(skb))
4160 return 0;
4161 if (skb_zcopy(tgt) || skb_zcopy(skb))
4162 return 0;
4163
4164 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle);
4165 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb));
4166
4167 todo = shiftlen;
4168 from = 0;
4169 to = skb_shinfo(tgt)->nr_frags;
4170 fragfrom = &skb_shinfo(skb)->frags[from];
4171
4172 /* Actual merge is delayed until the point when we know we can
4173 * commit all, so that we don't have to undo partial changes
4174 */
4175 if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
4176 skb_frag_off(fragfrom))) {
4177 merge = -1;
4178 } else {
4179 merge = to - 1;
4180
4181 todo -= skb_frag_size(fragfrom);
4182 if (todo < 0) {
4183 if (skb_prepare_for_shift(skb) ||
4184 skb_prepare_for_shift(tgt))
4185 return 0;
4186
4187 /* All previous frag pointers might be stale!
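 * (skb_prepare_for_shift() goes through skb_unclone_keeptruesize(),
 * which may reallocate the head and with it the shared info and its
 * frag array; hence the pointers are reloaded just below.)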
*/ 4188 fragfrom = &skb_shinfo(skb)->frags[from]; 4189 fragto = &skb_shinfo(tgt)->frags[merge]; 4190 4191 skb_frag_size_add(fragto, shiftlen); 4192 skb_frag_size_sub(fragfrom, shiftlen); 4193 skb_frag_off_add(fragfrom, shiftlen); 4194 4195 goto onlymerged; 4196 } 4197 4198 from++; 4199 } 4200 4201 /* Skip full, not-fitting skb to avoid expensive operations */ 4202 if ((shiftlen == skb->len) && 4203 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 4204 return 0; 4205 4206 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 4207 return 0; 4208 4209 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 4210 if (to == MAX_SKB_FRAGS) 4211 return 0; 4212 4213 fragfrom = &skb_shinfo(skb)->frags[from]; 4214 fragto = &skb_shinfo(tgt)->frags[to]; 4215 4216 if (todo >= skb_frag_size(fragfrom)) { 4217 *fragto = *fragfrom; 4218 todo -= skb_frag_size(fragfrom); 4219 from++; 4220 to++; 4221 4222 } else { 4223 __skb_frag_ref(fragfrom); 4224 skb_frag_page_copy(fragto, fragfrom); 4225 skb_frag_off_copy(fragto, fragfrom); 4226 skb_frag_size_set(fragto, todo); 4227 4228 skb_frag_off_add(fragfrom, todo); 4229 skb_frag_size_sub(fragfrom, todo); 4230 todo = 0; 4231 4232 to++; 4233 break; 4234 } 4235 } 4236 4237 /* Ready to "commit" this state change to tgt */ 4238 skb_shinfo(tgt)->nr_frags = to; 4239 4240 if (merge >= 0) { 4241 fragfrom = &skb_shinfo(skb)->frags[0]; 4242 fragto = &skb_shinfo(tgt)->frags[merge]; 4243 4244 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 4245 __skb_frag_unref(fragfrom, skb->pp_recycle); 4246 } 4247 4248 /* Reposition in the original skb */ 4249 to = 0; 4250 while (from < skb_shinfo(skb)->nr_frags) 4251 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 4252 skb_shinfo(skb)->nr_frags = to; 4253 4254 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 4255 4256 onlymerged: 4257 /* Most likely the tgt won't ever need its checksum anymore, skb on 4258 * the other hand might need it if it needs to be resent 4259 */ 4260 tgt->ip_summed = CHECKSUM_PARTIAL; 4261 skb->ip_summed = CHECKSUM_PARTIAL; 4262 4263 skb_len_add(skb, -shiftlen); 4264 skb_len_add(tgt, shiftlen); 4265 4266 return shiftlen; 4267 } 4268 4269 /** 4270 * skb_prepare_seq_read - Prepare a sequential read of skb data 4271 * @skb: the buffer to read 4272 * @from: lower offset of data to be read 4273 * @to: upper offset of data to be read 4274 * @st: state variable 4275 * 4276 * Initializes the specified state variable. Must be called before 4277 * invoking skb_seq_read() for the first time. 4278 */ 4279 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 4280 unsigned int to, struct skb_seq_state *st) 4281 { 4282 st->lower_offset = from; 4283 st->upper_offset = to; 4284 st->root_skb = st->cur_skb = skb; 4285 st->frag_idx = st->stepped_offset = 0; 4286 st->frag_data = NULL; 4287 st->frag_off = 0; 4288 } 4289 EXPORT_SYMBOL(skb_prepare_seq_read); 4290 4291 /** 4292 * skb_seq_read - Sequentially read skb data 4293 * @consumed: number of bytes consumed by the caller so far 4294 * @data: destination pointer for data to be returned 4295 * @st: state variable 4296 * 4297 * Reads a block of skb data at @consumed relative to the 4298 * lower offset specified to skb_prepare_seq_read(). Assigns 4299 * the head of the data block to @data and returns the length 4300 * of the block or 0 if the end of the skb data or the upper 4301 * offset has been reached. 4302 * 4303 * The caller is not required to consume all of the data 4304 * returned, i.e. 
@consumed is typically set to the number 4305 * of bytes already consumed and the next call to 4306 * skb_seq_read() will return the remaining part of the block. 4307 * 4308 * Note 1: The size of each block of data returned can be arbitrary, 4309 * this limitation is the cost for zerocopy sequential 4310 * reads of potentially non linear data. 4311 * 4312 * Note 2: Fragment lists within fragments are not implemented 4313 * at the moment, state->root_skb could be replaced with 4314 * a stack for this purpose. 4315 */ 4316 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 4317 struct skb_seq_state *st) 4318 { 4319 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 4320 skb_frag_t *frag; 4321 4322 if (unlikely(abs_offset >= st->upper_offset)) { 4323 if (st->frag_data) { 4324 kunmap_atomic(st->frag_data); 4325 st->frag_data = NULL; 4326 } 4327 return 0; 4328 } 4329 4330 next_skb: 4331 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 4332 4333 if (abs_offset < block_limit && !st->frag_data) { 4334 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 4335 return block_limit - abs_offset; 4336 } 4337 4338 if (!skb_frags_readable(st->cur_skb)) 4339 return 0; 4340 4341 if (st->frag_idx == 0 && !st->frag_data) 4342 st->stepped_offset += skb_headlen(st->cur_skb); 4343 4344 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 4345 unsigned int pg_idx, pg_off, pg_sz; 4346 4347 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 4348 4349 pg_idx = 0; 4350 pg_off = skb_frag_off(frag); 4351 pg_sz = skb_frag_size(frag); 4352 4353 if (skb_frag_must_loop(skb_frag_page(frag))) { 4354 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 4355 pg_off = offset_in_page(pg_off + st->frag_off); 4356 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 4357 PAGE_SIZE - pg_off); 4358 } 4359 4360 block_limit = pg_sz + st->stepped_offset; 4361 if (abs_offset < block_limit) { 4362 if (!st->frag_data) 4363 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 4364 4365 *data = (u8 *)st->frag_data + pg_off + 4366 (abs_offset - st->stepped_offset); 4367 4368 return block_limit - abs_offset; 4369 } 4370 4371 if (st->frag_data) { 4372 kunmap_atomic(st->frag_data); 4373 st->frag_data = NULL; 4374 } 4375 4376 st->stepped_offset += pg_sz; 4377 st->frag_off += pg_sz; 4378 if (st->frag_off == skb_frag_size(frag)) { 4379 st->frag_off = 0; 4380 st->frag_idx++; 4381 } 4382 } 4383 4384 if (st->frag_data) { 4385 kunmap_atomic(st->frag_data); 4386 st->frag_data = NULL; 4387 } 4388 4389 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 4390 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 4391 st->frag_idx = 0; 4392 goto next_skb; 4393 } else if (st->cur_skb->next) { 4394 st->cur_skb = st->cur_skb->next; 4395 st->frag_idx = 0; 4396 goto next_skb; 4397 } 4398 4399 return 0; 4400 } 4401 EXPORT_SYMBOL(skb_seq_read); 4402 4403 /** 4404 * skb_abort_seq_read - Abort a sequential read of skb data 4405 * @st: state variable 4406 * 4407 * Must be called if skb_seq_read() was not called until it 4408 * returned 0. 
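 *
 * A minimal usage sketch of the sequential read API (illustrative
 * only; @skb, @len and process() stand in for the caller's own):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, avail);
 *		consumed += avail;
 *	}
 *
 * Because this loop only stops once skb_seq_read() has returned 0,
 * it does not need skb_abort_seq_read(); a loop that bails out early
 * does.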
4409 */ 4410 void skb_abort_seq_read(struct skb_seq_state *st) 4411 { 4412 if (st->frag_data) 4413 kunmap_atomic(st->frag_data); 4414 } 4415 EXPORT_SYMBOL(skb_abort_seq_read); 4416 4417 /** 4418 * skb_copy_seq_read() - copy from a skb_seq_state to a buffer 4419 * @st: source skb_seq_state 4420 * @offset: offset in source 4421 * @to: destination buffer 4422 * @len: number of bytes to copy 4423 * 4424 * Copy @len bytes from @offset bytes into the source @st to the destination 4425 * buffer @to. `offset` should increase (or be unchanged) with each subsequent 4426 * call to this function. If offset needs to decrease from the previous use `st` 4427 * should be reset first. 4428 * 4429 * Return: 0 on success or -EINVAL if the copy ended early 4430 */ 4431 int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len) 4432 { 4433 const u8 *data; 4434 u32 sqlen; 4435 4436 for (;;) { 4437 sqlen = skb_seq_read(offset, &data, st); 4438 if (sqlen == 0) 4439 return -EINVAL; 4440 if (sqlen >= len) { 4441 memcpy(to, data, len); 4442 return 0; 4443 } 4444 memcpy(to, data, sqlen); 4445 to += sqlen; 4446 offset += sqlen; 4447 len -= sqlen; 4448 } 4449 } 4450 EXPORT_SYMBOL(skb_copy_seq_read); 4451 4452 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 4453 4454 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 4455 struct ts_config *conf, 4456 struct ts_state *state) 4457 { 4458 return skb_seq_read(offset, text, TS_SKB_CB(state)); 4459 } 4460 4461 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 4462 { 4463 skb_abort_seq_read(TS_SKB_CB(state)); 4464 } 4465 4466 /** 4467 * skb_find_text - Find a text pattern in skb data 4468 * @skb: the buffer to look in 4469 * @from: search offset 4470 * @to: search limit 4471 * @config: textsearch configuration 4472 * 4473 * Finds a pattern in the skb data according to the specified 4474 * textsearch configuration. Use textsearch_next() to retrieve 4475 * subsequent occurrences of the pattern. Returns the offset 4476 * to the first occurrence or UINT_MAX if no match was found. 4477 */ 4478 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 4479 unsigned int to, struct ts_config *config) 4480 { 4481 unsigned int patlen = config->ops->get_pattern_len(config); 4482 struct ts_state state; 4483 unsigned int ret; 4484 4485 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 4486 4487 config->get_next_block = skb_ts_get_next_block; 4488 config->finish = skb_ts_finish; 4489 4490 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 4491 4492 ret = textsearch_find(config, &state); 4493 return (ret + patlen <= to - from ? 
ret : UINT_MAX); 4494 } 4495 EXPORT_SYMBOL(skb_find_text); 4496 4497 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 4498 int offset, size_t size, size_t max_frags) 4499 { 4500 int i = skb_shinfo(skb)->nr_frags; 4501 4502 if (skb_can_coalesce(skb, i, page, offset)) { 4503 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 4504 } else if (i < max_frags) { 4505 skb_zcopy_downgrade_managed(skb); 4506 get_page(page); 4507 skb_fill_page_desc_noacc(skb, i, page, offset, size); 4508 } else { 4509 return -EMSGSIZE; 4510 } 4511 4512 return 0; 4513 } 4514 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 4515 4516 /** 4517 * skb_pull_rcsum - pull skb and update receive checksum 4518 * @skb: buffer to update 4519 * @len: length of data pulled 4520 * 4521 * This function performs an skb_pull on the packet and updates 4522 * the CHECKSUM_COMPLETE checksum. It should be used on 4523 * receive path processing instead of skb_pull unless you know 4524 * that the checksum difference is zero (e.g., a valid IP header) 4525 * or you are setting ip_summed to CHECKSUM_NONE. 4526 */ 4527 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 4528 { 4529 unsigned char *data = skb->data; 4530 4531 BUG_ON(len > skb->len); 4532 __skb_pull(skb, len); 4533 skb_postpull_rcsum(skb, data, len); 4534 return skb->data; 4535 } 4536 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 4537 4538 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 4539 { 4540 skb_frag_t head_frag; 4541 struct page *page; 4542 4543 page = virt_to_head_page(frag_skb->head); 4544 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - 4545 (unsigned char *)page_address(page), 4546 skb_headlen(frag_skb)); 4547 return head_frag; 4548 } 4549 4550 struct sk_buff *skb_segment_list(struct sk_buff *skb, 4551 netdev_features_t features, 4552 unsigned int offset) 4553 { 4554 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 4555 unsigned int tnl_hlen = skb_tnl_header_len(skb); 4556 unsigned int delta_truesize = 0; 4557 unsigned int delta_len = 0; 4558 struct sk_buff *tail = NULL; 4559 struct sk_buff *nskb, *tmp; 4560 int len_diff, err; 4561 4562 skb_push(skb, -skb_network_offset(skb) + offset); 4563 4564 /* Ensure the head is writeable before touching the shared info */ 4565 err = skb_unclone(skb, GFP_ATOMIC); 4566 if (err) 4567 goto err_linearize; 4568 4569 skb_shinfo(skb)->frag_list = NULL; 4570 4571 while (list_skb) { 4572 nskb = list_skb; 4573 list_skb = list_skb->next; 4574 4575 err = 0; 4576 delta_truesize += nskb->truesize; 4577 if (skb_shared(nskb)) { 4578 tmp = skb_clone(nskb, GFP_ATOMIC); 4579 if (tmp) { 4580 consume_skb(nskb); 4581 nskb = tmp; 4582 err = skb_unclone(nskb, GFP_ATOMIC); 4583 } else { 4584 err = -ENOMEM; 4585 } 4586 } 4587 4588 if (!tail) 4589 skb->next = nskb; 4590 else 4591 tail->next = nskb; 4592 4593 if (unlikely(err)) { 4594 nskb->next = list_skb; 4595 goto err_linearize; 4596 } 4597 4598 tail = nskb; 4599 4600 delta_len += nskb->len; 4601 4602 skb_push(nskb, -skb_network_offset(nskb) + offset); 4603 4604 skb_release_head_state(nskb); 4605 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); 4606 __copy_skb_header(nskb, skb); 4607 4608 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 4609 nskb->transport_header += len_diff; 4610 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 4611 nskb->data - tnl_hlen, 4612 offset + tnl_hlen); 4613 4614 if (skb_needs_linearize(nskb, features) && 4615 __skb_linearize(nskb)) 4616 goto err_linearize; 4617 } 4618 4619 
skb->truesize = skb->truesize - delta_truesize;
4620 skb->data_len = skb->data_len - delta_len;
4621 skb->len = skb->len - delta_len;
4622
4623 skb_gso_reset(skb);
4624
4625 skb->prev = tail;
4626
4627 if (skb_needs_linearize(skb, features) &&
4628 __skb_linearize(skb))
4629 goto err_linearize;
4630
4631 skb_get(skb);
4632
4633 return skb;
4634
4635 err_linearize:
4636 kfree_skb_list(skb->next);
4637 skb->next = NULL;
4638 return ERR_PTR(-ENOMEM);
4639 }
4640 EXPORT_SYMBOL_GPL(skb_segment_list);
4641
4642 /**
4643 * skb_segment - Perform protocol segmentation on skb.
4644 * @head_skb: buffer to segment
4645 * @features: features for the output path (see dev->features)
4646 *
4647 * This function performs segmentation on the given skb. It returns
4648 * a pointer to the first in a list of new skbs for the segments.
4649 * In case of error it returns ERR_PTR(err).
4650 */
4651 struct sk_buff *skb_segment(struct sk_buff *head_skb,
4652 netdev_features_t features)
4653 {
4654 struct sk_buff *segs = NULL;
4655 struct sk_buff *tail = NULL;
4656 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
4657 unsigned int mss = skb_shinfo(head_skb)->gso_size;
4658 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
4659 unsigned int offset = doffset;
4660 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
4661 unsigned int partial_segs = 0;
4662 unsigned int headroom;
4663 unsigned int len = head_skb->len;
4664 struct sk_buff *frag_skb;
4665 skb_frag_t *frag;
4666 __be16 proto;
4667 bool csum, sg;
4668 int err = -ENOMEM;
4669 int i = 0;
4670 int nfrags, pos;
4671
4672 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
4673 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
4674 struct sk_buff *check_skb;
4675
4676 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
4677 if (skb_headlen(check_skb) && !check_skb->head_frag) {
4678 /* gso_size is untrusted, and we have a frag_list with
4679 * a linear non head_frag item.
4680 *
4681 * If head_skb's headlen does not fit requested gso_size,
4682 * it means that the frag_list members do NOT terminate
4683 * on exact gso_size boundaries. Hence we cannot perform
4684 * skb_frag_t page sharing. Therefore we must fall back to
4685 * copying the frag_list skbs; we do so by disabling SG.
4686 */
4687 features &= ~NETIF_F_SG;
4688 break;
4689 }
4690 }
4691 }
4692
4693 __skb_push(head_skb, doffset);
4694 proto = skb_network_protocol(head_skb, NULL);
4695 if (unlikely(!proto))
4696 return ERR_PTR(-EINVAL);
4697
4698 sg = !!(features & NETIF_F_SG);
4699 csum = !!can_checksum_protocol(features, proto);
4700
4701 if (sg && csum && (mss != GSO_BY_FRAGS)) {
4702 if (!(features & NETIF_F_GSO_PARTIAL)) {
4703 struct sk_buff *iter;
4704 unsigned int frag_len;
4705
4706 if (!list_skb ||
4707 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
4708 goto normal;
4709
4710 /* If we get here then all the required
4711 * GSO features except frag_list are supported.
4712 * Try to split the SKB to multiple GSO SKBs
4713 * with no frag_list.
4714 * Currently we can do that only when the buffers don't
4715 * have a linear part and all the buffers except
4716 * the last are of the same length.
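 * E.g. a GRO-built packet whose frag_list members are all the
 * same size (only the last may be shorter) and whose linear
 * parts, if any, are head_frags will pass the checks below;
 * anything else takes the normal path.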
4717 */ 4718 frag_len = list_skb->len; 4719 skb_walk_frags(head_skb, iter) { 4720 if (frag_len != iter->len && iter->next) 4721 goto normal; 4722 if (skb_headlen(iter) && !iter->head_frag) 4723 goto normal; 4724 4725 len -= iter->len; 4726 } 4727 4728 if (len != frag_len) 4729 goto normal; 4730 } 4731 4732 /* GSO partial only requires that we trim off any excess that 4733 * doesn't fit into an MSS sized block, so take care of that 4734 * now. 4735 * Cap len to not accidentally hit GSO_BY_FRAGS. 4736 */ 4737 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; 4738 if (partial_segs > 1) 4739 mss *= partial_segs; 4740 else 4741 partial_segs = 0; 4742 } 4743 4744 normal: 4745 headroom = skb_headroom(head_skb); 4746 pos = skb_headlen(head_skb); 4747 4748 if (skb_orphan_frags(head_skb, GFP_ATOMIC)) 4749 return ERR_PTR(-ENOMEM); 4750 4751 nfrags = skb_shinfo(head_skb)->nr_frags; 4752 frag = skb_shinfo(head_skb)->frags; 4753 frag_skb = head_skb; 4754 4755 do { 4756 struct sk_buff *nskb; 4757 skb_frag_t *nskb_frag; 4758 int hsize; 4759 int size; 4760 4761 if (unlikely(mss == GSO_BY_FRAGS)) { 4762 len = list_skb->len; 4763 } else { 4764 len = head_skb->len - offset; 4765 if (len > mss) 4766 len = mss; 4767 } 4768 4769 hsize = skb_headlen(head_skb) - offset; 4770 4771 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 4772 (skb_headlen(list_skb) == len || sg)) { 4773 BUG_ON(skb_headlen(list_skb) > len); 4774 4775 nskb = skb_clone(list_skb, GFP_ATOMIC); 4776 if (unlikely(!nskb)) 4777 goto err; 4778 4779 i = 0; 4780 nfrags = skb_shinfo(list_skb)->nr_frags; 4781 frag = skb_shinfo(list_skb)->frags; 4782 frag_skb = list_skb; 4783 pos += skb_headlen(list_skb); 4784 4785 while (pos < offset + len) { 4786 BUG_ON(i >= nfrags); 4787 4788 size = skb_frag_size(frag); 4789 if (pos + size > offset + len) 4790 break; 4791 4792 i++; 4793 pos += size; 4794 frag++; 4795 } 4796 4797 list_skb = list_skb->next; 4798 4799 if (unlikely(pskb_trim(nskb, len))) { 4800 kfree_skb(nskb); 4801 goto err; 4802 } 4803 4804 hsize = skb_end_offset(nskb); 4805 if (skb_cow_head(nskb, doffset + headroom)) { 4806 kfree_skb(nskb); 4807 goto err; 4808 } 4809 4810 nskb->truesize += skb_end_offset(nskb) - hsize; 4811 skb_release_head_state(nskb); 4812 __skb_push(nskb, doffset); 4813 } else { 4814 if (hsize < 0) 4815 hsize = 0; 4816 if (hsize > len || !sg) 4817 hsize = len; 4818 4819 nskb = __alloc_skb(hsize + doffset + headroom, 4820 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4821 NUMA_NO_NODE); 4822 4823 if (unlikely(!nskb)) 4824 goto err; 4825 4826 skb_reserve(nskb, headroom); 4827 __skb_put(nskb, doffset); 4828 } 4829 4830 if (segs) 4831 tail->next = nskb; 4832 else 4833 segs = nskb; 4834 tail = nskb; 4835 4836 __copy_skb_header(nskb, head_skb); 4837 4838 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4839 skb_reset_mac_len(nskb); 4840 4841 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 4842 nskb->data - tnl_hlen, 4843 doffset + tnl_hlen); 4844 4845 if (nskb->len == len + doffset) 4846 goto perform_csum_check; 4847 4848 if (!sg) { 4849 if (!csum) { 4850 if (!nskb->remcsum_offload) 4851 nskb->ip_summed = CHECKSUM_NONE; 4852 SKB_GSO_CB(nskb)->csum = 4853 skb_copy_and_csum_bits(head_skb, offset, 4854 skb_put(nskb, 4855 len), 4856 len); 4857 SKB_GSO_CB(nskb)->csum_start = 4858 skb_headroom(nskb) + doffset; 4859 } else { 4860 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) 4861 goto err; 4862 } 4863 continue; 4864 } 4865 4866 nskb_frag = skb_shinfo(nskb)->frags; 4867 4868 
skb_copy_from_linear_data_offset(head_skb, offset,
4869 skb_put(nskb, hsize), hsize);
4870
4871 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
4872 SKBFL_SHARED_FRAG;
4873
4874 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4875 goto err;
4876
4877 while (pos < offset + len) {
4878 if (i >= nfrags) {
4879 if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
4880 skb_zerocopy_clone(nskb, list_skb,
4881 GFP_ATOMIC))
4882 goto err;
4883
4884 i = 0;
4885 nfrags = skb_shinfo(list_skb)->nr_frags;
4886 frag = skb_shinfo(list_skb)->frags;
4887 frag_skb = list_skb;
4888 if (!skb_headlen(list_skb)) {
4889 BUG_ON(!nfrags);
4890 } else {
4891 BUG_ON(!list_skb->head_frag);
4892
4893 /* to make room for head_frag. */
4894 i--;
4895 frag--;
4896 }
4897
4898 list_skb = list_skb->next;
4899 }
4900
4901 if (unlikely(skb_shinfo(nskb)->nr_frags >=
4902 MAX_SKB_FRAGS)) {
4903 net_warn_ratelimited(
4904 "skb_segment: too many frags: %u %u\n",
4905 pos, mss);
4906 err = -EINVAL;
4907 goto err;
4908 }
4909
4910 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
4911 __skb_frag_ref(nskb_frag);
4912 size = skb_frag_size(nskb_frag);
4913
4914 if (pos < offset) {
4915 skb_frag_off_add(nskb_frag, offset - pos);
4916 skb_frag_size_sub(nskb_frag, offset - pos);
4917 }
4918
4919 skb_shinfo(nskb)->nr_frags++;
4920
4921 if (pos + size <= offset + len) {
4922 i++;
4923 frag++;
4924 pos += size;
4925 } else {
4926 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4927 goto skip_fraglist;
4928 }
4929
4930 nskb_frag++;
4931 }
4932
4933 skip_fraglist:
4934 nskb->data_len = len - hsize;
4935 nskb->len += nskb->data_len;
4936 nskb->truesize += nskb->data_len;
4937
4938 perform_csum_check:
4939 if (!csum) {
4940 if (skb_has_shared_frag(nskb) &&
4941 __skb_linearize(nskb))
4942 goto err;
4943
4944 if (!nskb->remcsum_offload)
4945 nskb->ip_summed = CHECKSUM_NONE;
4946 SKB_GSO_CB(nskb)->csum =
4947 skb_checksum(nskb, doffset,
4948 nskb->len - doffset, 0);
4949 SKB_GSO_CB(nskb)->csum_start =
4950 skb_headroom(nskb) + doffset;
4951 }
4952 } while ((offset += len) < head_skb->len);
4953
4954 /* Some callers want to get the end of the list.
4955 * Put it in segs->prev to avoid walking the list.
4956 * (see validate_xmit_skb_list() for example)
4957 */
4958 segs->prev = tail;
4959
4960 if (partial_segs) {
4961 struct sk_buff *iter;
4962 int type = skb_shinfo(head_skb)->gso_type;
4963 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4964
4965 /* Update type to add partial and then remove dodgy if set */
4966 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4967 type &= ~SKB_GSO_DODGY;
4968
4969 /* Update GSO info and prepare to start updating headers on
4970 * our way back down the stack of protocols.
4971 */
4972 for (iter = segs; iter; iter = iter->next) {
4973 skb_shinfo(iter)->gso_size = gso_size;
4974 skb_shinfo(iter)->gso_segs = partial_segs;
4975 skb_shinfo(iter)->gso_type = type;
4976 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4977 }
4978
4979 if (tail->len - doffset <= gso_size)
4980 skb_shinfo(tail)->gso_size = 0;
4981 else if (tail != segs)
4982 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4983 }
4984
4985 /* The following permits correct backpressure for protocols
4986 * using skb_set_owner_w().
4987 * The idea is to transfer ownership from head_skb to the last segment.
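 *
 * For context, a typical caller treats the result like this
 * (illustrative sketch only; xmit_list() is a placeholder):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	return xmit_list(segs);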
4988 */ 4989 if (head_skb->destructor == sock_wfree) { 4990 swap(tail->truesize, head_skb->truesize); 4991 swap(tail->destructor, head_skb->destructor); 4992 swap(tail->sk, head_skb->sk); 4993 } 4994 return segs; 4995 4996 err: 4997 kfree_skb_list(segs); 4998 return ERR_PTR(err); 4999 } 5000 EXPORT_SYMBOL_GPL(skb_segment); 5001 5002 #ifdef CONFIG_SKB_EXTENSIONS 5003 #define SKB_EXT_ALIGN_VALUE 8 5004 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 5005 5006 static const u8 skb_ext_type_len[] = { 5007 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 5008 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 5009 #endif 5010 #ifdef CONFIG_XFRM 5011 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 5012 #endif 5013 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 5014 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 5015 #endif 5016 #if IS_ENABLED(CONFIG_MPTCP) 5017 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 5018 #endif 5019 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 5020 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), 5021 #endif 5022 }; 5023 5024 static __always_inline unsigned int skb_ext_total_length(void) 5025 { 5026 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); 5027 int i; 5028 5029 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) 5030 l += skb_ext_type_len[i]; 5031 5032 return l; 5033 } 5034 5035 static void skb_extensions_init(void) 5036 { 5037 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 5038 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) 5039 BUILD_BUG_ON(skb_ext_total_length() > 255); 5040 #endif 5041 5042 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 5043 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 5044 0, 5045 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5046 NULL); 5047 } 5048 #else 5049 static void skb_extensions_init(void) {} 5050 #endif 5051 5052 /* The SKB kmem_cache slab is critical for network performance. Never 5053 * merge/alias the slab with similar sized objects. This avoids fragmentation 5054 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. 5055 */ 5056 #ifndef CONFIG_SLUB_TINY 5057 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE 5058 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ 5059 #define FLAG_SKB_NO_MERGE 0 5060 #endif 5061 5062 void __init skb_init(void) 5063 { 5064 net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", 5065 sizeof(struct sk_buff), 5066 0, 5067 SLAB_HWCACHE_ALIGN|SLAB_PANIC| 5068 FLAG_SKB_NO_MERGE, 5069 offsetof(struct sk_buff, cb), 5070 sizeof_field(struct sk_buff, cb), 5071 NULL); 5072 net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 5073 sizeof(struct sk_buff_fclones), 5074 0, 5075 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 5076 NULL); 5077 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. 5078 * struct skb_shared_info is located at the end of skb->head, 5079 * and should not be copied to/from user. 
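 * Hence the useroffset/usersize pair passed below is
 * 0 / SKB_SMALL_HEAD_HEADROOM, which keeps the trailing
 * shared info out of the usercopy window.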
5080 */
5081 net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
5082 SKB_SMALL_HEAD_CACHE_SIZE,
5083 0,
5084 SLAB_HWCACHE_ALIGN | SLAB_PANIC,
5085 0,
5086 SKB_SMALL_HEAD_HEADROOM,
5087 NULL);
5088 skb_extensions_init();
5089 }
5090
5091 static int
5092 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
5093 unsigned int recursion_level)
5094 {
5095 int start = skb_headlen(skb);
5096 int i, copy = start - offset;
5097 struct sk_buff *frag_iter;
5098 int elt = 0;
5099
5100 if (unlikely(recursion_level >= 24))
5101 return -EMSGSIZE;
5102
5103 if (copy > 0) {
5104 if (copy > len)
5105 copy = len;
5106 sg_set_buf(sg, skb->data + offset, copy);
5107 elt++;
5108 if ((len -= copy) == 0)
5109 return elt;
5110 offset += copy;
5111 }
5112
5113 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5114 int end;
5115
5116 WARN_ON(start > offset + len);
5117
5118 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
5119 if ((copy = end - offset) > 0) {
5120 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5121 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
5122 return -EMSGSIZE;
5123
5124 if (copy > len)
5125 copy = len;
5126 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
5127 skb_frag_off(frag) + offset - start);
5128 elt++;
5129 if (!(len -= copy))
5130 return elt;
5131 offset += copy;
5132 }
5133 start = end;
5134 }
5135
5136 skb_walk_frags(skb, frag_iter) {
5137 int end, ret;
5138
5139 WARN_ON(start > offset + len);
5140
5141 end = start + frag_iter->len;
5142 if ((copy = end - offset) > 0) {
5143 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
5144 return -EMSGSIZE;
5145
5146 if (copy > len)
5147 copy = len;
5148 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
5149 copy, recursion_level + 1);
5150 if (unlikely(ret < 0))
5151 return ret;
5152 elt += ret;
5153 if ((len -= copy) == 0)
5154 return elt;
5155 offset += copy;
5156 }
5157 start = end;
5158 }
5159 BUG_ON(len);
5160 return elt;
5161 }
5162
5163 /**
5164 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
5165 * @skb: Socket buffer containing the buffers to be mapped
5166 * @sg: The scatter-gather list to map into
5167 * @offset: The offset into the buffer's contents to start mapping
5168 * @len: Length of buffer space to be mapped
5169 *
5170 * Fill the specified scatter-gather list with mappings/pointers into a
5171 * region of the buffer space attached to a socket buffer. Returns either
5172 * the number of scatterlist items used, or -EMSGSIZE if the contents
5173 * could not fit.
5174 */
5175 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
5176 {
5177 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
5178
5179 if (nsg <= 0)
5180 return nsg;
5181
5182 sg_mark_end(&sg[nsg - 1]);
5183
5184 return nsg;
5185 }
5186 EXPORT_SYMBOL_GPL(skb_to_sgvec);
5187
5188 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
5189 * given sglist, without marking the sg which contains the last skb data as the
5190 * end. So the caller can manipulate the sg list at will when padding new data
5191 * after the first call, without calling sg_unmark_end to expand the sg list.
5192 *
5193 * Scenario to use skb_to_sgvec_nomark:
5194 * 1. sg_init_table
5195 * 2. skb_to_sgvec_nomark(payload1)
5196 * 3. skb_to_sgvec_nomark(payload2)
5197 *
5198 * This is equivalent to:
5199 * 1. sg_init_table
5200 * 2. skb_to_sgvec(payload1)
5201 * 3. sg_unmark_end
5202 * 4. skb_to_sgvec(payload2)
5203 *
5204 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
5205 * is preferable.
5206 */
5207 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
5208 int offset, int len)
5209 {
5210 return __skb_to_sgvec(skb, sg, offset, len, 0);
5211 }
5212 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
5213
5214
5215
5216 /**
5217 * skb_cow_data - Check that a socket buffer's data buffers are writable
5218 * @skb: The socket buffer to check.
5219 * @tailbits: Amount of trailing space to be added
5220 * @trailer: Returned pointer to the skb where the @tailbits space begins
5221 *
5222 * Make sure that the data buffers attached to a socket buffer are
5223 * writable. If they are not, private copies are made of the data buffers
5224 * and the socket buffer is set to use these instead.
5225 *
5226 * If @tailbits is given, make sure that there is space to write @tailbits
5227 * bytes of data beyond current end of socket buffer. @trailer will be
5228 * set to point to the skb in which this space begins.
5229 *
5230 * The number of scatterlist elements required to completely map the
5231 * COW'd and extended socket buffer will be returned.
5232 */
5233 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
5234 {
5235 int copyflag;
5236 int elt;
5237 struct sk_buff *skb1, **skb_p;
5238
5239 /* If skb is cloned or its head is paged, reallocate
5240 * head pulling out all the pages (pages are considered not writable
5241 * at the moment even if they are anonymous).
5242 */
5243 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
5244 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
5245 return -ENOMEM;
5246
5247 /* Easy case. Most of packets will go this way. */
5248 if (!skb_has_frag_list(skb)) {
5249 /* A little trouble: not enough space for the trailer.
5250 * This should not happen when the stack is tuned to generate
5251 * good frames. OK, on miss we reallocate and reserve even more
5252 * space, 128 bytes is fair. */
5253
5254 if (skb_tailroom(skb) < tailbits &&
5255 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
5256 return -ENOMEM;
5257
5258 /* Voila! */
5259 *trailer = skb;
5260 return 1;
5261 }
5262
5263 /* Misery. We are in trouble, going to mince fragments... */
5264
5265 elt = 1;
5266 skb_p = &skb_shinfo(skb)->frag_list;
5267 copyflag = 0;
5268
5269 while ((skb1 = *skb_p) != NULL) {
5270 int ntail = 0;
5271
5272 /* The fragment is partially pulled by someone;
5273 * this can happen on input. Copy it and everything
5274 * after it. */
5275
5276 if (skb_shared(skb1))
5277 copyflag = 1;
5278
5279 /* If the skb is the last, worry about trailer. */
5280
5281 if (skb1->next == NULL && tailbits) {
5282 if (skb_shinfo(skb1)->nr_frags ||
5283 skb_has_frag_list(skb1) ||
5284 skb_tailroom(skb1) < tailbits)
5285 ntail = tailbits + 128;
5286 }
5287
5288 if (copyflag ||
5289 skb_cloned(skb1) ||
5290 ntail ||
5291 skb_shinfo(skb1)->nr_frags ||
5292 skb_has_frag_list(skb1)) {
5293 struct sk_buff *skb2;
5294
5295 /* Fuck, we are miserable poor guys... */
5296 if (ntail == 0)
5297 skb2 = skb_copy(skb1, GFP_ATOMIC);
5298 else
5299 skb2 = skb_copy_expand(skb1,
5300 skb_headroom(skb1),
5301 ntail,
5302 GFP_ATOMIC);
5303 if (unlikely(skb2 == NULL))
5304 return -ENOMEM;
5305
5306 if (skb1->sk)
5307 skb_set_owner_w(skb2, skb1->sk);
5308
5309 /* Looking around. Are we still alive?
5310 * OK, link new skb, drop old one */
5311
5312 skb2->next = skb1->next;
5313 *skb_p = skb2;
5314 kfree_skb(skb1);
5315 skb1 = skb2;
5316 }
5317 elt++;
5318 *trailer = skb1;
5319 skb_p = &skb1->next;
5320 }
5321
5322 return elt;
5323 }
5324 EXPORT_SYMBOL_GPL(skb_cow_data);
5325
5326 static void sock_rmem_free(struct sk_buff *skb)
5327 {
5328 struct sock *sk = skb->sk;
5329
5330 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
5331 }
5332
5333 static void skb_set_err_queue(struct sk_buff *skb)
5334 {
5335 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
5336 * So, it is safe to (mis)use it to mark skbs on the error queue.
5337 */
5338 skb->pkt_type = PACKET_OUTGOING;
5339 BUILD_BUG_ON(PACKET_OUTGOING == 0);
5340 }
5341
5342 /*
5343 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
5344 */
5345 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5346 {
5347 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
5348 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
5349 return -ENOMEM;
5350
5351 skb_orphan(skb);
5352 skb->sk = sk;
5353 skb->destructor = sock_rmem_free;
5354 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
5355 skb_set_err_queue(skb);
5356
5357 /* before exiting rcu section, make sure dst is refcounted */
5358 skb_dst_force(skb);
5359
5360 skb_queue_tail(&sk->sk_error_queue, skb);
5361 if (!sock_flag(sk, SOCK_DEAD))
5362 sk_error_report(sk);
5363 return 0;
5364 }
5365 EXPORT_SYMBOL(sock_queue_err_skb);
5366
5367 static bool is_icmp_err_skb(const struct sk_buff *skb)
5368 {
5369 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
5370 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
5371 }
5372
5373 struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
5374 {
5375 struct sk_buff_head *q = &sk->sk_error_queue;
5376 struct sk_buff *skb, *skb_next = NULL;
5377 bool icmp_next = false;
5378 unsigned long flags;
5379
5380 if (skb_queue_empty_lockless(q))
5381 return NULL;
5382
5383 spin_lock_irqsave(&q->lock, flags);
5384 skb = __skb_dequeue(q);
5385 if (skb && (skb_next = skb_peek(q))) {
5386 icmp_next = is_icmp_err_skb(skb_next);
5387 if (icmp_next)
5388 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
5389 }
5390 spin_unlock_irqrestore(&q->lock, flags);
5391
5392 if (is_icmp_err_skb(skb) && !icmp_next)
5393 sk->sk_err = 0;
5394
5395 if (skb_next)
5396 sk_error_report(sk);
5397
5398 return skb;
5399 }
5400 EXPORT_SYMBOL(sock_dequeue_err_skb);
5401
5402 /**
5403 * skb_clone_sk - create clone of skb, and take reference to socket
5404 * @skb: the skb to clone
5405 *
5406 * This function creates a clone of a buffer that holds a reference on
5407 * sk_refcnt. Buffers created via this function are meant to be
5408 * returned using sock_queue_err_skb, or freed via kfree_skb.
5409 *
5410 * When passing buffers allocated with this function to sock_queue_err_skb
5411 * it is necessary to wrap the call with sock_hold/sock_put in order to
5412 * prevent the socket from being released prior to being enqueued on
5413 * the sk_error_queue.
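 *
 * I.e. the pattern described above looks like this (illustrative
 * sketch only):
 *
 *	sock_hold(sk);
 *	clone = skb_clone_sk(skb);
 *	if (clone && sock_queue_err_skb(sk, clone))
 *		kfree_skb(clone);
 *	sock_put(sk);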
5414 */ 5415 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 5416 { 5417 struct sock *sk = skb->sk; 5418 struct sk_buff *clone; 5419 5420 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 5421 return NULL; 5422 5423 clone = skb_clone(skb, GFP_ATOMIC); 5424 if (!clone) { 5425 sock_put(sk); 5426 return NULL; 5427 } 5428 5429 clone->sk = sk; 5430 clone->destructor = sock_efree; 5431 5432 return clone; 5433 } 5434 EXPORT_SYMBOL(skb_clone_sk); 5435 5436 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 5437 struct sock *sk, 5438 int tstype, 5439 bool opt_stats) 5440 { 5441 struct sock_exterr_skb *serr; 5442 int err; 5443 5444 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 5445 5446 serr = SKB_EXT_ERR(skb); 5447 memset(serr, 0, sizeof(*serr)); 5448 serr->ee.ee_errno = ENOMSG; 5449 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 5450 serr->ee.ee_info = tstype; 5451 serr->opt_stats = opt_stats; 5452 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 5453 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 5454 serr->ee.ee_data = skb_shinfo(skb)->tskey; 5455 if (sk_is_tcp(sk)) 5456 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 5457 } 5458 5459 err = sock_queue_err_skb(sk, skb); 5460 5461 if (err) 5462 kfree_skb(skb); 5463 } 5464 5465 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5466 { 5467 bool ret; 5468 5469 if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data))) 5470 return true; 5471 5472 read_lock_bh(&sk->sk_callback_lock); 5473 ret = sk->sk_socket && sk->sk_socket->file && 5474 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5475 read_unlock_bh(&sk->sk_callback_lock); 5476 return ret; 5477 } 5478 5479 void skb_complete_tx_timestamp(struct sk_buff *skb, 5480 struct skb_shared_hwtstamps *hwtstamps) 5481 { 5482 struct sock *sk = skb->sk; 5483 5484 if (!skb_may_tx_timestamp(sk, false)) 5485 goto err; 5486 5487 /* Take a reference to prevent skb_orphan() from freeing the socket, 5488 * but only if the socket refcount is not zero. 5489 */ 5490 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5491 *skb_hwtstamps(skb) = *hwtstamps; 5492 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 5493 sock_put(sk); 5494 return; 5495 } 5496 5497 err: 5498 kfree_skb(skb); 5499 } 5500 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 5501 5502 static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, 5503 struct skb_shared_hwtstamps *hwtstamps, 5504 int tstype) 5505 { 5506 switch (tstype) { 5507 case SCM_TSTAMP_SCHED: 5508 return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; 5509 case SCM_TSTAMP_SND: 5510 return skb_shinfo(skb)->tx_flags & (hwtstamps ? 
SKBTX_HW_TSTAMP_NOBPF : 5511 SKBTX_SW_TSTAMP); 5512 case SCM_TSTAMP_ACK: 5513 return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; 5514 case SCM_TSTAMP_COMPLETION: 5515 return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; 5516 } 5517 5518 return false; 5519 } 5520 5521 static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, 5522 struct skb_shared_hwtstamps *hwtstamps, 5523 struct sock *sk, 5524 int tstype) 5525 { 5526 int op; 5527 5528 switch (tstype) { 5529 case SCM_TSTAMP_SCHED: 5530 op = BPF_SOCK_OPS_TSTAMP_SCHED_CB; 5531 break; 5532 case SCM_TSTAMP_SND: 5533 if (hwtstamps) { 5534 op = BPF_SOCK_OPS_TSTAMP_SND_HW_CB; 5535 *skb_hwtstamps(skb) = *hwtstamps; 5536 } else { 5537 op = BPF_SOCK_OPS_TSTAMP_SND_SW_CB; 5538 } 5539 break; 5540 case SCM_TSTAMP_ACK: 5541 op = BPF_SOCK_OPS_TSTAMP_ACK_CB; 5542 break; 5543 default: 5544 return; 5545 } 5546 5547 bpf_skops_tx_timestamping(sk, skb, op); 5548 } 5549 5550 void __skb_tstamp_tx(struct sk_buff *orig_skb, 5551 const struct sk_buff *ack_skb, 5552 struct skb_shared_hwtstamps *hwtstamps, 5553 struct sock *sk, int tstype) 5554 { 5555 struct sk_buff *skb; 5556 bool tsonly, opt_stats = false; 5557 u32 tsflags; 5558 5559 if (!sk) 5560 return; 5561 5562 if (skb_shinfo(orig_skb)->tx_flags & SKBTX_BPF) 5563 skb_tstamp_tx_report_bpf_timestamping(orig_skb, hwtstamps, 5564 sk, tstype); 5565 5566 if (!skb_tstamp_tx_report_so_timestamping(orig_skb, hwtstamps, tstype)) 5567 return; 5568 5569 tsflags = READ_ONCE(sk->sk_tsflags); 5570 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 5571 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 5572 return; 5573 5574 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 5575 if (!skb_may_tx_timestamp(sk, tsonly)) 5576 return; 5577 5578 if (tsonly) { 5579 #ifdef CONFIG_INET 5580 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && 5581 sk_is_tcp(sk)) { 5582 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 5583 ack_skb); 5584 opt_stats = true; 5585 } else 5586 #endif 5587 skb = alloc_skb(0, GFP_ATOMIC); 5588 } else { 5589 skb = skb_clone(orig_skb, GFP_ATOMIC); 5590 5591 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { 5592 kfree_skb(skb); 5593 return; 5594 } 5595 } 5596 if (!skb) 5597 return; 5598 5599 if (tsonly) { 5600 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 5601 SKBTX_ANY_TSTAMP; 5602 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 5603 } 5604 5605 if (hwtstamps) 5606 *skb_hwtstamps(skb) = *hwtstamps; 5607 else 5608 __net_timestamp(skb); 5609 5610 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 5611 } 5612 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 5613 5614 void skb_tstamp_tx(struct sk_buff *orig_skb, 5615 struct skb_shared_hwtstamps *hwtstamps) 5616 { 5617 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 5618 SCM_TSTAMP_SND); 5619 } 5620 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 5621 5622 #ifdef CONFIG_WIRELESS 5623 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 5624 { 5625 struct sock *sk = skb->sk; 5626 struct sock_exterr_skb *serr; 5627 int err = 1; 5628 5629 skb->wifi_acked_valid = 1; 5630 skb->wifi_acked = acked; 5631 5632 serr = SKB_EXT_ERR(skb); 5633 memset(serr, 0, sizeof(*serr)); 5634 serr->ee.ee_errno = ENOMSG; 5635 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 5636 5637 /* Take a reference to prevent skb_orphan() from freeing the socket, 5638 * but only if the socket refcount is not zero. 
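 * (refcount_inc_not_zero() rather than a plain sock_hold(): on this
 * completion path the last reference to the socket may already be
 * gone, and a 0 -> 1 resurrection must be avoided.)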
5639 */ 5640 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 5641 err = sock_queue_err_skb(sk, skb); 5642 sock_put(sk); 5643 } 5644 if (err) 5645 kfree_skb(skb); 5646 } 5647 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 5648 #endif /* CONFIG_WIRELESS */ 5649 5650 /** 5651 * skb_partial_csum_set - set up and verify partial csum values for packet 5652 * @skb: the skb to set 5653 * @start: the number of bytes after skb->data to start checksumming. 5654 * @off: the offset from start to place the checksum. 5655 * 5656 * For untrusted partially-checksummed packets, we need to make sure the values 5657 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 5658 * 5659 * This function checks and sets those values and skb->ip_summed: if this 5660 * returns false you should drop the packet. 5661 */ 5662 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 5663 { 5664 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 5665 u32 csum_start = skb_headroom(skb) + (u32)start; 5666 5667 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { 5668 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 5669 start, off, skb_headroom(skb), skb_headlen(skb)); 5670 return false; 5671 } 5672 skb->ip_summed = CHECKSUM_PARTIAL; 5673 skb->csum_start = csum_start; 5674 skb->csum_offset = off; 5675 skb->transport_header = csum_start; 5676 return true; 5677 } 5678 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 5679 5680 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 5681 unsigned int max) 5682 { 5683 if (skb_headlen(skb) >= len) 5684 return 0; 5685 5686 /* If we need to pullup then pullup to the max, so we 5687 * won't need to do it again. 5688 */ 5689 if (max > skb->len) 5690 max = skb->len; 5691 5692 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 5693 return -ENOMEM; 5694 5695 if (skb_headlen(skb) < len) 5696 return -EPROTO; 5697 5698 return 0; 5699 } 5700 5701 #define MAX_TCP_HDR_LEN (15 * 4) 5702 5703 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 5704 typeof(IPPROTO_IP) proto, 5705 unsigned int off) 5706 { 5707 int err; 5708 5709 switch (proto) { 5710 case IPPROTO_TCP: 5711 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 5712 off + MAX_TCP_HDR_LEN); 5713 if (!err && !skb_partial_csum_set(skb, off, 5714 offsetof(struct tcphdr, 5715 check))) 5716 err = -EPROTO; 5717 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 5718 5719 case IPPROTO_UDP: 5720 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 5721 off + sizeof(struct udphdr)); 5722 if (!err && !skb_partial_csum_set(skb, off, 5723 offsetof(struct udphdr, 5724 check))) 5725 err = -EPROTO; 5726 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5727 } 5728 5729 return ERR_PTR(-EPROTO); 5730 } 5731 5732 /* This value should be large enough to cover a tagged ethernet header plus 5733 * maximally sized IP and TCP or UDP headers. 
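 * (A maximal IPv4 header is 15 * 4 == 60 bytes, and MAX_TCP_HDR_LEN
 * above is another 60 bytes, so 128 covers both with room to spare.)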
5734 */ 5735 #define MAX_IP_HDR_LEN 128 5736 5737 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5738 { 5739 unsigned int off; 5740 bool fragment; 5741 __sum16 *csum; 5742 int err; 5743 5744 fragment = false; 5745 5746 err = skb_maybe_pull_tail(skb, 5747 sizeof(struct iphdr), 5748 MAX_IP_HDR_LEN); 5749 if (err < 0) 5750 goto out; 5751 5752 if (ip_is_fragment(ip_hdr(skb))) 5753 fragment = true; 5754 5755 off = ip_hdrlen(skb); 5756 5757 err = -EPROTO; 5758 5759 if (fragment) 5760 goto out; 5761 5762 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5763 if (IS_ERR(csum)) 5764 return PTR_ERR(csum); 5765 5766 if (recalculate) 5767 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5768 ip_hdr(skb)->daddr, 5769 skb->len - off, 5770 ip_hdr(skb)->protocol, 0); 5771 err = 0; 5772 5773 out: 5774 return err; 5775 } 5776 5777 /* This value should be large enough to cover a tagged ethernet header plus 5778 * an IPv6 header, all options, and a maximal TCP or UDP header. 5779 */ 5780 #define MAX_IPV6_HDR_LEN 256 5781 5782 #define OPT_HDR(type, skb, off) \ 5783 (type *)(skb_network_header(skb) + (off)) 5784 5785 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5786 { 5787 int err; 5788 u8 nexthdr; 5789 unsigned int off; 5790 unsigned int len; 5791 bool fragment; 5792 bool done; 5793 __sum16 *csum; 5794 5795 fragment = false; 5796 done = false; 5797 5798 off = sizeof(struct ipv6hdr); 5799 5800 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5801 if (err < 0) 5802 goto out; 5803 5804 nexthdr = ipv6_hdr(skb)->nexthdr; 5805 5806 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5807 while (off <= len && !done) { 5808 switch (nexthdr) { 5809 case IPPROTO_DSTOPTS: 5810 case IPPROTO_HOPOPTS: 5811 case IPPROTO_ROUTING: { 5812 struct ipv6_opt_hdr *hp; 5813 5814 err = skb_maybe_pull_tail(skb, 5815 off + 5816 sizeof(struct ipv6_opt_hdr), 5817 MAX_IPV6_HDR_LEN); 5818 if (err < 0) 5819 goto out; 5820 5821 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5822 nexthdr = hp->nexthdr; 5823 off += ipv6_optlen(hp); 5824 break; 5825 } 5826 case IPPROTO_AH: { 5827 struct ip_auth_hdr *hp; 5828 5829 err = skb_maybe_pull_tail(skb, 5830 off + 5831 sizeof(struct ip_auth_hdr), 5832 MAX_IPV6_HDR_LEN); 5833 if (err < 0) 5834 goto out; 5835 5836 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5837 nexthdr = hp->nexthdr; 5838 off += ipv6_authlen(hp); 5839 break; 5840 } 5841 case IPPROTO_FRAGMENT: { 5842 struct frag_hdr *hp; 5843 5844 err = skb_maybe_pull_tail(skb, 5845 off + 5846 sizeof(struct frag_hdr), 5847 MAX_IPV6_HDR_LEN); 5848 if (err < 0) 5849 goto out; 5850 5851 hp = OPT_HDR(struct frag_hdr, skb, off); 5852 5853 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5854 fragment = true; 5855 5856 nexthdr = hp->nexthdr; 5857 off += sizeof(struct frag_hdr); 5858 break; 5859 } 5860 default: 5861 done = true; 5862 break; 5863 } 5864 } 5865 5866 err = -EPROTO; 5867 5868 if (!done || fragment) 5869 goto out; 5870 5871 csum = skb_checksum_setup_ip(skb, nexthdr, off); 5872 if (IS_ERR(csum)) 5873 return PTR_ERR(csum); 5874 5875 if (recalculate) 5876 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5877 &ipv6_hdr(skb)->daddr, 5878 skb->len - off, nexthdr, 0); 5879 err = 0; 5880 5881 out: 5882 return err; 5883 } 5884 5885 /** 5886 * skb_checksum_setup - set up partial checksum offset 5887 * @skb: the skb to set up 5888 * @recalculate: if true the pseudo-header checksum will be recalculated 5889 */ 5890 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5891 { 
5892 int err; 5893 5894 switch (skb->protocol) { 5895 case htons(ETH_P_IP): 5896 err = skb_checksum_setup_ipv4(skb, recalculate); 5897 break; 5898 5899 case htons(ETH_P_IPV6): 5900 err = skb_checksum_setup_ipv6(skb, recalculate); 5901 break; 5902 5903 default: 5904 err = -EPROTO; 5905 break; 5906 } 5907 5908 return err; 5909 } 5910 EXPORT_SYMBOL(skb_checksum_setup); 5911 5912 /** 5913 * skb_checksum_maybe_trim - maybe trims the given skb 5914 * @skb: the skb to check 5915 * @transport_len: the data length beyond the network header 5916 * 5917 * Checks whether the given skb has data beyond the given transport length. 5918 * If so, returns a cloned skb trimmed to this transport length. 5919 * Otherwise returns the provided skb. Returns NULL in error cases 5920 * (e.g. transport_len exceeds skb length or out-of-memory). 5921 * 5922 * Caller needs to set the skb transport header and free any returned skb if it 5923 * differs from the provided skb. 5924 */ 5925 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5926 unsigned int transport_len) 5927 { 5928 struct sk_buff *skb_chk; 5929 unsigned int len = skb_transport_offset(skb) + transport_len; 5930 int ret; 5931 5932 if (skb->len < len) 5933 return NULL; 5934 else if (skb->len == len) 5935 return skb; 5936 5937 skb_chk = skb_clone(skb, GFP_ATOMIC); 5938 if (!skb_chk) 5939 return NULL; 5940 5941 ret = pskb_trim_rcsum(skb_chk, len); 5942 if (ret) { 5943 kfree_skb(skb_chk); 5944 return NULL; 5945 } 5946 5947 return skb_chk; 5948 } 5949 5950 /** 5951 * skb_checksum_trimmed - validate checksum of an skb 5952 * @skb: the skb to check 5953 * @transport_len: the data length beyond the network header 5954 * @skb_chkf: checksum function to use 5955 * 5956 * Applies the given checksum function skb_chkf to the provided skb. 5957 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5958 * 5959 * If the skb has data beyond the given transport length, then a 5960 * trimmed & cloned skb is checked and returned. 5961 * 5962 * Caller needs to set the skb transport header and free any returned skb if it 5963 * differs from the provided skb. 
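 *
 * Typical use (illustrative sketch only; my_chkf() stands in for the
 * caller's own checksum routine):
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_chkf);
 *	if (!skb_chk)
 *		goto drop;
 *	... inspect skb_chk ...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);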
5964 */ 5965 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5966 unsigned int transport_len, 5967 __sum16(*skb_chkf)(struct sk_buff *skb)) 5968 { 5969 struct sk_buff *skb_chk; 5970 unsigned int offset = skb_transport_offset(skb); 5971 __sum16 ret; 5972 5973 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5974 if (!skb_chk) 5975 goto err; 5976 5977 if (!pskb_may_pull(skb_chk, offset)) 5978 goto err; 5979 5980 skb_pull_rcsum(skb_chk, offset); 5981 ret = skb_chkf(skb_chk); 5982 skb_push_rcsum(skb_chk, offset); 5983 5984 if (ret) 5985 goto err; 5986 5987 return skb_chk; 5988 5989 err: 5990 if (skb_chk && skb_chk != skb) 5991 kfree_skb(skb_chk); 5992 5993 return NULL; 5994 5995 } 5996 EXPORT_SYMBOL(skb_checksum_trimmed); 5997 5998 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5999 { 6000 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 6001 skb->dev->name); 6002 } 6003 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 6004 6005 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 6006 { 6007 if (head_stolen) { 6008 skb_release_head_state(skb); 6009 kmem_cache_free(net_hotdata.skbuff_cache, skb); 6010 } else { 6011 __kfree_skb(skb); 6012 } 6013 } 6014 EXPORT_SYMBOL(kfree_skb_partial); 6015 6016 /** 6017 * skb_try_coalesce - try to merge skb to prior one 6018 * @to: prior buffer 6019 * @from: buffer to add 6020 * @fragstolen: pointer to boolean 6021 * @delta_truesize: how much more was allocated than was requested 6022 */ 6023 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 6024 bool *fragstolen, int *delta_truesize) 6025 { 6026 struct skb_shared_info *to_shinfo, *from_shinfo; 6027 int i, delta, len = from->len; 6028 6029 *fragstolen = false; 6030 6031 if (skb_cloned(to)) 6032 return false; 6033 6034 /* In general, avoid mixing page_pool and non-page_pool allocated 6035 * pages within the same SKB. In theory we could take full 6036 * references if @from is cloned and !@to->pp_recycle but its 6037 * tricky (due to potential race with the clone disappearing) and 6038 * rare, so not worth dealing with. 
6039 */ 6040 if (to->pp_recycle != from->pp_recycle) 6041 return false; 6042 6043 if (skb_frags_readable(from) != skb_frags_readable(to)) 6044 return false; 6045 6046 if (len <= skb_tailroom(to) && skb_frags_readable(from)) { 6047 if (len) 6048 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 6049 *delta_truesize = 0; 6050 return true; 6051 } 6052 6053 to_shinfo = skb_shinfo(to); 6054 from_shinfo = skb_shinfo(from); 6055 if (to_shinfo->frag_list || from_shinfo->frag_list) 6056 return false; 6057 if (skb_zcopy(to) || skb_zcopy(from)) 6058 return false; 6059 6060 if (skb_headlen(from) != 0) { 6061 struct page *page; 6062 unsigned int offset; 6063 6064 if (to_shinfo->nr_frags + 6065 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 6066 return false; 6067 6068 if (skb_head_is_locked(from)) 6069 return false; 6070 6071 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 6072 6073 page = virt_to_head_page(from->head); 6074 offset = from->data - (unsigned char *)page_address(page); 6075 6076 skb_fill_page_desc(to, to_shinfo->nr_frags, 6077 page, offset, skb_headlen(from)); 6078 *fragstolen = true; 6079 } else { 6080 if (to_shinfo->nr_frags + 6081 from_shinfo->nr_frags > MAX_SKB_FRAGS) 6082 return false; 6083 6084 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 6085 } 6086 6087 WARN_ON_ONCE(delta < len); 6088 6089 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 6090 from_shinfo->frags, 6091 from_shinfo->nr_frags * sizeof(skb_frag_t)); 6092 to_shinfo->nr_frags += from_shinfo->nr_frags; 6093 6094 if (!skb_cloned(from)) 6095 from_shinfo->nr_frags = 0; 6096 6097 /* if the skb is not cloned this does nothing 6098 * since we set nr_frags to 0. 6099 */ 6100 if (skb_pp_frag_ref(from)) { 6101 for (i = 0; i < from_shinfo->nr_frags; i++) 6102 __skb_frag_ref(&from_shinfo->frags[i]); 6103 } 6104 6105 to->truesize += delta; 6106 to->len += len; 6107 to->data_len += len; 6108 6109 *delta_truesize = delta; 6110 return true; 6111 } 6112 EXPORT_SYMBOL(skb_try_coalesce); 6113 6114 /** 6115 * skb_scrub_packet - scrub an skb 6116 * 6117 * @skb: buffer to clean 6118 * @xnet: packet is crossing netns 6119 * 6120 * skb_scrub_packet can be used after encapsulating or decapsulating a packet 6121 * into/from a tunnel. Some information have to be cleared during these 6122 * operations. 6123 * skb_scrub_packet can also be used to clean a skb before injecting it in 6124 * another namespace (@xnet == true). We have to clear all information in the 6125 * skb that could impact namespace isolation. 
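 *
 * A typical call (sketch, mirroring how forwarding paths decide @xnet):
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(dst_dev), dev_net(skb->dev)));
 *
 * Here "dst_dev" stands for the device the packet is about to be injected
 * into; the name is illustrative, not taken from this file.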
6126 */ 6127 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 6128 { 6129 skb->pkt_type = PACKET_HOST; 6130 skb->skb_iif = 0; 6131 skb->ignore_df = 0; 6132 skb_dst_drop(skb); 6133 skb_ext_reset(skb); 6134 nf_reset_ct(skb); 6135 nf_reset_trace(skb); 6136 6137 #ifdef CONFIG_NET_SWITCHDEV 6138 skb->offload_fwd_mark = 0; 6139 skb->offload_l3_fwd_mark = 0; 6140 #endif 6141 ipvs_reset(skb); 6142 6143 if (!xnet) 6144 return; 6145 6146 skb->mark = 0; 6147 skb_clear_tstamp(skb); 6148 } 6149 EXPORT_SYMBOL_GPL(skb_scrub_packet); 6150 6151 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 6152 { 6153 int mac_len, meta_len; 6154 void *meta; 6155 6156 if (skb_cow(skb, skb_headroom(skb)) < 0) { 6157 kfree_skb(skb); 6158 return NULL; 6159 } 6160 6161 mac_len = skb->data - skb_mac_header(skb); 6162 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 6163 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 6164 mac_len - VLAN_HLEN - ETH_TLEN); 6165 } 6166 6167 meta_len = skb_metadata_len(skb); 6168 if (meta_len) { 6169 meta = skb_metadata_end(skb) - meta_len; 6170 memmove(meta + VLAN_HLEN, meta, meta_len); 6171 } 6172 6173 skb->mac_header += VLAN_HLEN; 6174 return skb; 6175 } 6176 6177 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 6178 { 6179 struct vlan_hdr *vhdr; 6180 u16 vlan_tci; 6181 6182 if (unlikely(skb_vlan_tag_present(skb))) { 6183 /* vlan_tci is already set-up so leave this for another time */ 6184 return skb; 6185 } 6186 6187 skb = skb_share_check(skb, GFP_ATOMIC); 6188 if (unlikely(!skb)) 6189 goto err_free; 6190 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 6191 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 6192 goto err_free; 6193 6194 vhdr = (struct vlan_hdr *)skb->data; 6195 vlan_tci = ntohs(vhdr->h_vlan_TCI); 6196 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 6197 6198 skb_pull_rcsum(skb, VLAN_HLEN); 6199 vlan_set_encap_proto(skb, vhdr); 6200 6201 skb = skb_reorder_vlan_header(skb); 6202 if (unlikely(!skb)) 6203 goto err_free; 6204 6205 skb_reset_network_header(skb); 6206 if (!skb_transport_header_was_set(skb)) 6207 skb_reset_transport_header(skb); 6208 skb_reset_mac_len(skb); 6209 6210 return skb; 6211 6212 err_free: 6213 kfree_skb(skb); 6214 return NULL; 6215 } 6216 EXPORT_SYMBOL(skb_vlan_untag); 6217 6218 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) 6219 { 6220 if (!pskb_may_pull(skb, write_len)) 6221 return -ENOMEM; 6222 6223 if (!skb_frags_readable(skb)) 6224 return -EFAULT; 6225 6226 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 6227 return 0; 6228 6229 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6230 } 6231 EXPORT_SYMBOL(skb_ensure_writable); 6232 6233 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) 6234 { 6235 int needed_headroom = dev->needed_headroom; 6236 int needed_tailroom = dev->needed_tailroom; 6237 6238 /* For tail taggers, we need to pad short frames ourselves, to ensure 6239 * that the tail tag does not fail at its role of being at the end of 6240 * the packet, once the conduit interface pads the frame. Account for 6241 * that pad length here, and pad later. 6242 */ 6243 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) 6244 needed_tailroom += ETH_ZLEN - skb->len; 6245 /* skb_headroom() returns unsigned int... 
*/ 6246 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
6247 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
6248
6249 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
6250 /* No reallocation needed, yay! */
6251 return 0;
6252
6253 return pskb_expand_head(skb, needed_headroom, needed_tailroom,
6254 GFP_ATOMIC);
6255 }
6256 EXPORT_SYMBOL(skb_ensure_writable_head_tail);
6257
6258 /* remove VLAN header from packet and update csum accordingly.
6259 * expects a non skb_vlan_tag_present skb with a vlan tag payload
6260 */
6261 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
6262 {
6263 int offset = skb->data - skb_mac_header(skb);
6264 int err;
6265
6266 if (WARN_ONCE(offset,
6267 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
6268 offset)) {
6269 return -EINVAL;
6270 }
6271
6272 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
6273 if (unlikely(err))
6274 return err;
6275
6276 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6277
6278 vlan_remove_tag(skb, vlan_tci);
6279
6280 skb->mac_header += VLAN_HLEN;
6281
6282 if (skb_network_offset(skb) < ETH_HLEN)
6283 skb_set_network_header(skb, ETH_HLEN);
6284
6285 skb_reset_mac_len(skb);
6286
6287 return err;
6288 }
6289 EXPORT_SYMBOL(__skb_vlan_pop);
6290
6291 /* Pop a vlan tag either from hwaccel or from payload.
6292 * Expects skb->data at mac header.
6293 */
6294 int skb_vlan_pop(struct sk_buff *skb)
6295 {
6296 u16 vlan_tci;
6297 __be16 vlan_proto;
6298 int err;
6299
6300 if (likely(skb_vlan_tag_present(skb))) {
6301 __vlan_hwaccel_clear_tag(skb);
6302 } else {
6303 if (unlikely(!eth_type_vlan(skb->protocol)))
6304 return 0;
6305
6306 err = __skb_vlan_pop(skb, &vlan_tci);
6307 if (err)
6308 return err;
6309 }
6310 /* move next vlan tag to hw accel tag */
6311 if (likely(!eth_type_vlan(skb->protocol)))
6312 return 0;
6313
6314 vlan_proto = skb->protocol;
6315 err = __skb_vlan_pop(skb, &vlan_tci);
6316 if (unlikely(err))
6317 return err;
6318
6319 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6320 return 0;
6321 }
6322 EXPORT_SYMBOL(skb_vlan_pop);
6323
6324 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
6325 * Expects skb->data at mac header.
6326 */
6327 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
6328 {
6329 if (skb_vlan_tag_present(skb)) {
6330 int offset = skb->data - skb_mac_header(skb);
6331 int err;
6332
6333 if (WARN_ONCE(offset,
6334 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
6335 offset)) {
6336 return -EINVAL;
6337 }
6338
6339 err = __vlan_insert_tag(skb, skb->vlan_proto,
6340 skb_vlan_tag_get(skb));
6341 if (err)
6342 return err;
6343
6344 skb->protocol = skb->vlan_proto;
6345 skb->mac_len += VLAN_HLEN;
6346
6347 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6348 }
6349 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6350 return 0;
6351 }
6352 EXPORT_SYMBOL(skb_vlan_push);
6353
6354 /**
6355 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6356 *
6357 * @skb: Socket buffer to modify
6358 *
6359 * Drop the Ethernet header of @skb.
6360 *
6361 * Expects that skb->data points to the mac header and that no VLAN tags are
6362 * present.
6363 *
6364 * Returns 0 on success, -errno otherwise.
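 *
 * Sketch of a caller (illustrative; the error label is hypothetical):
 *
 *	err = skb_eth_pop(skb);
 *	if (err)
 *		goto drop;
 *
 * On success skb->data has advanced past the Ethernet header, i.e. to the
 * start of what was the network header.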
6365 */ 6366 int skb_eth_pop(struct sk_buff *skb) 6367 { 6368 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 6369 skb_network_offset(skb) < ETH_HLEN) 6370 return -EPROTO; 6371 6372 skb_pull_rcsum(skb, ETH_HLEN); 6373 skb_reset_mac_header(skb); 6374 skb_reset_mac_len(skb); 6375 6376 return 0; 6377 } 6378 EXPORT_SYMBOL(skb_eth_pop); 6379 6380 /** 6381 * skb_eth_push() - Add a new Ethernet header at the head of a packet 6382 * 6383 * @skb: Socket buffer to modify 6384 * @dst: Destination MAC address of the new header 6385 * @src: Source MAC address of the new header 6386 * 6387 * Prepend @skb with a new Ethernet header. 6388 * 6389 * Expects that skb->data points to the mac header, which must be empty. 6390 * 6391 * Returns 0 on success, -errno otherwise. 6392 */ 6393 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 6394 const unsigned char *src) 6395 { 6396 struct ethhdr *eth; 6397 int err; 6398 6399 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 6400 return -EPROTO; 6401 6402 err = skb_cow_head(skb, sizeof(*eth)); 6403 if (err < 0) 6404 return err; 6405 6406 skb_push(skb, sizeof(*eth)); 6407 skb_reset_mac_header(skb); 6408 skb_reset_mac_len(skb); 6409 6410 eth = eth_hdr(skb); 6411 ether_addr_copy(eth->h_dest, dst); 6412 ether_addr_copy(eth->h_source, src); 6413 eth->h_proto = skb->protocol; 6414 6415 skb_postpush_rcsum(skb, eth, sizeof(*eth)); 6416 6417 return 0; 6418 } 6419 EXPORT_SYMBOL(skb_eth_push); 6420 6421 /* Update the ethertype of hdr and the skb csum value if required. */ 6422 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 6423 __be16 ethertype) 6424 { 6425 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6426 __be16 diff[] = { ~hdr->h_proto, ethertype }; 6427 6428 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6429 } 6430 6431 hdr->h_proto = ethertype; 6432 } 6433 6434 /** 6435 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 6436 * the packet 6437 * 6438 * @skb: buffer 6439 * @mpls_lse: MPLS label stack entry to push 6440 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 6441 * @mac_len: length of the MAC header 6442 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 6443 * ethernet 6444 * 6445 * Expects skb->data at mac header. 6446 * 6447 * Returns 0 on success, -errno otherwise. 6448 */ 6449 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 6450 int mac_len, bool ethernet) 6451 { 6452 struct mpls_shim_hdr *lse; 6453 int err; 6454 6455 if (unlikely(!eth_p_mpls(mpls_proto))) 6456 return -EINVAL; 6457 6458 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
*/ 6459 if (skb->encapsulation) 6460 return -EINVAL; 6461 6462 err = skb_cow_head(skb, MPLS_HLEN); 6463 if (unlikely(err)) 6464 return err; 6465 6466 if (!skb->inner_protocol) { 6467 skb_set_inner_network_header(skb, skb_network_offset(skb)); 6468 skb_set_inner_protocol(skb, skb->protocol); 6469 } 6470 6471 skb_push(skb, MPLS_HLEN); 6472 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 6473 mac_len); 6474 skb_reset_mac_header(skb); 6475 skb_set_network_header(skb, mac_len); 6476 skb_reset_mac_len(skb); 6477 6478 lse = mpls_hdr(skb); 6479 lse->label_stack_entry = mpls_lse; 6480 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 6481 6482 if (ethernet && mac_len >= ETH_HLEN) 6483 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 6484 skb->protocol = mpls_proto; 6485 6486 return 0; 6487 } 6488 EXPORT_SYMBOL_GPL(skb_mpls_push); 6489 6490 /** 6491 * skb_mpls_pop() - pop the outermost MPLS header 6492 * 6493 * @skb: buffer 6494 * @next_proto: ethertype of header after popped MPLS header 6495 * @mac_len: length of the MAC header 6496 * @ethernet: flag to indicate if the packet is ethernet 6497 * 6498 * Expects skb->data at mac header. 6499 * 6500 * Returns 0 on success, -errno otherwise. 6501 */ 6502 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 6503 bool ethernet) 6504 { 6505 int err; 6506 6507 if (unlikely(!eth_p_mpls(skb->protocol))) 6508 return 0; 6509 6510 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 6511 if (unlikely(err)) 6512 return err; 6513 6514 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 6515 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 6516 mac_len); 6517 6518 __skb_pull(skb, MPLS_HLEN); 6519 skb_reset_mac_header(skb); 6520 skb_set_network_header(skb, mac_len); 6521 6522 if (ethernet && mac_len >= ETH_HLEN) { 6523 struct ethhdr *hdr; 6524 6525 /* use mpls_hdr() to get ethertype to account for VLANs. */ 6526 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 6527 skb_mod_eth_type(skb, hdr, next_proto); 6528 } 6529 skb->protocol = next_proto; 6530 6531 return 0; 6532 } 6533 EXPORT_SYMBOL_GPL(skb_mpls_pop); 6534 6535 /** 6536 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 6537 * 6538 * @skb: buffer 6539 * @mpls_lse: new MPLS label stack entry to update to 6540 * 6541 * Expects skb->data at mac header. 6542 * 6543 * Returns 0 on success, -errno otherwise. 6544 */ 6545 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 6546 { 6547 int err; 6548 6549 if (unlikely(!eth_p_mpls(skb->protocol))) 6550 return -EINVAL; 6551 6552 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 6553 if (unlikely(err)) 6554 return err; 6555 6556 if (skb->ip_summed == CHECKSUM_COMPLETE) { 6557 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 6558 6559 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 6560 } 6561 6562 mpls_hdr(skb)->label_stack_entry = mpls_lse; 6563 6564 return 0; 6565 } 6566 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 6567 6568 /** 6569 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 6570 * 6571 * @skb: buffer 6572 * 6573 * Expects skb->data at mac header. 6574 * 6575 * Returns 0 on success, -errno otherwise. 
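 *
 * Sketch of a caller (illustrative only; the label is hypothetical):
 *
 *	err = skb_mpls_dec_ttl(skb);
 *	if (err)
 *		goto drop;	(e.g. TTL reached zero or header not pullable)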
6576 */ 6577 int skb_mpls_dec_ttl(struct sk_buff *skb) 6578 { 6579 u32 lse; 6580 u8 ttl; 6581 6582 if (unlikely(!eth_p_mpls(skb->protocol))) 6583 return -EINVAL; 6584 6585 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 6586 return -ENOMEM; 6587 6588 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 6589 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 6590 if (!--ttl) 6591 return -EINVAL; 6592 6593 lse &= ~MPLS_LS_TTL_MASK; 6594 lse |= ttl << MPLS_LS_TTL_SHIFT; 6595 6596 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 6597 } 6598 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 6599 6600 /** 6601 * alloc_skb_with_frags - allocate skb with page frags 6602 * 6603 * @header_len: size of linear part 6604 * @data_len: needed length in frags 6605 * @order: max page order desired. 6606 * @errcode: pointer to error code if any 6607 * @gfp_mask: allocation mask 6608 * 6609 * This can be used to allocate a paged skb, given a maximal order for frags. 6610 */ 6611 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 6612 unsigned long data_len, 6613 int order, 6614 int *errcode, 6615 gfp_t gfp_mask) 6616 { 6617 unsigned long chunk; 6618 struct sk_buff *skb; 6619 struct page *page; 6620 int nr_frags = 0; 6621 6622 *errcode = -EMSGSIZE; 6623 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) 6624 return NULL; 6625 6626 *errcode = -ENOBUFS; 6627 skb = alloc_skb(header_len, gfp_mask); 6628 if (!skb) 6629 return NULL; 6630 6631 while (data_len) { 6632 if (nr_frags == MAX_SKB_FRAGS - 1) 6633 goto failure; 6634 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) 6635 order--; 6636 6637 if (order) { 6638 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 6639 __GFP_COMP | 6640 __GFP_NOWARN, 6641 order); 6642 if (!page) { 6643 order--; 6644 continue; 6645 } 6646 } else { 6647 page = alloc_page(gfp_mask); 6648 if (!page) 6649 goto failure; 6650 } 6651 chunk = min_t(unsigned long, data_len, 6652 PAGE_SIZE << order); 6653 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); 6654 nr_frags++; 6655 skb->truesize += (PAGE_SIZE << order); 6656 data_len -= chunk; 6657 } 6658 return skb; 6659 6660 failure: 6661 kfree_skb(skb); 6662 return NULL; 6663 } 6664 EXPORT_SYMBOL(alloc_skb_with_frags); 6665 6666 /* carve out the first off bytes from skb when off < headlen */ 6667 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 6668 const int headlen, gfp_t gfp_mask) 6669 { 6670 int i; 6671 unsigned int size = skb_end_offset(skb); 6672 int new_hlen = headlen - off; 6673 u8 *data; 6674 6675 if (skb_pfmemalloc(skb)) 6676 gfp_mask |= __GFP_MEMALLOC; 6677 6678 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6679 if (!data) 6680 return -ENOMEM; 6681 size = SKB_WITH_OVERHEAD(size); 6682 6683 /* Copy real data, and all frags */ 6684 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 6685 skb->len -= off; 6686 6687 memcpy((struct skb_shared_info *)(data + size), 6688 skb_shinfo(skb), 6689 offsetof(struct skb_shared_info, 6690 frags[skb_shinfo(skb)->nr_frags])); 6691 if (skb_cloned(skb)) { 6692 /* drop the old head gracefully */ 6693 if (skb_orphan_frags(skb, gfp_mask)) { 6694 skb_kfree_head(data, size); 6695 return -ENOMEM; 6696 } 6697 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 6698 skb_frag_ref(skb, i); 6699 if (skb_has_frag_list(skb)) 6700 skb_clone_fraglist(skb); 6701 skb_release_data(skb, SKB_CONSUMED); 6702 } else { 6703 /* we can reuse existing recount- all we did was 6704 * relocate values 6705 */ 6706 skb_free_head(skb); 6707 } 6708 
6709 skb->head = data; 6710 skb->data = data; 6711 skb->head_frag = 0; 6712 skb_set_end_offset(skb, size); 6713 skb_set_tail_pointer(skb, skb_headlen(skb)); 6714 skb_headers_offset_update(skb, 0); 6715 skb->cloned = 0; 6716 skb->hdr_len = 0; 6717 skb->nohdr = 0; 6718 atomic_set(&skb_shinfo(skb)->dataref, 1); 6719 6720 return 0; 6721 } 6722 6723 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 6724 6725 /* carve out the first eat bytes from skb's frag_list. May recurse into 6726 * pskb_carve() 6727 */ 6728 static int pskb_carve_frag_list(struct sk_buff *skb, 6729 struct skb_shared_info *shinfo, int eat, 6730 gfp_t gfp_mask) 6731 { 6732 struct sk_buff *list = shinfo->frag_list; 6733 struct sk_buff *clone = NULL; 6734 struct sk_buff *insp = NULL; 6735 6736 do { 6737 if (!list) { 6738 pr_err("Not enough bytes to eat. Want %d\n", eat); 6739 return -EFAULT; 6740 } 6741 if (list->len <= eat) { 6742 /* Eaten as whole. */ 6743 eat -= list->len; 6744 list = list->next; 6745 insp = list; 6746 } else { 6747 /* Eaten partially. */ 6748 if (skb_shared(list)) { 6749 clone = skb_clone(list, gfp_mask); 6750 if (!clone) 6751 return -ENOMEM; 6752 insp = list->next; 6753 list = clone; 6754 } else { 6755 /* This may be pulled without problems. */ 6756 insp = list; 6757 } 6758 if (pskb_carve(list, eat, gfp_mask) < 0) { 6759 kfree_skb(clone); 6760 return -ENOMEM; 6761 } 6762 break; 6763 } 6764 } while (eat); 6765 6766 /* Free pulled out fragments. */ 6767 while ((list = shinfo->frag_list) != insp) { 6768 shinfo->frag_list = list->next; 6769 consume_skb(list); 6770 } 6771 /* And insert new clone at head. */ 6772 if (clone) { 6773 clone->next = list; 6774 shinfo->frag_list = clone; 6775 } 6776 return 0; 6777 } 6778 6779 /* carve off first len bytes from skb. Split line (off) is in the 6780 * non-linear part of skb 6781 */ 6782 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 6783 int pos, gfp_t gfp_mask) 6784 { 6785 int i, k = 0; 6786 unsigned int size = skb_end_offset(skb); 6787 u8 *data; 6788 const int nfrags = skb_shinfo(skb)->nr_frags; 6789 struct skb_shared_info *shinfo; 6790 6791 if (skb_pfmemalloc(skb)) 6792 gfp_mask |= __GFP_MEMALLOC; 6793 6794 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); 6795 if (!data) 6796 return -ENOMEM; 6797 size = SKB_WITH_OVERHEAD(size); 6798 6799 memcpy((struct skb_shared_info *)(data + size), 6800 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 6801 if (skb_orphan_frags(skb, gfp_mask)) { 6802 skb_kfree_head(data, size); 6803 return -ENOMEM; 6804 } 6805 shinfo = (struct skb_shared_info *)(data + size); 6806 for (i = 0; i < nfrags; i++) { 6807 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 6808 6809 if (pos + fsize > off) { 6810 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 6811 6812 if (pos < off) { 6813 /* Split frag. 6814 * We have two variants in this case: 6815 * 1. Move all the frag to the second 6816 * part, if it is possible. F.e. 6817 * this approach is mandatory for TUX, 6818 * where splitting is expensive. 6819 * 2. Split is accurately. We make this. 
6820 */ 6821 skb_frag_off_add(&shinfo->frags[0], off - pos); 6822 skb_frag_size_sub(&shinfo->frags[0], off - pos); 6823 } 6824 skb_frag_ref(skb, i); 6825 k++; 6826 } 6827 pos += fsize; 6828 } 6829 shinfo->nr_frags = k; 6830 if (skb_has_frag_list(skb)) 6831 skb_clone_fraglist(skb); 6832 6833 /* split line is in frag list */ 6834 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6835 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6836 if (skb_has_frag_list(skb)) 6837 kfree_skb_list(skb_shinfo(skb)->frag_list); 6838 skb_kfree_head(data, size); 6839 return -ENOMEM; 6840 } 6841 skb_release_data(skb, SKB_CONSUMED); 6842 6843 skb->head = data; 6844 skb->head_frag = 0; 6845 skb->data = data; 6846 skb_set_end_offset(skb, size); 6847 skb_reset_tail_pointer(skb); 6848 skb_headers_offset_update(skb, 0); 6849 skb->cloned = 0; 6850 skb->hdr_len = 0; 6851 skb->nohdr = 0; 6852 skb->len -= off; 6853 skb->data_len = skb->len; 6854 atomic_set(&skb_shinfo(skb)->dataref, 1); 6855 return 0; 6856 } 6857 6858 /* remove len bytes from the beginning of the skb */ 6859 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6860 { 6861 int headlen = skb_headlen(skb); 6862 6863 if (len < headlen) 6864 return pskb_carve_inside_header(skb, len, headlen, gfp); 6865 else 6866 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6867 } 6868 6869 /* Extract to_copy bytes starting at off from skb, and return this in 6870 * a new skb 6871 */ 6872 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6873 int to_copy, gfp_t gfp) 6874 { 6875 struct sk_buff *clone = skb_clone(skb, gfp); 6876 6877 if (!clone) 6878 return NULL; 6879 6880 if (pskb_carve(clone, off, gfp) < 0 || 6881 pskb_trim(clone, to_copy)) { 6882 kfree_skb(clone); 6883 return NULL; 6884 } 6885 return clone; 6886 } 6887 EXPORT_SYMBOL(pskb_extract); 6888 6889 /** 6890 * skb_condense - try to get rid of fragments/frag_list if possible 6891 * @skb: buffer 6892 * 6893 * Can be used to save memory before skb is added to a busy queue. 6894 * If packet has bytes in frags and enough tail room in skb->head, 6895 * pull all of them, so that we can free the frags right now and adjust 6896 * truesize. 6897 * Notes: 6898 * We do not reallocate skb->head thus can not fail. 6899 * Caller must re-evaluate skb->truesize if needed. 6900 */ 6901 void skb_condense(struct sk_buff *skb) 6902 { 6903 if (skb->data_len) { 6904 if (skb->data_len > skb->end - skb->tail || 6905 skb_cloned(skb) || !skb_frags_readable(skb)) 6906 return; 6907 6908 /* Nice, we can free page frag(s) right now */ 6909 __pskb_pull_tail(skb, skb->data_len); 6910 } 6911 /* At this point, skb->truesize might be over estimated, 6912 * because skb had a fragment, and fragments do not tell 6913 * their truesize. 6914 * When we pulled its content into skb->head, fragment 6915 * was freed, but __pskb_pull_tail() could not possibly 6916 * adjust skb->truesize, not knowing the frag truesize. 6917 */ 6918 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6919 } 6920 EXPORT_SYMBOL(skb_condense); 6921 6922 #ifdef CONFIG_SKB_EXTENSIONS 6923 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6924 { 6925 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6926 } 6927 6928 /** 6929 * __skb_ext_alloc - allocate a new skb extensions storage 6930 * 6931 * @flags: See kmalloc(). 6932 * 6933 * Returns the newly allocated pointer. The pointer can later attached to a 6934 * skb via __skb_ext_set(). 
6935 * Note: caller must handle the skb_ext as an opaque data. 6936 */ 6937 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6938 { 6939 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6940 6941 if (new) { 6942 memset(new->offset, 0, sizeof(new->offset)); 6943 refcount_set(&new->refcnt, 1); 6944 } 6945 6946 return new; 6947 } 6948 6949 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6950 unsigned int old_active) 6951 { 6952 struct skb_ext *new; 6953 6954 if (refcount_read(&old->refcnt) == 1) 6955 return old; 6956 6957 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6958 if (!new) 6959 return NULL; 6960 6961 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6962 refcount_set(&new->refcnt, 1); 6963 6964 #ifdef CONFIG_XFRM 6965 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6966 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6967 unsigned int i; 6968 6969 for (i = 0; i < sp->len; i++) 6970 xfrm_state_hold(sp->xvec[i]); 6971 } 6972 #endif 6973 #ifdef CONFIG_MCTP_FLOWS 6974 if (old_active & (1 << SKB_EXT_MCTP)) { 6975 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6976 6977 if (flow->key) 6978 refcount_inc(&flow->key->refs); 6979 } 6980 #endif 6981 __skb_ext_put(old); 6982 return new; 6983 } 6984 6985 /** 6986 * __skb_ext_set - attach the specified extension storage to this skb 6987 * @skb: buffer 6988 * @id: extension id 6989 * @ext: extension storage previously allocated via __skb_ext_alloc() 6990 * 6991 * Existing extensions, if any, are cleared. 6992 * 6993 * Returns the pointer to the extension. 6994 */ 6995 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6996 struct skb_ext *ext) 6997 { 6998 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6999 7000 skb_ext_put(skb); 7001 newlen = newoff + skb_ext_type_len[id]; 7002 ext->chunks = newlen; 7003 ext->offset[id] = newoff; 7004 skb->extensions = ext; 7005 skb->active_extensions = 1 << id; 7006 return skb_ext_get_ptr(ext, id); 7007 } 7008 7009 /** 7010 * skb_ext_add - allocate space for given extension, COW if needed 7011 * @skb: buffer 7012 * @id: extension to allocate space for 7013 * 7014 * Allocates enough space for the given extension. 7015 * If the extension is already present, a pointer to that extension 7016 * is returned. 7017 * 7018 * If the skb was cloned, COW applies and the returned memory can be 7019 * modified without changing the extension space of clones buffers. 7020 * 7021 * Returns pointer to the extension or NULL on allocation failure. 
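 *
 * Usage sketch (SKB_EXT_SEC_PATH is just one example of an extension id):
 *
 *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return -ENOMEM;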
7022 */ 7023 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 7024 { 7025 struct skb_ext *new, *old = NULL; 7026 unsigned int newlen, newoff; 7027 7028 if (skb->active_extensions) { 7029 old = skb->extensions; 7030 7031 new = skb_ext_maybe_cow(old, skb->active_extensions); 7032 if (!new) 7033 return NULL; 7034 7035 if (__skb_ext_exist(new, id)) 7036 goto set_active; 7037 7038 newoff = new->chunks; 7039 } else { 7040 newoff = SKB_EXT_CHUNKSIZEOF(*new); 7041 7042 new = __skb_ext_alloc(GFP_ATOMIC); 7043 if (!new) 7044 return NULL; 7045 } 7046 7047 newlen = newoff + skb_ext_type_len[id]; 7048 new->chunks = newlen; 7049 new->offset[id] = newoff; 7050 set_active: 7051 skb->slow_gro = 1; 7052 skb->extensions = new; 7053 skb->active_extensions |= 1 << id; 7054 return skb_ext_get_ptr(new, id); 7055 } 7056 EXPORT_SYMBOL(skb_ext_add); 7057 7058 #ifdef CONFIG_XFRM 7059 static void skb_ext_put_sp(struct sec_path *sp) 7060 { 7061 unsigned int i; 7062 7063 for (i = 0; i < sp->len; i++) 7064 xfrm_state_put(sp->xvec[i]); 7065 } 7066 #endif 7067 7068 #ifdef CONFIG_MCTP_FLOWS 7069 static void skb_ext_put_mctp(struct mctp_flow *flow) 7070 { 7071 if (flow->key) 7072 mctp_key_unref(flow->key); 7073 } 7074 #endif 7075 7076 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 7077 { 7078 struct skb_ext *ext = skb->extensions; 7079 7080 skb->active_extensions &= ~(1 << id); 7081 if (skb->active_extensions == 0) { 7082 skb->extensions = NULL; 7083 __skb_ext_put(ext); 7084 #ifdef CONFIG_XFRM 7085 } else if (id == SKB_EXT_SEC_PATH && 7086 refcount_read(&ext->refcnt) == 1) { 7087 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 7088 7089 skb_ext_put_sp(sp); 7090 sp->len = 0; 7091 #endif 7092 } 7093 } 7094 EXPORT_SYMBOL(__skb_ext_del); 7095 7096 void __skb_ext_put(struct skb_ext *ext) 7097 { 7098 /* If this is last clone, nothing can increment 7099 * it after check passes. Avoids one atomic op. 7100 */ 7101 if (refcount_read(&ext->refcnt) == 1) 7102 goto free_now; 7103 7104 if (!refcount_dec_and_test(&ext->refcnt)) 7105 return; 7106 free_now: 7107 #ifdef CONFIG_XFRM 7108 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 7109 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 7110 #endif 7111 #ifdef CONFIG_MCTP_FLOWS 7112 if (__skb_ext_exist(ext, SKB_EXT_MCTP)) 7113 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); 7114 #endif 7115 7116 kmem_cache_free(skbuff_ext_cache, ext); 7117 } 7118 EXPORT_SYMBOL(__skb_ext_put); 7119 #endif /* CONFIG_SKB_EXTENSIONS */ 7120 7121 static void kfree_skb_napi_cache(struct sk_buff *skb) 7122 { 7123 /* if SKB is a clone, don't handle this case */ 7124 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 7125 __kfree_skb(skb); 7126 return; 7127 } 7128 7129 local_bh_disable(); 7130 __napi_kfree_skb(skb, SKB_CONSUMED); 7131 local_bh_enable(); 7132 } 7133 7134 /** 7135 * skb_attempt_defer_free - queue skb for remote freeing 7136 * @skb: buffer 7137 * 7138 * Put @skb in a per-cpu list, using the cpu which 7139 * allocated the skb/pages to reduce false sharing 7140 * and memory zone spinlock contention. 
7141 */ 7142 void skb_attempt_defer_free(struct sk_buff *skb) 7143 { 7144 int cpu = skb->alloc_cpu; 7145 struct softnet_data *sd; 7146 unsigned int defer_max; 7147 bool kick; 7148 7149 if (cpu == raw_smp_processor_id() || 7150 WARN_ON_ONCE(cpu >= nr_cpu_ids) || 7151 !cpu_online(cpu)) { 7152 nodefer: kfree_skb_napi_cache(skb); 7153 return; 7154 } 7155 7156 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); 7157 DEBUG_NET_WARN_ON_ONCE(skb->destructor); 7158 7159 sd = &per_cpu(softnet_data, cpu); 7160 defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max); 7161 if (READ_ONCE(sd->defer_count) >= defer_max) 7162 goto nodefer; 7163 7164 spin_lock_bh(&sd->defer_lock); 7165 /* Send an IPI every time queue reaches half capacity. */ 7166 kick = sd->defer_count == (defer_max >> 1); 7167 /* Paired with the READ_ONCE() few lines above */ 7168 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); 7169 7170 skb->next = sd->defer_list; 7171 /* Paired with READ_ONCE() in skb_defer_free_flush() */ 7172 WRITE_ONCE(sd->defer_list, skb); 7173 spin_unlock_bh(&sd->defer_lock); 7174 7175 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU 7176 * if we are unlucky enough (this seems very unlikely). 7177 */ 7178 if (unlikely(kick)) 7179 kick_defer_list_purge(sd, cpu); 7180 } 7181 7182 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, 7183 size_t offset, size_t len) 7184 { 7185 const char *kaddr; 7186 __wsum csum; 7187 7188 kaddr = kmap_local_page(page); 7189 csum = csum_partial(kaddr + offset, len, 0); 7190 kunmap_local(kaddr); 7191 skb->csum = csum_block_add(skb->csum, csum, skb->len); 7192 } 7193 7194 /** 7195 * skb_splice_from_iter - Splice (or copy) pages to skbuff 7196 * @skb: The buffer to add pages to 7197 * @iter: Iterator representing the pages to be added 7198 * @maxsize: Maximum amount of pages to be added 7199 * @gfp: Allocation flags 7200 * 7201 * This is a common helper function for supporting MSG_SPLICE_PAGES. It 7202 * extracts pages from an iterator and adds them to the socket buffer if 7203 * possible, copying them to fragments if not possible (such as if they're slab 7204 * pages). 7205 * 7206 * Returns the amount of data spliced/copied or -EMSGSIZE if there's 7207 * insufficient space in the buffer to transfer anything. 
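 *
 * Sketch of a sendmsg-style caller (illustrative; "copy" and the error label
 * are assumptions of this example):
 *
 *	if (msg->msg_flags & MSG_SPLICE_PAGES) {
 *		ret = skb_splice_from_iter(skb, &msg->msg_iter, copy,
 *					   sk->sk_allocation);
 *		if (ret < 0)
 *			goto error;
 *	}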
7208 */ 7209 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, 7210 ssize_t maxsize, gfp_t gfp) 7211 { 7212 size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags); 7213 struct page *pages[8], **ppages = pages; 7214 ssize_t spliced = 0, ret = 0; 7215 unsigned int i; 7216 7217 while (iter->count > 0) { 7218 ssize_t space, nr, len; 7219 size_t off; 7220 7221 ret = -EMSGSIZE; 7222 space = frag_limit - skb_shinfo(skb)->nr_frags; 7223 if (space < 0) 7224 break; 7225 7226 /* We might be able to coalesce without increasing nr_frags */ 7227 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); 7228 7229 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); 7230 if (len <= 0) { 7231 ret = len ?: -EIO; 7232 break; 7233 } 7234 7235 i = 0; 7236 do { 7237 struct page *page = pages[i++]; 7238 size_t part = min_t(size_t, PAGE_SIZE - off, len); 7239 7240 ret = -EIO; 7241 if (WARN_ON_ONCE(!sendpage_ok(page))) 7242 goto out; 7243 7244 ret = skb_append_pagefrags(skb, page, off, part, 7245 frag_limit); 7246 if (ret < 0) { 7247 iov_iter_revert(iter, len); 7248 goto out; 7249 } 7250 7251 if (skb->ip_summed == CHECKSUM_NONE) 7252 skb_splice_csum_page(skb, page, off, part); 7253 7254 off = 0; 7255 spliced += part; 7256 maxsize -= part; 7257 len -= part; 7258 } while (len > 0); 7259 7260 if (maxsize <= 0) 7261 break; 7262 } 7263 7264 out: 7265 skb_len_add(skb, spliced); 7266 return spliced ?: ret; 7267 } 7268 EXPORT_SYMBOL(skb_splice_from_iter); 7269 7270 static __always_inline 7271 size_t memcpy_from_iter_csum(void *iter_from, size_t progress, 7272 size_t len, void *to, void *priv2) 7273 { 7274 __wsum *csum = priv2; 7275 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); 7276 7277 *csum = csum_block_add(*csum, next, progress); 7278 return 0; 7279 } 7280 7281 static __always_inline 7282 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, 7283 size_t len, void *to, void *priv2) 7284 { 7285 __wsum next, *csum = priv2; 7286 7287 next = csum_and_copy_from_user(iter_from, to + progress, len); 7288 *csum = csum_block_add(*csum, next, progress); 7289 return next ? 0 : len; 7290 } 7291 7292 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, 7293 __wsum *csum, struct iov_iter *i) 7294 { 7295 size_t copied; 7296 7297 if (WARN_ON_ONCE(!i->data_source)) 7298 return false; 7299 copied = iterate_and_advance2(i, bytes, addr, csum, 7300 copy_from_user_iter_csum, 7301 memcpy_from_iter_csum); 7302 if (likely(copied == bytes)) 7303 return true; 7304 iov_iter_revert(i, copied); 7305 return false; 7306 } 7307 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7308
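/* Usage sketch for csum_and_copy_from_iter_full(): a getfrag-style copy that
 * folds the copied data into an skb checksum. Illustrative only; "to", "len",
 * "odd" and "msg" are the usual getfrag parameters assumed by this example.
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 *		return -EFAULT;
 *	skb->csum = csum_block_add(skb->csum, csum, odd);
 */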