xref: /linux/net/core/skbuff.c (revision 9c577f09989f921e7a7cc724949fa81f9444dffb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Routines having to do with the 'struct sk_buff' memory handlers.
4  *
5  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
6  *			Florian La Roche <rzsfl@rz.uni-sb.de>
7  *
8  *	Fixes:
9  *		Alan Cox	:	Fixed the worst of the load
10  *					balancer bugs.
11  *		Dave Platt	:	Interrupt stacking fix.
12  *	Richard Kooijman	:	Timestamp fixes.
13  *		Alan Cox	:	Changed buffer format.
14  *		Alan Cox	:	destructor hook for AF_UNIX etc.
15  *		Linus Torvalds	:	Better skb_clone.
16  *		Alan Cox	:	Added skb_copy.
17  *		Alan Cox	:	Added all the changed routines Linus
18  *					only put in the headers
19  *		Ray VanTassle	:	Fixed --skb->lock in free
20  *		Alan Cox	:	skb_copy copy arp field
21  *		Andi Kleen	:	slabified it.
22  *		Robert Olsson	:	Removed skb_head_pool
23  *
24  *	NOTE:
25  *		The __skb_ routines should be called with interrupts
26  *	disabled, or you better be *real* sure that the operation is atomic
27  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
28  *	or via disabling bottom half handlers, etc).
29  */
30 
31 /*
32  *	The functions in this file will not compile correctly with gcc 2.4.x
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/interrupt.h>
42 #include <linux/in.h>
43 #include <linux/inet.h>
44 #include <linux/slab.h>
45 #include <linux/tcp.h>
46 #include <linux/udp.h>
47 #include <linux/sctp.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/skbuff_ref.h>
55 #include <linux/splice.h>
56 #include <linux/cache.h>
57 #include <linux/rtnetlink.h>
58 #include <linux/init.h>
59 #include <linux/scatterlist.h>
60 #include <linux/errqueue.h>
61 #include <linux/prefetch.h>
62 #include <linux/bitfield.h>
63 #include <linux/if_vlan.h>
64 #include <linux/mpls.h>
65 #include <linux/kcov.h>
66 #include <linux/iov_iter.h>
67 #include <linux/crc32.h>
68 
69 #include <net/protocol.h>
70 #include <net/dst.h>
71 #include <net/sock.h>
72 #include <net/checksum.h>
73 #include <net/gro.h>
74 #include <net/gso.h>
75 #include <net/hotdata.h>
76 #include <net/ip6_checksum.h>
77 #include <net/xfrm.h>
78 #include <net/mpls.h>
79 #include <net/mptcp.h>
80 #include <net/mctp.h>
81 #include <net/page_pool/helpers.h>
82 #include <net/psp/types.h>
83 #include <net/dropreason.h>
84 #include <net/xdp_sock.h>
85 
86 #include <linux/uaccess.h>
87 #include <trace/events/skb.h>
88 #include <linux/highmem.h>
89 #include <linux/capability.h>
90 #include <linux/user_namespace.h>
91 #include <linux/indirect_call_wrapper.h>
92 #include <linux/textsearch.h>
93 
94 #include "dev.h"
95 #include "devmem.h"
96 #include "netmem_priv.h"
97 #include "sock_destructor.h"
98 
99 #ifdef CONFIG_SKB_EXTENSIONS
100 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
101 #endif
102 
103 #define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
104 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
105 					       GRO_MAX_HEAD_PAD))
106 
107 /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
108  * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
109  * size, and we can differentiate heads from skb_small_head_cache
110  * vs system slabs by looking at their size (skb_end_offset()).
111  */
112 #define SKB_SMALL_HEAD_CACHE_SIZE					\
113 	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
114 		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
115 		SKB_SMALL_HEAD_SIZE)
116 
117 #define SKB_SMALL_HEAD_HEADROOM						\
118 	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
119 
120 /* kcm_write_msgs() relies on casting paged frags to bio_vec to use
121  * iov_iter_bvec(). These static asserts ensure the cast is valid is long as the
122  * netmem is a page.
123  */
124 static_assert(offsetof(struct bio_vec, bv_page) ==
125 	      offsetof(skb_frag_t, netmem));
126 static_assert(sizeof_field(struct bio_vec, bv_page) ==
127 	      sizeof_field(skb_frag_t, netmem));
128 
129 static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
130 static_assert(sizeof_field(struct bio_vec, bv_len) ==
131 	      sizeof_field(skb_frag_t, len));
132 
133 static_assert(offsetof(struct bio_vec, bv_offset) ==
134 	      offsetof(skb_frag_t, offset));
135 static_assert(sizeof_field(struct bio_vec, bv_offset) ==
136 	      sizeof_field(skb_frag_t, offset));
137 
138 #undef FN
139 #define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
140 static const char * const drop_reasons[] = {
141 	[SKB_CONSUMED] = "CONSUMED",
142 	DEFINE_DROP_REASON(FN, FN)
143 };
144 
145 static const struct drop_reason_list drop_reasons_core = {
146 	.reasons = drop_reasons,
147 	.n_reasons = ARRAY_SIZE(drop_reasons),
148 };
149 
150 const struct drop_reason_list __rcu *
151 drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
152 	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
153 };
154 EXPORT_SYMBOL(drop_reasons_by_subsys);
155 
156 /**
157  * drop_reasons_register_subsys - register another drop reason subsystem
158  * @subsys: the subsystem to register, must not be the core
159  * @list: the list of drop reasons within the subsystem, must point to
160  *	a statically initialized list
161  */
162 void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
163 				  const struct drop_reason_list *list)
164 {
165 	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
166 		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
167 		 "invalid subsystem %d\n", subsys))
168 		return;
169 
170 	/* must point to statically allocated memory, so INIT is OK */
171 	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
172 }
173 EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
174 
175 /**
176  * drop_reasons_unregister_subsys - unregister a drop reason subsystem
177  * @subsys: the subsystem to remove, must not be the core
178  *
179  * Note: This will synchronize_rcu() to ensure no users when it returns.
180  */
181 void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
182 {
183 	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
184 		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
185 		 "invalid subsystem %d\n", subsys))
186 		return;
187 
188 	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);
189 
190 	synchronize_rcu();
191 }
192 EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
193 
194 /**
195  *	skb_panic - private function for out-of-line support
196  *	@skb:	buffer
197  *	@sz:	size
198  *	@addr:	address
199  *	@msg:	skb_over_panic or skb_under_panic
200  *
201  *	Out-of-line support for skb_put() and skb_push().
202  *	Called via the wrapper skb_over_panic() or skb_under_panic().
203  *	Keep out of line to prevent kernel bloat.
204  *	__builtin_return_address is not used because it is not always reliable.
205  */
206 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
207 		      const char msg[])
208 {
209 	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
210 		 msg, addr, skb->len, sz, skb->head, skb->data,
211 		 (unsigned long)skb->tail, (unsigned long)skb->end,
212 		 skb->dev ? skb->dev->name : "<NULL>");
213 	BUG();
214 }
215 
216 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
217 {
218 	skb_panic(skb, sz, addr, __func__);
219 }
220 
221 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
222 {
223 	skb_panic(skb, sz, addr, __func__);
224 }
225 
226 #define NAPI_SKB_CACHE_SIZE	64
227 #define NAPI_SKB_CACHE_BULK	16
228 #define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)
229 
230 struct napi_alloc_cache {
231 	local_lock_t bh_lock;
232 	struct page_frag_cache page;
233 	unsigned int skb_count;
234 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
235 };
236 
237 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
238 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
239 	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
240 };
241 
242 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
243 {
244 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
245 	void *data;
246 
247 	fragsz = SKB_DATA_ALIGN(fragsz);
248 
249 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
250 	data = __page_frag_alloc_align(&nc->page, fragsz,
251 				       GFP_ATOMIC | __GFP_NOWARN, align_mask);
252 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
253 	return data;
254 
255 }
256 EXPORT_SYMBOL(__napi_alloc_frag_align);
257 
258 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
259 {
260 	void *data;
261 
262 	if (in_hardirq() || irqs_disabled()) {
263 		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
264 
265 		fragsz = SKB_DATA_ALIGN(fragsz);
266 		data = __page_frag_alloc_align(nc, fragsz,
267 					       GFP_ATOMIC | __GFP_NOWARN,
268 					       align_mask);
269 	} else {
270 		local_bh_disable();
271 		data = __napi_alloc_frag_align(fragsz, align_mask);
272 		local_bh_enable();
273 	}
274 	return data;
275 }
276 EXPORT_SYMBOL(__netdev_alloc_frag_align);
277 
278 /* Cache kmem_cache_size(net_hotdata.skbuff_cache) to help the compiler
279  * remove dead code (and skbuff_cache_size) when CONFIG_KASAN is unset.
280  */
281 static u32 skbuff_cache_size __read_mostly;
282 
283 static struct sk_buff *napi_skb_cache_get(void)
284 {
285 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
286 	struct sk_buff *skb;
287 
288 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
289 	if (unlikely(!nc->skb_count)) {
290 		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
291 						      GFP_ATOMIC | __GFP_NOWARN,
292 						      NAPI_SKB_CACHE_BULK,
293 						      nc->skb_cache);
294 		if (unlikely(!nc->skb_count)) {
295 			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
296 			return NULL;
297 		}
298 	}
299 
300 	skb = nc->skb_cache[--nc->skb_count];
301 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
302 	kasan_mempool_unpoison_object(skb, skbuff_cache_size);
303 
304 	return skb;
305 }
306 
307 /**
308  * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
309  * @skbs: pointer to an at least @n-sized array to fill with skb pointers
310  * @n: number of entries to provide
311  *
312  * Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes
313  * the pointers into the provided array @skbs. If there are less entries
314  * available, tries to replenish the cache and bulk-allocates the diff from
315  * the MM layer if needed.
316  * The heads are being zeroed with either memset() or %__GFP_ZERO, so they are
317  * ready for {,__}build_skb_around() and don't have any data buffers attached.
318  * Must be called *only* from the BH context.
319  *
320  * Return: number of successfully allocated skbs (@n if no actual allocation
321  *	   needed or kmem_cache_alloc_bulk() didn't fail).
322  */
323 u32 napi_skb_cache_get_bulk(void **skbs, u32 n)
324 {
325 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
326 	u32 bulk, total = n;
327 
328 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
329 
330 	if (nc->skb_count >= n)
331 		goto get;
332 
333 	/* No enough cached skbs. Try refilling the cache first */
334 	bulk = min(NAPI_SKB_CACHE_SIZE - nc->skb_count, NAPI_SKB_CACHE_BULK);
335 	nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
336 					       GFP_ATOMIC | __GFP_NOWARN, bulk,
337 					       &nc->skb_cache[nc->skb_count]);
338 	if (likely(nc->skb_count >= n))
339 		goto get;
340 
341 	/* Still not enough. Bulk-allocate the missing part directly, zeroed */
342 	n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
343 				   GFP_ATOMIC | __GFP_ZERO | __GFP_NOWARN,
344 				   n - nc->skb_count, &skbs[nc->skb_count]);
345 	if (likely(nc->skb_count >= n))
346 		goto get;
347 
348 	/* kmem_cache didn't allocate the number we need, limit the output */
349 	total -= n - nc->skb_count;
350 	n = nc->skb_count;
351 
352 get:
353 	for (u32 base = nc->skb_count - n, i = 0; i < n; i++) {
354 		skbs[i] = nc->skb_cache[base + i];
355 
356 		kasan_mempool_unpoison_object(skbs[i], skbuff_cache_size);
357 		memset(skbs[i], 0, offsetof(struct sk_buff, tail));
358 	}
359 
360 	nc->skb_count -= n;
361 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
362 
363 	return total;
364 }
365 EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk);
366 
367 static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
368 					 unsigned int size)
369 {
370 	struct skb_shared_info *shinfo;
371 
372 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
373 
374 	/* Assumes caller memset cleared SKB */
375 	skb->truesize = SKB_TRUESIZE(size);
376 	refcount_set(&skb->users, 1);
377 	skb->head = data;
378 	skb->data = data;
379 	skb_reset_tail_pointer(skb);
380 	skb_set_end_offset(skb, size);
381 	skb->mac_header = (typeof(skb->mac_header))~0U;
382 	skb->transport_header = (typeof(skb->transport_header))~0U;
383 	skb->alloc_cpu = raw_smp_processor_id();
384 	/* make sure we initialize shinfo sequentially */
385 	shinfo = skb_shinfo(skb);
386 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
387 	atomic_set(&shinfo->dataref, 1);
388 
389 	skb_set_kcov_handle(skb, kcov_common_handle());
390 }
391 
392 static inline void *__slab_build_skb(void *data, unsigned int *size)
393 {
394 	void *resized;
395 
396 	/* Must find the allocation size (and grow it to match). */
397 	*size = ksize(data);
398 	/* krealloc() will immediately return "data" when
399 	 * "ksize(data)" is requested: it is the existing upper
400 	 * bounds. As a result, GFP_ATOMIC will be ignored. Note
401 	 * that this "new" pointer needs to be passed back to the
402 	 * caller for use so the __alloc_size hinting will be
403 	 * tracked correctly.
404 	 */
405 	resized = krealloc(data, *size, GFP_ATOMIC);
406 	WARN_ON_ONCE(resized != data);
407 	return resized;
408 }
409 
410 /* build_skb() variant which can operate on slab buffers.
411  * Note that this should be used sparingly as slab buffers
412  * cannot be combined efficiently by GRO!
413  */
414 struct sk_buff *slab_build_skb(void *data)
415 {
416 	struct sk_buff *skb;
417 	unsigned int size;
418 
419 	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
420 			       GFP_ATOMIC | __GFP_NOWARN);
421 	if (unlikely(!skb))
422 		return NULL;
423 
424 	memset(skb, 0, offsetof(struct sk_buff, tail));
425 	data = __slab_build_skb(data, &size);
426 	__finalize_skb_around(skb, data, size);
427 
428 	return skb;
429 }
430 EXPORT_SYMBOL(slab_build_skb);
431 
432 /* Caller must provide SKB that is memset cleared */
433 static void __build_skb_around(struct sk_buff *skb, void *data,
434 			       unsigned int frag_size)
435 {
436 	unsigned int size = frag_size;
437 
438 	/* frag_size == 0 is considered deprecated now. Callers
439 	 * using slab buffer should use slab_build_skb() instead.
440 	 */
441 	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
442 		data = __slab_build_skb(data, &size);
443 
444 	__finalize_skb_around(skb, data, size);
445 }
446 
447 /**
448  * __build_skb - build a network buffer
449  * @data: data buffer provided by caller
450  * @frag_size: size of data (must not be 0)
451  *
452  * Allocate a new &sk_buff. Caller provides space holding head and
453  * skb_shared_info. @data must have been allocated from the page
454  * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
455  * allocation is deprecated, and callers should use slab_build_skb()
456  * instead.)
457  * The return is the new skb buffer.
458  * On a failure the return is %NULL, and @data is not freed.
459  * Notes :
460  *  Before IO, driver allocates only data buffer where NIC put incoming frame
461  *  Driver should add room at head (NET_SKB_PAD) and
462  *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
463  *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
464  *  before giving packet to stack.
465  *  RX rings only contains data buffers, not full skbs.
466  */
467 struct sk_buff *__build_skb(void *data, unsigned int frag_size)
468 {
469 	struct sk_buff *skb;
470 
471 	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
472 			       GFP_ATOMIC | __GFP_NOWARN);
473 	if (unlikely(!skb))
474 		return NULL;
475 
476 	memset(skb, 0, offsetof(struct sk_buff, tail));
477 	__build_skb_around(skb, data, frag_size);
478 
479 	return skb;
480 }
481 
482 /* build_skb() is wrapper over __build_skb(), that specifically
483  * takes care of skb->head and skb->pfmemalloc
484  */
485 struct sk_buff *build_skb(void *data, unsigned int frag_size)
486 {
487 	struct sk_buff *skb = __build_skb(data, frag_size);
488 
489 	if (likely(skb && frag_size)) {
490 		skb->head_frag = 1;
491 		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
492 	}
493 	return skb;
494 }
495 EXPORT_SYMBOL(build_skb);
496 
497 /**
498  * build_skb_around - build a network buffer around provided skb
499  * @skb: sk_buff provide by caller, must be memset cleared
500  * @data: data buffer provided by caller
501  * @frag_size: size of data
502  */
503 struct sk_buff *build_skb_around(struct sk_buff *skb,
504 				 void *data, unsigned int frag_size)
505 {
506 	if (unlikely(!skb))
507 		return NULL;
508 
509 	__build_skb_around(skb, data, frag_size);
510 
511 	if (frag_size) {
512 		skb->head_frag = 1;
513 		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
514 	}
515 	return skb;
516 }
517 EXPORT_SYMBOL(build_skb_around);
518 
519 /**
520  * __napi_build_skb - build a network buffer
521  * @data: data buffer provided by caller
522  * @frag_size: size of data
523  *
524  * Version of __build_skb() that uses NAPI percpu caches to obtain
525  * skbuff_head instead of inplace allocation.
526  *
527  * Returns a new &sk_buff on success, %NULL on allocation failure.
528  */
529 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
530 {
531 	struct sk_buff *skb;
532 
533 	skb = napi_skb_cache_get();
534 	if (unlikely(!skb))
535 		return NULL;
536 
537 	memset(skb, 0, offsetof(struct sk_buff, tail));
538 	__build_skb_around(skb, data, frag_size);
539 
540 	return skb;
541 }
542 
543 /**
544  * napi_build_skb - build a network buffer
545  * @data: data buffer provided by caller
546  * @frag_size: size of data
547  *
548  * Version of __napi_build_skb() that takes care of skb->head_frag
549  * and skb->pfmemalloc when the data is a page or page fragment.
550  *
551  * Returns a new &sk_buff on success, %NULL on allocation failure.
552  */
553 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
554 {
555 	struct sk_buff *skb = __napi_build_skb(data, frag_size);
556 
557 	if (likely(skb) && frag_size) {
558 		skb->head_frag = 1;
559 		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
560 	}
561 
562 	return skb;
563 }
564 EXPORT_SYMBOL(napi_build_skb);
565 
566 /*
567  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
568  * the caller if emergency pfmemalloc reserves are being used. If it is and
569  * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
570  * may be used. Otherwise, the packet data may be discarded until enough
571  * memory is free
572  */
573 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
574 			     bool *pfmemalloc)
575 {
576 	bool ret_pfmemalloc = false;
577 	size_t obj_size;
578 	void *obj;
579 
580 	obj_size = SKB_HEAD_ALIGN(*size);
581 	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
582 	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
583 		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
584 				flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
585 				node);
586 		*size = SKB_SMALL_HEAD_CACHE_SIZE;
587 		if (obj || !(gfp_pfmemalloc_allowed(flags)))
588 			goto out;
589 		/* Try again but now we are using pfmemalloc reserves */
590 		ret_pfmemalloc = true;
591 		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
592 		goto out;
593 	}
594 
595 	obj_size = kmalloc_size_roundup(obj_size);
596 	/* The following cast might truncate high-order bits of obj_size, this
597 	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
598 	 */
599 	*size = (unsigned int)obj_size;
600 
601 	/*
602 	 * Try a regular allocation, when that fails and we're not entitled
603 	 * to the reserves, fail.
604 	 */
605 	obj = kmalloc_node_track_caller(obj_size,
606 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
607 					node);
608 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
609 		goto out;
610 
611 	/* Try again but now we are using pfmemalloc reserves */
612 	ret_pfmemalloc = true;
613 	obj = kmalloc_node_track_caller(obj_size, flags, node);
614 
615 out:
616 	if (pfmemalloc)
617 		*pfmemalloc = ret_pfmemalloc;
618 
619 	return obj;
620 }
621 
622 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
623  *	'private' fields and also do memory statistics to find all the
624  *	[BEEP] leaks.
625  *
626  */
627 
628 /**
629  *	__alloc_skb	-	allocate a network buffer
630  *	@size: size to allocate
631  *	@gfp_mask: allocation mask
632  *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
633  *		instead of head cache and allocate a cloned (child) skb.
634  *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
635  *		allocations in case the data is required for writeback
636  *	@node: numa node to allocate memory on
637  *
638  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
639  *	tail room of at least size bytes. The object has a reference count
640  *	of one. The return is the buffer. On a failure the return is %NULL.
641  *
642  *	Buffers may only be allocated from interrupts using a @gfp_mask of
643  *	%GFP_ATOMIC.
644  */
645 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
646 			    int flags, int node)
647 {
648 	struct kmem_cache *cache;
649 	struct sk_buff *skb;
650 	bool pfmemalloc;
651 	u8 *data;
652 
653 	cache = (flags & SKB_ALLOC_FCLONE)
654 		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
655 
656 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
657 		gfp_mask |= __GFP_MEMALLOC;
658 
659 	/* Get the HEAD */
660 	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
661 	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
662 		skb = napi_skb_cache_get();
663 	else
664 		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
665 	if (unlikely(!skb))
666 		return NULL;
667 	prefetchw(skb);
668 
669 	/* We do our best to align skb_shared_info on a separate cache
670 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
671 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
672 	 * Both skb->head and skb_shared_info are cache line aligned.
673 	 */
674 	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
675 	if (unlikely(!data))
676 		goto nodata;
677 	/* kmalloc_size_roundup() might give us more room than requested.
678 	 * Put skb_shared_info exactly at the end of allocated zone,
679 	 * to allow max possible filling before reallocation.
680 	 */
681 	prefetchw(data + SKB_WITH_OVERHEAD(size));
682 
683 	/*
684 	 * Only clear those fields we need to clear, not those that we will
685 	 * actually initialise below. Hence, don't put any more fields after
686 	 * the tail pointer in struct sk_buff!
687 	 */
688 	memset(skb, 0, offsetof(struct sk_buff, tail));
689 	__build_skb_around(skb, data, size);
690 	skb->pfmemalloc = pfmemalloc;
691 
692 	if (flags & SKB_ALLOC_FCLONE) {
693 		struct sk_buff_fclones *fclones;
694 
695 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
696 
697 		skb->fclone = SKB_FCLONE_ORIG;
698 		refcount_set(&fclones->fclone_ref, 1);
699 	}
700 
701 	return skb;
702 
703 nodata:
704 	kmem_cache_free(cache, skb);
705 	return NULL;
706 }
707 EXPORT_SYMBOL(__alloc_skb);
708 
709 /**
710  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
711  *	@dev: network device to receive on
712  *	@len: length to allocate
713  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
714  *
715  *	Allocate a new &sk_buff and assign it a usage count of one. The
716  *	buffer has NET_SKB_PAD headroom built in. Users should allocate
717  *	the headroom they think they need without accounting for the
718  *	built in space. The built in space is used for optimisations.
719  *
720  *	%NULL is returned if there is no free memory.
721  */
722 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
723 				   gfp_t gfp_mask)
724 {
725 	struct page_frag_cache *nc;
726 	struct sk_buff *skb;
727 	bool pfmemalloc;
728 	void *data;
729 
730 	len += NET_SKB_PAD;
731 
732 	/* If requested length is either too small or too big,
733 	 * we use kmalloc() for skb->head allocation.
734 	 */
735 	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
736 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
737 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
738 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
739 		if (!skb)
740 			goto skb_fail;
741 		goto skb_success;
742 	}
743 
744 	len = SKB_HEAD_ALIGN(len);
745 
746 	if (sk_memalloc_socks())
747 		gfp_mask |= __GFP_MEMALLOC;
748 
749 	if (in_hardirq() || irqs_disabled()) {
750 		nc = this_cpu_ptr(&netdev_alloc_cache);
751 		data = page_frag_alloc(nc, len, gfp_mask);
752 		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
753 	} else {
754 		local_bh_disable();
755 		local_lock_nested_bh(&napi_alloc_cache.bh_lock);
756 
757 		nc = this_cpu_ptr(&napi_alloc_cache.page);
758 		data = page_frag_alloc(nc, len, gfp_mask);
759 		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
760 
761 		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
762 		local_bh_enable();
763 	}
764 
765 	if (unlikely(!data))
766 		return NULL;
767 
768 	skb = __build_skb(data, len);
769 	if (unlikely(!skb)) {
770 		skb_free_frag(data);
771 		return NULL;
772 	}
773 
774 	if (pfmemalloc)
775 		skb->pfmemalloc = 1;
776 	skb->head_frag = 1;
777 
778 skb_success:
779 	skb_reserve(skb, NET_SKB_PAD);
780 	skb->dev = dev;
781 
782 skb_fail:
783 	return skb;
784 }
785 EXPORT_SYMBOL(__netdev_alloc_skb);
786 
787 /**
788  *	napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
789  *	@napi: napi instance this buffer was allocated for
790  *	@len: length to allocate
791  *
792  *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
793  *	attempt to allocate the head from a special reserved region used
794  *	only for NAPI Rx allocation.  By doing this we can save several
795  *	CPU cycles by avoiding having to disable and re-enable IRQs.
796  *
797  *	%NULL is returned if there is no free memory.
798  */
799 struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
800 {
801 	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
802 	struct napi_alloc_cache *nc;
803 	struct sk_buff *skb;
804 	bool pfmemalloc;
805 	void *data;
806 
807 	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
808 	len += NET_SKB_PAD + NET_IP_ALIGN;
809 
810 	/* If requested length is either too small or too big,
811 	 * we use kmalloc() for skb->head allocation.
812 	 */
813 	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
814 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
815 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
816 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
817 				  NUMA_NO_NODE);
818 		if (!skb)
819 			goto skb_fail;
820 		goto skb_success;
821 	}
822 
823 	len = SKB_HEAD_ALIGN(len);
824 
825 	if (sk_memalloc_socks())
826 		gfp_mask |= __GFP_MEMALLOC;
827 
828 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
829 	nc = this_cpu_ptr(&napi_alloc_cache);
830 
831 	data = page_frag_alloc(&nc->page, len, gfp_mask);
832 	pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
833 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
834 
835 	if (unlikely(!data))
836 		return NULL;
837 
838 	skb = __napi_build_skb(data, len);
839 	if (unlikely(!skb)) {
840 		skb_free_frag(data);
841 		return NULL;
842 	}
843 
844 	if (pfmemalloc)
845 		skb->pfmemalloc = 1;
846 	skb->head_frag = 1;
847 
848 skb_success:
849 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
850 	skb->dev = napi->dev;
851 
852 skb_fail:
853 	return skb;
854 }
855 EXPORT_SYMBOL(napi_alloc_skb);
856 
857 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
858 			    int off, int size, unsigned int truesize)
859 {
860 	DEBUG_NET_WARN_ON_ONCE(size > truesize);
861 
862 	skb_fill_netmem_desc(skb, i, netmem, off, size);
863 	skb->len += size;
864 	skb->data_len += size;
865 	skb->truesize += truesize;
866 }
867 EXPORT_SYMBOL(skb_add_rx_frag_netmem);
868 
869 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
870 			  unsigned int truesize)
871 {
872 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
873 
874 	DEBUG_NET_WARN_ON_ONCE(size > truesize);
875 
876 	skb_frag_size_add(frag, size);
877 	skb->len += size;
878 	skb->data_len += size;
879 	skb->truesize += truesize;
880 }
881 EXPORT_SYMBOL(skb_coalesce_rx_frag);
882 
883 static void skb_drop_list(struct sk_buff **listp)
884 {
885 	kfree_skb_list(*listp);
886 	*listp = NULL;
887 }
888 
889 static inline void skb_drop_fraglist(struct sk_buff *skb)
890 {
891 	skb_drop_list(&skb_shinfo(skb)->frag_list);
892 }
893 
894 static void skb_clone_fraglist(struct sk_buff *skb)
895 {
896 	struct sk_buff *list;
897 
898 	skb_walk_frags(skb, list)
899 		skb_get(list);
900 }
901 
902 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
903 		    unsigned int headroom)
904 {
905 #if IS_ENABLED(CONFIG_PAGE_POOL)
906 	u32 size, truesize, len, max_head_size, off;
907 	struct sk_buff *skb = *pskb, *nskb;
908 	int err, i, head_off;
909 	void *data;
910 
911 	/* XDP does not support fraglist so we need to linearize
912 	 * the skb.
913 	 */
914 	if (skb_has_frag_list(skb))
915 		return -EOPNOTSUPP;
916 
917 	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
918 	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
919 		return -ENOMEM;
920 
921 	size = min_t(u32, skb->len, max_head_size);
922 	truesize = SKB_HEAD_ALIGN(size) + headroom;
923 	data = page_pool_dev_alloc_va(pool, &truesize);
924 	if (!data)
925 		return -ENOMEM;
926 
927 	nskb = napi_build_skb(data, truesize);
928 	if (!nskb) {
929 		page_pool_free_va(pool, data, true);
930 		return -ENOMEM;
931 	}
932 
933 	skb_reserve(nskb, headroom);
934 	skb_copy_header(nskb, skb);
935 	skb_mark_for_recycle(nskb);
936 
937 	err = skb_copy_bits(skb, 0, nskb->data, size);
938 	if (err) {
939 		consume_skb(nskb);
940 		return err;
941 	}
942 	skb_put(nskb, size);
943 
944 	head_off = skb_headroom(nskb) - skb_headroom(skb);
945 	skb_headers_offset_update(nskb, head_off);
946 
947 	off = size;
948 	len = skb->len - off;
949 	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
950 		struct page *page;
951 		u32 page_off;
952 
953 		size = min_t(u32, len, PAGE_SIZE);
954 		truesize = size;
955 
956 		page = page_pool_dev_alloc(pool, &page_off, &truesize);
957 		if (!page) {
958 			consume_skb(nskb);
959 			return -ENOMEM;
960 		}
961 
962 		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
963 		err = skb_copy_bits(skb, off, page_address(page) + page_off,
964 				    size);
965 		if (err) {
966 			consume_skb(nskb);
967 			return err;
968 		}
969 
970 		len -= size;
971 		off += size;
972 	}
973 
974 	consume_skb(skb);
975 	*pskb = nskb;
976 
977 	return 0;
978 #else
979 	return -EOPNOTSUPP;
980 #endif
981 }
982 EXPORT_SYMBOL(skb_pp_cow_data);
983 
984 int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
985 			 const struct bpf_prog *prog)
986 {
987 	if (!prog->aux->xdp_has_frags)
988 		return -EINVAL;
989 
990 	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
991 }
992 EXPORT_SYMBOL(skb_cow_data_for_xdp);
993 
994 #if IS_ENABLED(CONFIG_PAGE_POOL)
995 bool napi_pp_put_page(netmem_ref netmem)
996 {
997 	netmem = netmem_compound_head(netmem);
998 
999 	if (unlikely(!netmem_is_pp(netmem)))
1000 		return false;
1001 
1002 	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
1003 
1004 	return true;
1005 }
1006 EXPORT_SYMBOL(napi_pp_put_page);
1007 #endif
1008 
1009 static bool skb_pp_recycle(struct sk_buff *skb, void *data)
1010 {
1011 	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
1012 		return false;
1013 	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
1014 }
1015 
1016 /**
1017  * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
1018  * @skb:	page pool aware skb
1019  *
1020  * Increase the fragment reference count (pp_ref_count) of a skb. This is
1021  * intended to gain fragment references only for page pool aware skbs,
1022  * i.e. when skb->pp_recycle is true, and not for fragments in a
1023  * non-pp-recycling skb. It has a fallback to increase references on normal
1024  * pages, as page pool aware skbs may also have normal page fragments.
1025  */
1026 static int skb_pp_frag_ref(struct sk_buff *skb)
1027 {
1028 	struct skb_shared_info *shinfo;
1029 	netmem_ref head_netmem;
1030 	int i;
1031 
1032 	if (!skb->pp_recycle)
1033 		return -EINVAL;
1034 
1035 	shinfo = skb_shinfo(skb);
1036 
1037 	for (i = 0; i < shinfo->nr_frags; i++) {
1038 		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
1039 		if (likely(netmem_is_pp(head_netmem)))
1040 			page_pool_ref_netmem(head_netmem);
1041 		else
1042 			page_ref_inc(netmem_to_page(head_netmem));
1043 	}
1044 	return 0;
1045 }
1046 
1047 static void skb_kfree_head(void *head, unsigned int end_offset)
1048 {
1049 	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
1050 		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
1051 	else
1052 		kfree(head);
1053 }
1054 
1055 static void skb_free_head(struct sk_buff *skb)
1056 {
1057 	unsigned char *head = skb->head;
1058 
1059 	if (skb->head_frag) {
1060 		if (skb_pp_recycle(skb, head))
1061 			return;
1062 		skb_free_frag(head);
1063 	} else {
1064 		skb_kfree_head(head, skb_end_offset(skb));
1065 	}
1066 }
1067 
1068 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
1069 {
1070 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1071 	int i;
1072 
1073 	if (!skb_data_unref(skb, shinfo))
1074 		goto exit;
1075 
1076 	if (skb_zcopy(skb)) {
1077 		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;
1078 
1079 		skb_zcopy_clear(skb, true);
1080 		if (skip_unref)
1081 			goto free_head;
1082 	}
1083 
1084 	for (i = 0; i < shinfo->nr_frags; i++)
1085 		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
1086 
1087 free_head:
1088 	if (shinfo->frag_list)
1089 		kfree_skb_list_reason(shinfo->frag_list, reason);
1090 
1091 	skb_free_head(skb);
1092 exit:
1093 	/* When we clone an SKB we copy the reycling bit. The pp_recycle
1094 	 * bit is only set on the head though, so in order to avoid races
1095 	 * while trying to recycle fragments on __skb_frag_unref() we need
1096 	 * to make one SKB responsible for triggering the recycle path.
1097 	 * So disable the recycling bit if an SKB is cloned and we have
1098 	 * additional references to the fragmented part of the SKB.
1099 	 * Eventually the last SKB will have the recycling bit set and it's
1100 	 * dataref set to 0, which will trigger the recycling
1101 	 */
1102 	skb->pp_recycle = 0;
1103 }
1104 
1105 /*
1106  *	Free an skbuff by memory without cleaning the state.
1107  */
1108 static void kfree_skbmem(struct sk_buff *skb)
1109 {
1110 	struct sk_buff_fclones *fclones;
1111 
1112 	switch (skb->fclone) {
1113 	case SKB_FCLONE_UNAVAILABLE:
1114 		kmem_cache_free(net_hotdata.skbuff_cache, skb);
1115 		return;
1116 
1117 	case SKB_FCLONE_ORIG:
1118 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
1119 
1120 		/* We usually free the clone (TX completion) before original skb
1121 		 * This test would have no chance to be true for the clone,
1122 		 * while here, branch prediction will be good.
1123 		 */
1124 		if (refcount_read(&fclones->fclone_ref) == 1)
1125 			goto fastpath;
1126 		break;
1127 
1128 	default: /* SKB_FCLONE_CLONE */
1129 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
1130 		break;
1131 	}
1132 	if (!refcount_dec_and_test(&fclones->fclone_ref))
1133 		return;
1134 fastpath:
1135 	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
1136 }
1137 
1138 void skb_release_head_state(struct sk_buff *skb)
1139 {
1140 	skb_dst_drop(skb);
1141 	if (skb->destructor) {
1142 		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
1143 #ifdef CONFIG_INET
1144 		INDIRECT_CALL_4(skb->destructor,
1145 				tcp_wfree, __sock_wfree, sock_wfree,
1146 				xsk_destruct_skb,
1147 				skb);
1148 #else
1149 		INDIRECT_CALL_2(skb->destructor,
1150 				sock_wfree, xsk_destruct_skb,
1151 				skb);
1152 
1153 #endif
1154 		skb->destructor = NULL;
1155 		skb->sk = NULL;
1156 	}
1157 	nf_reset_ct(skb);
1158 	skb_ext_reset(skb);
1159 }
1160 
1161 /* Free everything but the sk_buff shell. */
1162 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
1163 {
1164 	skb_release_head_state(skb);
1165 	if (likely(skb->head))
1166 		skb_release_data(skb, reason);
1167 }
1168 
1169 /**
1170  *	__kfree_skb - private function
1171  *	@skb: buffer
1172  *
1173  *	Free an sk_buff. Release anything attached to the buffer.
1174  *	Clean the state. This is an internal helper function. Users should
1175  *	always call kfree_skb
1176  */
1177 
1178 void __kfree_skb(struct sk_buff *skb)
1179 {
1180 	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
1181 	kfree_skbmem(skb);
1182 }
1183 EXPORT_SYMBOL(__kfree_skb);
1184 
1185 static __always_inline
1186 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
1187 			  enum skb_drop_reason reason)
1188 {
1189 	if (unlikely(!skb_unref(skb)))
1190 		return false;
1191 
1192 	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
1193 			       u32_get_bits(reason,
1194 					    SKB_DROP_REASON_SUBSYS_MASK) >=
1195 				SKB_DROP_REASON_SUBSYS_NUM);
1196 
1197 	if (reason == SKB_CONSUMED)
1198 		trace_consume_skb(skb, __builtin_return_address(0));
1199 	else
1200 		trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
1201 	return true;
1202 }
1203 
1204 /**
1205  *	sk_skb_reason_drop - free an sk_buff with special reason
1206  *	@sk: the socket to receive @skb, or NULL if not applicable
1207  *	@skb: buffer to free
1208  *	@reason: reason why this skb is dropped
1209  *
1210  *	Drop a reference to the buffer and free it if the usage count has hit
1211  *	zero. Meanwhile, pass the receiving socket and drop reason to
1212  *	'kfree_skb' tracepoint.
1213  */
1214 void __fix_address
1215 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
1216 {
1217 	if (__sk_skb_reason_drop(sk, skb, reason))
1218 		__kfree_skb(skb);
1219 }
1220 EXPORT_SYMBOL(sk_skb_reason_drop);
1221 
1222 #define KFREE_SKB_BULK_SIZE	16
1223 
1224 struct skb_free_array {
1225 	unsigned int skb_count;
1226 	void *skb_array[KFREE_SKB_BULK_SIZE];
1227 };
1228 
1229 static void kfree_skb_add_bulk(struct sk_buff *skb,
1230 			       struct skb_free_array *sa,
1231 			       enum skb_drop_reason reason)
1232 {
1233 	/* if SKB is a clone, don't handle this case */
1234 	if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
1235 		__kfree_skb(skb);
1236 		return;
1237 	}
1238 
1239 	skb_release_all(skb, reason);
1240 	sa->skb_array[sa->skb_count++] = skb;
1241 
1242 	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
1243 		kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
1244 				     sa->skb_array);
1245 		sa->skb_count = 0;
1246 	}
1247 }
1248 
1249 void __fix_address
1250 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
1251 {
1252 	struct skb_free_array sa;
1253 
1254 	sa.skb_count = 0;
1255 
1256 	while (segs) {
1257 		struct sk_buff *next = segs->next;
1258 
1259 		if (__sk_skb_reason_drop(NULL, segs, reason)) {
1260 			skb_poison_list(segs);
1261 			kfree_skb_add_bulk(segs, &sa, reason);
1262 		}
1263 
1264 		segs = next;
1265 	}
1266 
1267 	if (sa.skb_count)
1268 		kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
1269 }
1270 EXPORT_SYMBOL(kfree_skb_list_reason);
1271 
1272 /* Dump skb information and contents.
1273  *
1274  * Must only be called from net_ratelimit()-ed paths.
1275  *
1276  * Dumps whole packets if full_pkt, only headers otherwise.
1277  */
1278 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
1279 {
1280 	struct skb_shared_info *sh = skb_shinfo(skb);
1281 	struct net_device *dev = skb->dev;
1282 	struct sock *sk = skb->sk;
1283 	struct sk_buff *list_skb;
1284 	bool has_mac, has_trans;
1285 	int headroom, tailroom;
1286 	int i, len, seg_len;
1287 
1288 	if (full_pkt)
1289 		len = skb->len;
1290 	else
1291 		len = min_t(int, skb->len, MAX_HEADER + 128);
1292 
1293 	headroom = skb_headroom(skb);
1294 	tailroom = skb_tailroom(skb);
1295 
1296 	has_mac = skb_mac_header_was_set(skb);
1297 	has_trans = skb_transport_header_was_set(skb);
1298 
1299 	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
1300 	       "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
1301 	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
1302 	       "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
1303 	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
1304 	       "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
1305 	       "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
1306 	       level, skb->len, headroom, skb_headlen(skb), tailroom,
1307 	       has_mac ? skb->mac_header : -1,
1308 	       has_mac ? skb_mac_header_len(skb) : -1,
1309 	       skb->mac_len,
1310 	       skb->network_header,
1311 	       has_trans ? skb_network_header_len(skb) : -1,
1312 	       has_trans ? skb->transport_header : -1,
1313 	       sh->tx_flags, sh->nr_frags,
1314 	       sh->gso_size, sh->gso_type, sh->gso_segs,
1315 	       skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
1316 	       skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
1317 	       skb->hash, skb->sw_hash, skb->l4_hash,
1318 	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
1319 	       skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
1320 	       skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
1321 	       skb->inner_network_header, skb->inner_transport_header);
1322 
1323 	if (dev)
1324 		printk("%sdev name=%s feat=%pNF\n",
1325 		       level, dev->name, &dev->features);
1326 	if (sk)
1327 		printk("%ssk family=%hu type=%u proto=%u\n",
1328 		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);
1329 
1330 	if (full_pkt && headroom)
1331 		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
1332 			       16, 1, skb->head, headroom, false);
1333 
1334 	seg_len = min_t(int, skb_headlen(skb), len);
1335 	if (seg_len)
1336 		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
1337 			       16, 1, skb->data, seg_len, false);
1338 	len -= seg_len;
1339 
1340 	if (full_pkt && tailroom)
1341 		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
1342 			       16, 1, skb_tail_pointer(skb), tailroom, false);
1343 
1344 	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
1345 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1346 		u32 p_off, p_len, copied;
1347 		struct page *p;
1348 		u8 *vaddr;
1349 
1350 		if (skb_frag_is_net_iov(frag)) {
1351 			printk("%sskb frag %d: not readable\n", level, i);
1352 			len -= skb_frag_size(frag);
1353 			if (!len)
1354 				break;
1355 			continue;
1356 		}
1357 
1358 		skb_frag_foreach_page(frag, skb_frag_off(frag),
1359 				      skb_frag_size(frag), p, p_off, p_len,
1360 				      copied) {
1361 			seg_len = min_t(int, p_len, len);
1362 			vaddr = kmap_atomic(p);
1363 			print_hex_dump(level, "skb frag:     ",
1364 				       DUMP_PREFIX_OFFSET,
1365 				       16, 1, vaddr + p_off, seg_len, false);
1366 			kunmap_atomic(vaddr);
1367 			len -= seg_len;
1368 			if (!len)
1369 				break;
1370 		}
1371 	}
1372 
1373 	if (full_pkt && skb_has_frag_list(skb)) {
1374 		printk("skb fraglist:\n");
1375 		skb_walk_frags(skb, list_skb)
1376 			skb_dump(level, list_skb, true);
1377 	}
1378 }
1379 EXPORT_SYMBOL(skb_dump);
1380 
1381 /**
1382  *	skb_tx_error - report an sk_buff xmit error
1383  *	@skb: buffer that triggered an error
1384  *
1385  *	Report xmit error if a device callback is tracking this skb.
1386  *	skb must be freed afterwards.
1387  */
1388 void skb_tx_error(struct sk_buff *skb)
1389 {
1390 	if (skb) {
1391 		skb_zcopy_downgrade_managed(skb);
1392 		skb_zcopy_clear(skb, true);
1393 	}
1394 }
1395 EXPORT_SYMBOL(skb_tx_error);
1396 
1397 #ifdef CONFIG_TRACEPOINTS
1398 /**
1399  *	consume_skb - free an skbuff
1400  *	@skb: buffer to free
1401  *
1402  *	Drop a ref to the buffer and free it if the usage count has hit zero
1403  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
1404  *	is being dropped after a failure and notes that
1405  */
1406 void consume_skb(struct sk_buff *skb)
1407 {
1408 	if (!skb_unref(skb))
1409 		return;
1410 
1411 	trace_consume_skb(skb, __builtin_return_address(0));
1412 	__kfree_skb(skb);
1413 }
1414 EXPORT_SYMBOL(consume_skb);
1415 #endif
1416 
1417 /**
1418  *	__consume_stateless_skb - free an skbuff, assuming it is stateless
1419  *	@skb: buffer to free
1420  *
1421  *	Alike consume_skb(), but this variant assumes that this is the last
1422  *	skb reference and all the head states have been already dropped
1423  */
1424 void __consume_stateless_skb(struct sk_buff *skb)
1425 {
1426 	trace_consume_skb(skb, __builtin_return_address(0));
1427 	skb_release_data(skb, SKB_CONSUMED);
1428 	kfree_skbmem(skb);
1429 }
1430 
1431 static void napi_skb_cache_put(struct sk_buff *skb)
1432 {
1433 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
1434 	u32 i;
1435 
1436 	if (!kasan_mempool_poison_object(skb))
1437 		return;
1438 
1439 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
1440 	nc->skb_cache[nc->skb_count++] = skb;
1441 
1442 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
1443 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
1444 			kasan_mempool_unpoison_object(nc->skb_cache[i],
1445 						skbuff_cache_size);
1446 
1447 		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
1448 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
1449 		nc->skb_count = NAPI_SKB_CACHE_HALF;
1450 	}
1451 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
1452 }
1453 
1454 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
1455 {
1456 	skb_release_all(skb, reason);
1457 	napi_skb_cache_put(skb);
1458 }
1459 
1460 void napi_skb_free_stolen_head(struct sk_buff *skb)
1461 {
1462 	if (unlikely(skb->slow_gro)) {
1463 		nf_reset_ct(skb);
1464 		skb_dst_drop(skb);
1465 		skb_ext_put(skb);
1466 		skb_orphan(skb);
1467 		skb->slow_gro = 0;
1468 	}
1469 	napi_skb_cache_put(skb);
1470 }
1471 
1472 void napi_consume_skb(struct sk_buff *skb, int budget)
1473 {
1474 	/* Zero budget indicate non-NAPI context called us, like netpoll */
1475 	if (unlikely(!budget)) {
1476 		dev_consume_skb_any(skb);
1477 		return;
1478 	}
1479 
1480 	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
1481 
1482 	if (skb->alloc_cpu != smp_processor_id() && !skb_shared(skb)) {
1483 		skb_release_head_state(skb);
1484 		return skb_attempt_defer_free(skb);
1485 	}
1486 
1487 	if (!skb_unref(skb))
1488 		return;
1489 
1490 	/* if reaching here SKB is ready to free */
1491 	trace_consume_skb(skb, __builtin_return_address(0));
1492 
1493 	/* if SKB is a clone, don't handle this case */
1494 	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
1495 		__kfree_skb(skb);
1496 		return;
1497 	}
1498 
1499 	skb_release_all(skb, SKB_CONSUMED);
1500 	napi_skb_cache_put(skb);
1501 }
1502 EXPORT_SYMBOL(napi_consume_skb);
1503 
1504 /* Make sure a field is contained by headers group */
1505 #define CHECK_SKB_FIELD(field) \
1506 	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
1507 		     offsetof(struct sk_buff, headers.field));	\
1508 
1509 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1510 {
1511 	new->tstamp		= old->tstamp;
1512 	/* We do not copy old->sk */
1513 	new->dev		= old->dev;
1514 	memcpy(new->cb, old->cb, sizeof(old->cb));
1515 	skb_dst_copy(new, old);
1516 	__skb_ext_copy(new, old);
1517 	__nf_copy(new, old, false);
1518 
1519 	/* Note : this field could be in the headers group.
1520 	 * It is not yet because we do not want to have a 16 bit hole
1521 	 */
1522 	new->queue_mapping = old->queue_mapping;
1523 
1524 	memcpy(&new->headers, &old->headers, sizeof(new->headers));
1525 	CHECK_SKB_FIELD(protocol);
1526 	CHECK_SKB_FIELD(csum);
1527 	CHECK_SKB_FIELD(hash);
1528 	CHECK_SKB_FIELD(priority);
1529 	CHECK_SKB_FIELD(skb_iif);
1530 	CHECK_SKB_FIELD(vlan_proto);
1531 	CHECK_SKB_FIELD(vlan_tci);
1532 	CHECK_SKB_FIELD(transport_header);
1533 	CHECK_SKB_FIELD(network_header);
1534 	CHECK_SKB_FIELD(mac_header);
1535 	CHECK_SKB_FIELD(inner_protocol);
1536 	CHECK_SKB_FIELD(inner_transport_header);
1537 	CHECK_SKB_FIELD(inner_network_header);
1538 	CHECK_SKB_FIELD(inner_mac_header);
1539 	CHECK_SKB_FIELD(mark);
1540 #ifdef CONFIG_NETWORK_SECMARK
1541 	CHECK_SKB_FIELD(secmark);
1542 #endif
1543 #ifdef CONFIG_NET_RX_BUSY_POLL
1544 	CHECK_SKB_FIELD(napi_id);
1545 #endif
1546 	CHECK_SKB_FIELD(alloc_cpu);
1547 #ifdef CONFIG_XPS
1548 	CHECK_SKB_FIELD(sender_cpu);
1549 #endif
1550 #ifdef CONFIG_NET_SCHED
1551 	CHECK_SKB_FIELD(tc_index);
1552 #endif
1553 
1554 }
1555 
1556 /*
1557  * You should not add any new code to this function.  Add it to
1558  * __copy_skb_header above instead.
1559  */
1560 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
1561 {
1562 #define C(x) n->x = skb->x
1563 
1564 	n->next = n->prev = NULL;
1565 	n->sk = NULL;
1566 	__copy_skb_header(n, skb);
1567 
1568 	C(len);
1569 	C(data_len);
1570 	C(mac_len);
1571 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
1572 	n->cloned = 1;
1573 	n->nohdr = 0;
1574 	n->peeked = 0;
1575 	C(pfmemalloc);
1576 	C(pp_recycle);
1577 	n->destructor = NULL;
1578 	C(tail);
1579 	C(end);
1580 	C(head);
1581 	C(head_frag);
1582 	C(data);
1583 	C(truesize);
1584 	refcount_set(&n->users, 1);
1585 
1586 	atomic_inc(&(skb_shinfo(skb)->dataref));
1587 	skb->cloned = 1;
1588 
1589 	return n;
1590 #undef C
1591 }
1592 
1593 /**
1594  * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1595  * @first: first sk_buff of the msg
1596  */
1597 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1598 {
1599 	struct sk_buff *n;
1600 
1601 	n = alloc_skb(0, GFP_ATOMIC);
1602 	if (!n)
1603 		return NULL;
1604 
1605 	n->len = first->len;
1606 	n->data_len = first->len;
1607 	n->truesize = first->truesize;
1608 
1609 	skb_shinfo(n)->frag_list = first;
1610 
1611 	__copy_skb_header(n, first);
1612 	n->destructor = NULL;
1613 
1614 	return n;
1615 }
1616 EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1617 
1618 /**
1619  *	skb_morph	-	morph one skb into another
1620  *	@dst: the skb to receive the contents
1621  *	@src: the skb to supply the contents
1622  *
1623  *	This is identical to skb_clone except that the target skb is
1624  *	supplied by the user.
1625  *
1626  *	The target skb is returned upon exit.
1627  */
1628 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1629 {
1630 	skb_release_all(dst, SKB_CONSUMED);
1631 	return __skb_clone(dst, src);
1632 }
1633 EXPORT_SYMBOL_GPL(skb_morph);
1634 
1635 int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1636 {
1637 	unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
1638 	struct user_struct *user;
1639 
1640 	if (capable(CAP_IPC_LOCK) || !size)
1641 		return 0;
1642 
1643 	rlim = rlimit(RLIMIT_MEMLOCK);
1644 	if (rlim == RLIM_INFINITY)
1645 		return 0;
1646 
1647 	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
1648 	max_pg = rlim >> PAGE_SHIFT;
1649 	user = mmp->user ? : current_user();
1650 
1651 	old_pg = atomic_long_read(&user->locked_vm);
1652 	do {
1653 		new_pg = old_pg + num_pg;
1654 		if (new_pg > max_pg)
1655 			return -ENOBUFS;
1656 	} while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));
1657 
1658 	if (!mmp->user) {
1659 		mmp->user = get_uid(user);
1660 		mmp->num_pg = num_pg;
1661 	} else {
1662 		mmp->num_pg += num_pg;
1663 	}
1664 
1665 	return 0;
1666 }
1667 EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1668 
1669 void mm_unaccount_pinned_pages(struct mmpin *mmp)
1670 {
1671 	if (mmp->user) {
1672 		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1673 		free_uid(mmp->user);
1674 	}
1675 }
1676 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1677 
1678 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size,
1679 					    bool devmem)
1680 {
1681 	struct ubuf_info_msgzc *uarg;
1682 	struct sk_buff *skb;
1683 
1684 	WARN_ON_ONCE(!in_task());
1685 
1686 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
1687 	if (!skb)
1688 		return NULL;
1689 
1690 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1691 	uarg = (void *)skb->cb;
1692 	uarg->mmp.user = NULL;
1693 
1694 	if (likely(!devmem) && mm_account_pinned_pages(&uarg->mmp, size)) {
1695 		kfree_skb(skb);
1696 		return NULL;
1697 	}
1698 
1699 	uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
1700 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1701 	uarg->len = 1;
1702 	uarg->bytelen = size;
1703 	uarg->zerocopy = 1;
1704 	uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
1705 	refcount_set(&uarg->ubuf.refcnt, 1);
1706 	sock_hold(sk);
1707 
1708 	return &uarg->ubuf;
1709 }
1710 
1711 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
1712 {
1713 	return container_of((void *)uarg, struct sk_buff, cb);
1714 }
1715 
1716 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
1717 				       struct ubuf_info *uarg, bool devmem)
1718 {
1719 	if (uarg) {
1720 		struct ubuf_info_msgzc *uarg_zc;
1721 		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
1722 		u32 bytelen, next;
1723 
1724 		/* there might be non MSG_ZEROCOPY users */
1725 		if (uarg->ops != &msg_zerocopy_ubuf_ops)
1726 			return NULL;
1727 
1728 		/* realloc only when socket is locked (TCP, UDP cork),
1729 		 * so uarg->len and sk_zckey access is serialized
1730 		 */
1731 		if (!sock_owned_by_user(sk)) {
1732 			WARN_ON_ONCE(1);
1733 			return NULL;
1734 		}
1735 
1736 		uarg_zc = uarg_to_msgzc(uarg);
1737 		bytelen = uarg_zc->bytelen + size;
1738 		if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1739 			/* TCP can create new skb to attach new uarg */
1740 			if (sk->sk_type == SOCK_STREAM)
1741 				goto new_alloc;
1742 			return NULL;
1743 		}
1744 
1745 		next = (u32)atomic_read(&sk->sk_zckey);
1746 		if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
1747 			if (likely(!devmem) &&
1748 			    mm_account_pinned_pages(&uarg_zc->mmp, size))
1749 				return NULL;
1750 			uarg_zc->len++;
1751 			uarg_zc->bytelen = bytelen;
1752 			atomic_set(&sk->sk_zckey, ++next);
1753 
1754 			/* no extra ref when appending to datagram (MSG_MORE) */
1755 			if (sk->sk_type == SOCK_STREAM)
1756 				net_zcopy_get(uarg);
1757 
1758 			return uarg;
1759 		}
1760 	}
1761 
1762 new_alloc:
1763 	return msg_zerocopy_alloc(sk, size, devmem);
1764 }
1765 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
1766 
1767 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1768 {
1769 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1770 	u32 old_lo, old_hi;
1771 	u64 sum_len;
1772 
1773 	old_lo = serr->ee.ee_info;
1774 	old_hi = serr->ee.ee_data;
1775 	sum_len = old_hi - old_lo + 1ULL + len;
1776 
1777 	if (sum_len >= (1ULL << 32))
1778 		return false;
1779 
1780 	if (lo != old_hi + 1)
1781 		return false;
1782 
1783 	serr->ee.ee_data += len;
1784 	return true;
1785 }
1786 
1787 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
1788 {
1789 	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1790 	struct sock_exterr_skb *serr;
1791 	struct sock *sk = skb->sk;
1792 	struct sk_buff_head *q;
1793 	unsigned long flags;
1794 	bool is_zerocopy;
1795 	u32 lo, hi;
1796 	u16 len;
1797 
1798 	mm_unaccount_pinned_pages(&uarg->mmp);
1799 
1800 	/* if !len, there was only 1 call, and it was aborted
1801 	 * so do not queue a completion notification
1802 	 */
1803 	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1804 		goto release;
1805 
1806 	len = uarg->len;
1807 	lo = uarg->id;
1808 	hi = uarg->id + len - 1;
1809 	is_zerocopy = uarg->zerocopy;
1810 
1811 	serr = SKB_EXT_ERR(skb);
1812 	memset(serr, 0, sizeof(*serr));
1813 	serr->ee.ee_errno = 0;
1814 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1815 	serr->ee.ee_data = hi;
1816 	serr->ee.ee_info = lo;
1817 	if (!is_zerocopy)
1818 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1819 
1820 	q = &sk->sk_error_queue;
1821 	spin_lock_irqsave(&q->lock, flags);
1822 	tail = skb_peek_tail(q);
1823 	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1824 	    !skb_zerocopy_notify_extend(tail, lo, len)) {
1825 		__skb_queue_tail(q, skb);
1826 		skb = NULL;
1827 	}
1828 	spin_unlock_irqrestore(&q->lock, flags);
1829 
1830 	sk_error_report(sk);
1831 
1832 release:
1833 	consume_skb(skb);
1834 	sock_put(sk);
1835 }
1836 
1837 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
1838 				  bool success)
1839 {
1840 	struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
1841 
1842 	uarg_zc->zerocopy = uarg_zc->zerocopy & success;
1843 
1844 	if (refcount_dec_and_test(&uarg->refcnt))
1845 		__msg_zerocopy_callback(uarg_zc);
1846 }
1847 
1848 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1849 {
1850 	struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;
1851 
1852 	atomic_dec(&sk->sk_zckey);
1853 	uarg_to_msgzc(uarg)->len--;
1854 
1855 	if (have_uref)
1856 		msg_zerocopy_complete(NULL, uarg, true);
1857 }
1858 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
1859 
1860 const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
1861 	.complete = msg_zerocopy_complete,
1862 };
1863 EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);
1864 
1865 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1866 			     struct msghdr *msg, int len,
1867 			     struct ubuf_info *uarg,
1868 			     struct net_devmem_dmabuf_binding *binding)
1869 {
1870 	int err, orig_len = skb->len;
1871 
1872 	if (uarg->ops->link_skb) {
1873 		err = uarg->ops->link_skb(skb, uarg);
1874 		if (err)
1875 			return err;
1876 	} else {
1877 		struct ubuf_info *orig_uarg = skb_zcopy(skb);
1878 
1879 		/* An skb can only point to one uarg. This edge case happens
1880 		 * when TCP appends to an skb, but zerocopy_realloc triggered
1881 		 * a new alloc.
1882 		 */
1883 		if (orig_uarg && uarg != orig_uarg)
1884 			return -EEXIST;
1885 	}
1886 
1887 	err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len,
1888 				      binding);
1889 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1890 		struct sock *save_sk = skb->sk;
1891 
1892 		/* Streams do not free skb on error. Reset to prev state. */
1893 		iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
1894 		skb->sk = sk;
1895 		___pskb_trim(skb, orig_len);
1896 		skb->sk = save_sk;
1897 		return err;
1898 	}
1899 
1900 	skb_zcopy_set(skb, uarg, NULL);
1901 	return skb->len - orig_len;
1902 }
1903 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1904 
1905 void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
1906 {
1907 	int i;
1908 
1909 	skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
1910 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1911 		skb_frag_ref(skb, i);
1912 }
1913 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);
1914 
1915 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1916 			      gfp_t gfp_mask)
1917 {
1918 	if (skb_zcopy(orig)) {
1919 		if (skb_zcopy(nskb)) {
1920 			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1921 			if (!gfp_mask) {
1922 				WARN_ON_ONCE(1);
1923 				return -ENOMEM;
1924 			}
1925 			if (skb_uarg(nskb) == skb_uarg(orig))
1926 				return 0;
1927 			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1928 				return -EIO;
1929 		}
1930 		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1931 	}
1932 	return 0;
1933 }
1934 
1935 /**
1936  *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
1937  *	@skb: the skb to modify
1938  *	@gfp_mask: allocation priority
1939  *
1940  *	This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1941  *	It will copy all frags into kernel and drop the reference
1942  *	to userspace pages.
1943  *
1944  *	If this function is called from an interrupt gfp_mask() must be
1945  *	%GFP_ATOMIC.
1946  *
1947  *	Returns 0 on success or a negative error code on failure
1948  *	to allocate kernel memory to copy to.
1949  */
1950 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1951 {
1952 	int num_frags = skb_shinfo(skb)->nr_frags;
1953 	struct page *page, *head = NULL;
1954 	int i, order, psize, new_frags;
1955 	u32 d_off;
1956 
1957 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1958 		return -EINVAL;
1959 
1960 	if (!skb_frags_readable(skb))
1961 		return -EFAULT;
1962 
1963 	if (!num_frags)
1964 		goto release;
1965 
1966 	/* We might have to allocate high order pages, so compute what minimum
1967 	 * page order is needed.
1968 	 */
1969 	order = 0;
1970 	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
1971 		order++;
1972 	psize = (PAGE_SIZE << order);
1973 
1974 	new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
1975 	for (i = 0; i < new_frags; i++) {
1976 		page = alloc_pages(gfp_mask | __GFP_COMP, order);
1977 		if (!page) {
1978 			while (head) {
1979 				struct page *next = (struct page *)page_private(head);
1980 				put_page(head);
1981 				head = next;
1982 			}
1983 			return -ENOMEM;
1984 		}
1985 		set_page_private(page, (unsigned long)head);
1986 		head = page;
1987 	}
1988 
1989 	page = head;
1990 	d_off = 0;
1991 	for (i = 0; i < num_frags; i++) {
1992 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1993 		u32 p_off, p_len, copied;
1994 		struct page *p;
1995 		u8 *vaddr;
1996 
1997 		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1998 				      p, p_off, p_len, copied) {
1999 			u32 copy, done = 0;
2000 			vaddr = kmap_atomic(p);
2001 
2002 			while (done < p_len) {
2003 				if (d_off == psize) {
2004 					d_off = 0;
2005 					page = (struct page *)page_private(page);
2006 				}
2007 				copy = min_t(u32, psize - d_off, p_len - done);
2008 				memcpy(page_address(page) + d_off,
2009 				       vaddr + p_off + done, copy);
2010 				done += copy;
2011 				d_off += copy;
2012 			}
2013 			kunmap_atomic(vaddr);
2014 		}
2015 	}
2016 
2017 	/* skb frags release userspace buffers */
2018 	for (i = 0; i < num_frags; i++)
2019 		skb_frag_unref(skb, i);
2020 
2021 	/* skb frags point to kernel buffers */
2022 	for (i = 0; i < new_frags - 1; i++) {
2023 		__skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
2024 		head = (struct page *)page_private(head);
2025 	}
2026 	__skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
2027 			       d_off);
2028 	skb_shinfo(skb)->nr_frags = new_frags;
2029 
2030 release:
2031 	skb_zcopy_clear(skb, false);
2032 	return 0;
2033 }
2034 EXPORT_SYMBOL_GPL(skb_copy_ubufs);
2035 
2036 /**
2037  *	skb_clone	-	duplicate an sk_buff
2038  *	@skb: buffer to clone
2039  *	@gfp_mask: allocation priority
2040  *
2041  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
2042  *	copies share the same packet data but not structure. The new
2043  *	buffer has a reference count of 1. If the allocation fails the
2044  *	function returns %NULL otherwise the new buffer is returned.
2045  *
2046  *	If this function is called from an interrupt gfp_mask() must be
2047  *	%GFP_ATOMIC.
2048  */
2049 
2050 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
2051 {
2052 	struct sk_buff_fclones *fclones = container_of(skb,
2053 						       struct sk_buff_fclones,
2054 						       skb1);
2055 	struct sk_buff *n;
2056 
2057 	if (skb_orphan_frags(skb, gfp_mask))
2058 		return NULL;
2059 
2060 	if (skb->fclone == SKB_FCLONE_ORIG &&
2061 	    refcount_read(&fclones->fclone_ref) == 1) {
2062 		n = &fclones->skb2;
2063 		refcount_set(&fclones->fclone_ref, 2);
2064 		n->fclone = SKB_FCLONE_CLONE;
2065 	} else {
2066 		if (skb_pfmemalloc(skb))
2067 			gfp_mask |= __GFP_MEMALLOC;
2068 
2069 		n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
2070 		if (!n)
2071 			return NULL;
2072 
2073 		n->fclone = SKB_FCLONE_UNAVAILABLE;
2074 	}
2075 
2076 	return __skb_clone(n, skb);
2077 }
2078 EXPORT_SYMBOL(skb_clone);
2079 
2080 void skb_headers_offset_update(struct sk_buff *skb, int off)
2081 {
2082 	/* Only adjust this if it actually is csum_start rather than csum */
2083 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2084 		skb->csum_start += off;
2085 	/* {transport,network,mac}_header and tail are relative to skb->head */
2086 	skb->transport_header += off;
2087 	skb->network_header   += off;
2088 	if (skb_mac_header_was_set(skb))
2089 		skb->mac_header += off;
2090 	skb->inner_transport_header += off;
2091 	skb->inner_network_header += off;
2092 	skb->inner_mac_header += off;
2093 }
2094 EXPORT_SYMBOL(skb_headers_offset_update);
2095 
2096 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
2097 {
2098 	__copy_skb_header(new, old);
2099 
2100 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
2101 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
2102 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
2103 }
2104 EXPORT_SYMBOL(skb_copy_header);
2105 
2106 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
2107 {
2108 	if (skb_pfmemalloc(skb))
2109 		return SKB_ALLOC_RX;
2110 	return 0;
2111 }
2112 
2113 /**
2114  *	skb_copy	-	create private copy of an sk_buff
2115  *	@skb: buffer to copy
2116  *	@gfp_mask: allocation priority
2117  *
2118  *	Make a copy of both an &sk_buff and its data. This is used when the
2119  *	caller wishes to modify the data and needs a private copy of the
2120  *	data to alter. Returns %NULL on failure or the pointer to the buffer
2121  *	on success. The returned buffer has a reference count of 1.
2122  *
2123  *	As by-product this function converts non-linear &sk_buff to linear
2124  *	one, so that &sk_buff becomes completely private and caller is allowed
2125  *	to modify all the data of returned buffer. This means that this
2126  *	function is not recommended for use in circumstances when only
2127  *	header is going to be modified. Use pskb_copy() instead.
2128  */
2129 
2130 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
2131 {
2132 	struct sk_buff *n;
2133 	unsigned int size;
2134 	int headerlen;
2135 
2136 	if (!skb_frags_readable(skb))
2137 		return NULL;
2138 
2139 	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
2140 		return NULL;
2141 
2142 	headerlen = skb_headroom(skb);
2143 	size = skb_end_offset(skb) + skb->data_len;
2144 	n = __alloc_skb(size, gfp_mask,
2145 			skb_alloc_rx_flag(skb), NUMA_NO_NODE);
2146 	if (!n)
2147 		return NULL;
2148 
2149 	/* Set the data pointer */
2150 	skb_reserve(n, headerlen);
2151 	/* Set the tail pointer and length */
2152 	skb_put(n, skb->len);
2153 
2154 	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
2155 
2156 	skb_copy_header(n, skb);
2157 	return n;
2158 }
2159 EXPORT_SYMBOL(skb_copy);
2160 
2161 /**
2162  *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
2163  *	@skb: buffer to copy
2164  *	@headroom: headroom of new skb
2165  *	@gfp_mask: allocation priority
2166  *	@fclone: if true allocate the copy of the skb from the fclone
2167  *	cache instead of the head cache; it is recommended to set this
2168  *	to true for the cases where the copy will likely be cloned
2169  *
2170  *	Make a copy of both an &sk_buff and part of its data, located
2171  *	in header. Fragmented data remain shared. This is used when
2172  *	the caller wishes to modify only header of &sk_buff and needs
2173  *	private copy of the header to alter. Returns %NULL on failure
2174  *	or the pointer to the buffer on success.
2175  *	The returned buffer has a reference count of 1.
2176  */
2177 
2178 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
2179 				   gfp_t gfp_mask, bool fclone)
2180 {
2181 	unsigned int size = skb_headlen(skb) + headroom;
2182 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
2183 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
2184 
2185 	if (!n)
2186 		goto out;
2187 
2188 	/* Set the data pointer */
2189 	skb_reserve(n, headroom);
2190 	/* Set the tail pointer and length */
2191 	skb_put(n, skb_headlen(skb));
2192 	/* Copy the bytes */
2193 	skb_copy_from_linear_data(skb, n->data, n->len);
2194 
2195 	n->truesize += skb->data_len;
2196 	n->data_len  = skb->data_len;
2197 	n->len	     = skb->len;
2198 
2199 	if (skb_shinfo(skb)->nr_frags) {
2200 		int i;
2201 
2202 		if (skb_orphan_frags(skb, gfp_mask) ||
2203 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
2204 			kfree_skb(n);
2205 			n = NULL;
2206 			goto out;
2207 		}
2208 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2209 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
2210 			skb_frag_ref(skb, i);
2211 		}
2212 		skb_shinfo(n)->nr_frags = i;
2213 	}
2214 
2215 	if (skb_has_frag_list(skb)) {
2216 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
2217 		skb_clone_fraglist(n);
2218 	}
2219 
2220 	skb_copy_header(n, skb);
2221 out:
2222 	return n;
2223 }
2224 EXPORT_SYMBOL(__pskb_copy_fclone);
2225 
2226 /**
2227  *	pskb_expand_head - reallocate header of &sk_buff
2228  *	@skb: buffer to reallocate
2229  *	@nhead: room to add at head
2230  *	@ntail: room to add at tail
2231  *	@gfp_mask: allocation priority
2232  *
2233  *	Expands (or creates identical copy, if @nhead and @ntail are zero)
2234  *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2235  *	reference count of 1. Returns zero in the case of success or error,
2236  *	if expansion failed. In the last case, &sk_buff is not changed.
2237  *
2238  *	All the pointers pointing into skb header may change and must be
2239  *	reloaded after call to this function.
2240  *
2241  *	Note: If you skb_push() the start of the buffer after reallocating the
2242  *	header, call skb_postpush_data_move() first to move the metadata out of
2243  *	the way before writing to &sk_buff->data.
2244  */
2245 
2246 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
2247 		     gfp_t gfp_mask)
2248 {
2249 	unsigned int osize = skb_end_offset(skb);
2250 	unsigned int size = osize + nhead + ntail;
2251 	long off;
2252 	u8 *data;
2253 	int i;
2254 
2255 	BUG_ON(nhead < 0);
2256 
2257 	BUG_ON(skb_shared(skb));
2258 
2259 	skb_zcopy_downgrade_managed(skb);
2260 
2261 	if (skb_pfmemalloc(skb))
2262 		gfp_mask |= __GFP_MEMALLOC;
2263 
2264 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
2265 	if (!data)
2266 		goto nodata;
2267 	size = SKB_WITH_OVERHEAD(size);
2268 
2269 	/* Copy only real data... and, alas, header. This should be
2270 	 * optimized for the cases when header is void.
2271 	 */
2272 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
2273 
2274 	memcpy((struct skb_shared_info *)(data + size),
2275 	       skb_shinfo(skb),
2276 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
2277 
2278 	/*
2279 	 * if shinfo is shared we must drop the old head gracefully, but if it
2280 	 * is not we can just drop the old head and let the existing refcount
2281 	 * be since all we did is relocate the values
2282 	 */
2283 	if (skb_cloned(skb)) {
2284 		if (skb_orphan_frags(skb, gfp_mask))
2285 			goto nofrags;
2286 		if (skb_zcopy(skb))
2287 			refcount_inc(&skb_uarg(skb)->refcnt);
2288 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2289 			skb_frag_ref(skb, i);
2290 
2291 		if (skb_has_frag_list(skb))
2292 			skb_clone_fraglist(skb);
2293 
2294 		skb_release_data(skb, SKB_CONSUMED);
2295 	} else {
2296 		skb_free_head(skb);
2297 	}
2298 	off = (data + nhead) - skb->head;
2299 
2300 	skb->head     = data;
2301 	skb->head_frag = 0;
2302 	skb->data    += off;
2303 
2304 	skb_set_end_offset(skb, size);
2305 #ifdef NET_SKBUFF_DATA_USES_OFFSET
2306 	off           = nhead;
2307 #endif
2308 	skb->tail	      += off;
2309 	skb_headers_offset_update(skb, nhead);
2310 	skb->cloned   = 0;
2311 	skb->hdr_len  = 0;
2312 	skb->nohdr    = 0;
2313 	atomic_set(&skb_shinfo(skb)->dataref, 1);
2314 
2315 	/* It is not generally safe to change skb->truesize.
2316 	 * For the moment, we really care of rx path, or
2317 	 * when skb is orphaned (not attached to a socket).
2318 	 */
2319 	if (!skb->sk || skb->destructor == sock_edemux)
2320 		skb->truesize += size - osize;
2321 
2322 	return 0;
2323 
2324 nofrags:
2325 	skb_kfree_head(data, size);
2326 nodata:
2327 	return -ENOMEM;
2328 }
2329 EXPORT_SYMBOL(pskb_expand_head);
2330 
2331 /* Make private copy of skb with writable head and some headroom */
2332 
2333 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
2334 {
2335 	struct sk_buff *skb2;
2336 	int delta = headroom - skb_headroom(skb);
2337 
2338 	if (delta <= 0)
2339 		skb2 = pskb_copy(skb, GFP_ATOMIC);
2340 	else {
2341 		skb2 = skb_clone(skb, GFP_ATOMIC);
2342 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
2343 					     GFP_ATOMIC)) {
2344 			kfree_skb(skb2);
2345 			skb2 = NULL;
2346 		}
2347 	}
2348 	return skb2;
2349 }
2350 EXPORT_SYMBOL(skb_realloc_headroom);
2351 
2352 /* Note: We plan to rework this in linux-6.4 */
2353 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
2354 {
2355 	unsigned int saved_end_offset, saved_truesize;
2356 	struct skb_shared_info *shinfo;
2357 	int res;
2358 
2359 	saved_end_offset = skb_end_offset(skb);
2360 	saved_truesize = skb->truesize;
2361 
2362 	res = pskb_expand_head(skb, 0, 0, pri);
2363 	if (res)
2364 		return res;
2365 
2366 	skb->truesize = saved_truesize;
2367 
2368 	if (likely(skb_end_offset(skb) == saved_end_offset))
2369 		return 0;
2370 
2371 	/* We can not change skb->end if the original or new value
2372 	 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
2373 	 */
2374 	if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
2375 	    skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
2376 		/* We think this path should not be taken.
2377 		 * Add a temporary trace to warn us just in case.
2378 		 */
2379 		pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
2380 			    saved_end_offset, skb_end_offset(skb));
2381 		WARN_ON_ONCE(1);
2382 		return 0;
2383 	}
2384 
2385 	shinfo = skb_shinfo(skb);
2386 
2387 	/* We are about to change back skb->end,
2388 	 * we need to move skb_shinfo() to its new location.
2389 	 */
2390 	memmove(skb->head + saved_end_offset,
2391 		shinfo,
2392 		offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
2393 
2394 	skb_set_end_offset(skb, saved_end_offset);
2395 
2396 	return 0;
2397 }
2398 
2399 /**
2400  *	skb_expand_head - reallocate header of &sk_buff
2401  *	@skb: buffer to reallocate
2402  *	@headroom: needed headroom
2403  *
2404  *	Unlike skb_realloc_headroom, this one does not allocate a new skb
2405  *	if possible; copies skb->sk to new skb as needed
2406  *	and frees original skb in case of failures.
2407  *
2408  *	It expect increased headroom and generates warning otherwise.
2409  */
2410 
2411 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
2412 {
2413 	int delta = headroom - skb_headroom(skb);
2414 	int osize = skb_end_offset(skb);
2415 	struct sock *sk = skb->sk;
2416 
2417 	if (WARN_ONCE(delta <= 0,
2418 		      "%s is expecting an increase in the headroom", __func__))
2419 		return skb;
2420 
2421 	delta = SKB_DATA_ALIGN(delta);
2422 	/* pskb_expand_head() might crash, if skb is shared. */
2423 	if (skb_shared(skb) || !is_skb_wmem(skb)) {
2424 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2425 
2426 		if (unlikely(!nskb))
2427 			goto fail;
2428 
2429 		if (sk)
2430 			skb_set_owner_w(nskb, sk);
2431 		consume_skb(skb);
2432 		skb = nskb;
2433 	}
2434 	if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
2435 		goto fail;
2436 
2437 	if (sk && is_skb_wmem(skb)) {
2438 		delta = skb_end_offset(skb) - osize;
2439 		refcount_add(delta, &sk->sk_wmem_alloc);
2440 		skb->truesize += delta;
2441 	}
2442 	return skb;
2443 
2444 fail:
2445 	kfree_skb(skb);
2446 	return NULL;
2447 }
2448 EXPORT_SYMBOL(skb_expand_head);
2449 
2450 /**
2451  *	skb_copy_expand	-	copy and expand sk_buff
2452  *	@skb: buffer to copy
2453  *	@newheadroom: new free bytes at head
2454  *	@newtailroom: new free bytes at tail
2455  *	@gfp_mask: allocation priority
2456  *
2457  *	Make a copy of both an &sk_buff and its data and while doing so
2458  *	allocate additional space.
2459  *
2460  *	This is used when the caller wishes to modify the data and needs a
2461  *	private copy of the data to alter as well as more space for new fields.
2462  *	Returns %NULL on failure or the pointer to the buffer
2463  *	on success. The returned buffer has a reference count of 1.
2464  *
2465  *	You must pass %GFP_ATOMIC as the allocation priority if this function
2466  *	is called from an interrupt.
2467  */
2468 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
2469 				int newheadroom, int newtailroom,
2470 				gfp_t gfp_mask)
2471 {
2472 	/*
2473 	 *	Allocate the copy buffer
2474 	 */
2475 	int head_copy_len, head_copy_off;
2476 	struct sk_buff *n;
2477 	int oldheadroom;
2478 
2479 	if (!skb_frags_readable(skb))
2480 		return NULL;
2481 
2482 	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
2483 		return NULL;
2484 
2485 	oldheadroom = skb_headroom(skb);
2486 	n = __alloc_skb(newheadroom + skb->len + newtailroom,
2487 			gfp_mask, skb_alloc_rx_flag(skb),
2488 			NUMA_NO_NODE);
2489 	if (!n)
2490 		return NULL;
2491 
2492 	skb_reserve(n, newheadroom);
2493 
2494 	/* Set the tail pointer and length */
2495 	skb_put(n, skb->len);
2496 
2497 	head_copy_len = oldheadroom;
2498 	head_copy_off = 0;
2499 	if (newheadroom <= head_copy_len)
2500 		head_copy_len = newheadroom;
2501 	else
2502 		head_copy_off = newheadroom - head_copy_len;
2503 
2504 	/* Copy the linear header and data. */
2505 	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
2506 			     skb->len + head_copy_len));
2507 
2508 	skb_copy_header(n, skb);
2509 
2510 	skb_headers_offset_update(n, newheadroom - oldheadroom);
2511 
2512 	return n;
2513 }
2514 EXPORT_SYMBOL(skb_copy_expand);
2515 
2516 /**
2517  *	__skb_pad		-	zero pad the tail of an skb
2518  *	@skb: buffer to pad
2519  *	@pad: space to pad
2520  *	@free_on_error: free buffer on error
2521  *
2522  *	Ensure that a buffer is followed by a padding area that is zero
2523  *	filled. Used by network drivers which may DMA or transfer data
2524  *	beyond the buffer end onto the wire.
2525  *
2526  *	May return error in out of memory cases. The skb is freed on error
2527  *	if @free_on_error is true.
2528  */
2529 
2530 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
2531 {
2532 	int err;
2533 	int ntail;
2534 
2535 	/* If the skbuff is non linear tailroom is always zero.. */
2536 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
2537 		memset(skb->data+skb->len, 0, pad);
2538 		return 0;
2539 	}
2540 
2541 	ntail = skb->data_len + pad - (skb->end - skb->tail);
2542 	if (likely(skb_cloned(skb) || ntail > 0)) {
2543 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
2544 		if (unlikely(err))
2545 			goto free_skb;
2546 	}
2547 
2548 	/* FIXME: The use of this function with non-linear skb's really needs
2549 	 * to be audited.
2550 	 */
2551 	err = skb_linearize(skb);
2552 	if (unlikely(err))
2553 		goto free_skb;
2554 
2555 	memset(skb->data + skb->len, 0, pad);
2556 	return 0;
2557 
2558 free_skb:
2559 	if (free_on_error)
2560 		kfree_skb(skb);
2561 	return err;
2562 }
2563 EXPORT_SYMBOL(__skb_pad);
2564 
2565 /**
2566  *	pskb_put - add data to the tail of a potentially fragmented buffer
2567  *	@skb: start of the buffer to use
2568  *	@tail: tail fragment of the buffer to use
2569  *	@len: amount of data to add
2570  *
2571  *	This function extends the used data area of the potentially
2572  *	fragmented buffer. @tail must be the last fragment of @skb -- or
2573  *	@skb itself. If this would exceed the total buffer size the kernel
2574  *	will panic. A pointer to the first byte of the extra data is
2575  *	returned.
2576  */
2577 
2578 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
2579 {
2580 	if (tail != skb) {
2581 		skb->data_len += len;
2582 		skb->len += len;
2583 	}
2584 	return skb_put(tail, len);
2585 }
2586 EXPORT_SYMBOL_GPL(pskb_put);
2587 
2588 /**
2589  *	skb_put - add data to a buffer
2590  *	@skb: buffer to use
2591  *	@len: amount of data to add
2592  *
2593  *	This function extends the used data area of the buffer. If this would
2594  *	exceed the total buffer size the kernel will panic. A pointer to the
2595  *	first byte of the extra data is returned.
2596  */
2597 void *skb_put(struct sk_buff *skb, unsigned int len)
2598 {
2599 	void *tmp = skb_tail_pointer(skb);
2600 	SKB_LINEAR_ASSERT(skb);
2601 	skb->tail += len;
2602 	skb->len  += len;
2603 	if (unlikely(skb->tail > skb->end))
2604 		skb_over_panic(skb, len, __builtin_return_address(0));
2605 	return tmp;
2606 }
2607 EXPORT_SYMBOL(skb_put);
2608 
2609 /**
2610  *	skb_push - add data to the start of a buffer
2611  *	@skb: buffer to use
2612  *	@len: amount of data to add
2613  *
2614  *	This function extends the used data area of the buffer at the buffer
2615  *	start. If this would exceed the total buffer headroom the kernel will
2616  *	panic. A pointer to the first byte of the extra data is returned.
2617  */
2618 void *skb_push(struct sk_buff *skb, unsigned int len)
2619 {
2620 	skb->data -= len;
2621 	skb->len  += len;
2622 	if (unlikely(skb->data < skb->head))
2623 		skb_under_panic(skb, len, __builtin_return_address(0));
2624 	return skb->data;
2625 }
2626 EXPORT_SYMBOL(skb_push);
2627 
2628 /**
2629  *	skb_pull - remove data from the start of a buffer
2630  *	@skb: buffer to use
2631  *	@len: amount of data to remove
2632  *
2633  *	This function removes data from the start of a buffer, returning
2634  *	the memory to the headroom. A pointer to the next data in the buffer
2635  *	is returned. Once the data has been pulled future pushes will overwrite
2636  *	the old data.
2637  */
2638 void *skb_pull(struct sk_buff *skb, unsigned int len)
2639 {
2640 	return skb_pull_inline(skb, len);
2641 }
2642 EXPORT_SYMBOL(skb_pull);
2643 
2644 /**
2645  *	skb_pull_data - remove data from the start of a buffer returning its
2646  *	original position.
2647  *	@skb: buffer to use
2648  *	@len: amount of data to remove
2649  *
2650  *	This function removes data from the start of a buffer, returning
2651  *	the memory to the headroom. A pointer to the original data in the buffer
2652  *	is returned after checking if there is enough data to pull. Once the
2653  *	data has been pulled future pushes will overwrite the old data.
2654  */
2655 void *skb_pull_data(struct sk_buff *skb, size_t len)
2656 {
2657 	void *data = skb->data;
2658 
2659 	if (skb->len < len)
2660 		return NULL;
2661 
2662 	skb_pull(skb, len);
2663 
2664 	return data;
2665 }
2666 EXPORT_SYMBOL(skb_pull_data);
2667 
2668 /**
2669  *	skb_trim - remove end from a buffer
2670  *	@skb: buffer to alter
2671  *	@len: new length
2672  *
2673  *	Cut the length of a buffer down by removing data from the tail. If
2674  *	the buffer is already under the length specified it is not modified.
2675  *	The skb must be linear.
2676  */
2677 void skb_trim(struct sk_buff *skb, unsigned int len)
2678 {
2679 	if (skb->len > len)
2680 		__skb_trim(skb, len);
2681 }
2682 EXPORT_SYMBOL(skb_trim);
2683 
2684 /* Trims skb to length len. It can change skb pointers.
2685  */
2686 
2687 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
2688 {
2689 	struct sk_buff **fragp;
2690 	struct sk_buff *frag;
2691 	int offset = skb_headlen(skb);
2692 	int nfrags = skb_shinfo(skb)->nr_frags;
2693 	int i;
2694 	int err;
2695 
2696 	if (skb_cloned(skb) &&
2697 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
2698 		return err;
2699 
2700 	i = 0;
2701 	if (offset >= len)
2702 		goto drop_pages;
2703 
2704 	for (; i < nfrags; i++) {
2705 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2706 
2707 		if (end < len) {
2708 			offset = end;
2709 			continue;
2710 		}
2711 
2712 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
2713 
2714 drop_pages:
2715 		skb_shinfo(skb)->nr_frags = i;
2716 
2717 		for (; i < nfrags; i++)
2718 			skb_frag_unref(skb, i);
2719 
2720 		if (skb_has_frag_list(skb))
2721 			skb_drop_fraglist(skb);
2722 		goto done;
2723 	}
2724 
2725 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2726 	     fragp = &frag->next) {
2727 		int end = offset + frag->len;
2728 
2729 		if (skb_shared(frag)) {
2730 			struct sk_buff *nfrag;
2731 
2732 			nfrag = skb_clone(frag, GFP_ATOMIC);
2733 			if (unlikely(!nfrag))
2734 				return -ENOMEM;
2735 
2736 			nfrag->next = frag->next;
2737 			consume_skb(frag);
2738 			frag = nfrag;
2739 			*fragp = frag;
2740 		}
2741 
2742 		if (end < len) {
2743 			offset = end;
2744 			continue;
2745 		}
2746 
2747 		if (end > len &&
2748 		    unlikely((err = pskb_trim(frag, len - offset))))
2749 			return err;
2750 
2751 		if (frag->next)
2752 			skb_drop_list(&frag->next);
2753 		break;
2754 	}
2755 
2756 done:
2757 	if (len > skb_headlen(skb)) {
2758 		skb->data_len -= skb->len - len;
2759 		skb->len       = len;
2760 	} else {
2761 		skb->len       = len;
2762 		skb->data_len  = 0;
2763 		skb_set_tail_pointer(skb, len);
2764 	}
2765 
2766 	if (!skb->sk || skb->destructor == sock_edemux)
2767 		skb_condense(skb);
2768 	return 0;
2769 }
2770 EXPORT_SYMBOL(___pskb_trim);
2771 
2772 /* Note : use pskb_trim_rcsum() instead of calling this directly
2773  */
2774 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2775 {
2776 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
2777 		int delta = skb->len - len;
2778 
2779 		skb->csum = csum_block_sub(skb->csum,
2780 					   skb_checksum(skb, len, delta, 0),
2781 					   len);
2782 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2783 		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2784 		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2785 
2786 		if (offset + sizeof(__sum16) > hdlen)
2787 			return -EINVAL;
2788 	}
2789 	return __pskb_trim(skb, len);
2790 }
2791 EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2792 
2793 /**
2794  *	__pskb_pull_tail - advance tail of skb header
2795  *	@skb: buffer to reallocate
2796  *	@delta: number of bytes to advance tail
2797  *
2798  *	The function makes a sense only on a fragmented &sk_buff,
2799  *	it expands header moving its tail forward and copying necessary
2800  *	data from fragmented part.
2801  *
2802  *	&sk_buff MUST have reference count of 1.
2803  *
2804  *	Returns %NULL (and &sk_buff does not change) if pull failed
2805  *	or value of new tail of skb in the case of success.
2806  *
2807  *	All the pointers pointing into skb header may change and must be
2808  *	reloaded after call to this function.
2809  */
2810 
2811 /* Moves tail of skb head forward, copying data from fragmented part,
2812  * when it is necessary.
2813  * 1. It may fail due to malloc failure.
2814  * 2. It may change skb pointers.
2815  *
2816  * It is pretty complicated. Luckily, it is called only in exceptional cases.
2817  */
2818 void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2819 {
2820 	/* If skb has not enough free space at tail, get new one
2821 	 * plus 128 bytes for future expansions. If we have enough
2822 	 * room at tail, reallocate without expansion only if skb is cloned.
2823 	 */
2824 	int i, k, eat = (skb->tail + delta) - skb->end;
2825 
2826 	if (!skb_frags_readable(skb))
2827 		return NULL;
2828 
2829 	if (eat > 0 || skb_cloned(skb)) {
2830 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2831 				     GFP_ATOMIC))
2832 			return NULL;
2833 	}
2834 
2835 	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2836 			     skb_tail_pointer(skb), delta));
2837 
2838 	/* Optimization: no fragments, no reasons to preestimate
2839 	 * size of pulled pages. Superb.
2840 	 */
2841 	if (!skb_has_frag_list(skb))
2842 		goto pull_pages;
2843 
2844 	/* Estimate size of pulled pages. */
2845 	eat = delta;
2846 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2847 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2848 
2849 		if (size >= eat)
2850 			goto pull_pages;
2851 		eat -= size;
2852 	}
2853 
2854 	/* If we need update frag list, we are in troubles.
2855 	 * Certainly, it is possible to add an offset to skb data,
2856 	 * but taking into account that pulling is expected to
2857 	 * be very rare operation, it is worth to fight against
2858 	 * further bloating skb head and crucify ourselves here instead.
2859 	 * Pure masohism, indeed. 8)8)
2860 	 */
2861 	if (eat) {
2862 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
2863 		struct sk_buff *clone = NULL;
2864 		struct sk_buff *insp = NULL;
2865 
2866 		do {
2867 			if (list->len <= eat) {
2868 				/* Eaten as whole. */
2869 				eat -= list->len;
2870 				list = list->next;
2871 				insp = list;
2872 			} else {
2873 				/* Eaten partially. */
2874 				if (skb_is_gso(skb) && !list->head_frag &&
2875 				    skb_headlen(list))
2876 					skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2877 
2878 				if (skb_shared(list)) {
2879 					/* Sucks! We need to fork list. :-( */
2880 					clone = skb_clone(list, GFP_ATOMIC);
2881 					if (!clone)
2882 						return NULL;
2883 					insp = list->next;
2884 					list = clone;
2885 				} else {
2886 					/* This may be pulled without
2887 					 * problems. */
2888 					insp = list;
2889 				}
2890 				if (!pskb_pull(list, eat)) {
2891 					kfree_skb(clone);
2892 					return NULL;
2893 				}
2894 				break;
2895 			}
2896 		} while (eat);
2897 
2898 		/* Free pulled out fragments. */
2899 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
2900 			skb_shinfo(skb)->frag_list = list->next;
2901 			consume_skb(list);
2902 		}
2903 		/* And insert new clone at head. */
2904 		if (clone) {
2905 			clone->next = list;
2906 			skb_shinfo(skb)->frag_list = clone;
2907 		}
2908 	}
2909 	/* Success! Now we may commit changes to skb data. */
2910 
2911 pull_pages:
2912 	eat = delta;
2913 	k = 0;
2914 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2915 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2916 
2917 		if (size <= eat) {
2918 			skb_frag_unref(skb, i);
2919 			eat -= size;
2920 		} else {
2921 			skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2922 
2923 			*frag = skb_shinfo(skb)->frags[i];
2924 			if (eat) {
2925 				skb_frag_off_add(frag, eat);
2926 				skb_frag_size_sub(frag, eat);
2927 				if (!i)
2928 					goto end;
2929 				eat = 0;
2930 			}
2931 			k++;
2932 		}
2933 	}
2934 	skb_shinfo(skb)->nr_frags = k;
2935 
2936 end:
2937 	skb->tail     += delta;
2938 	skb->data_len -= delta;
2939 
2940 	if (!skb->data_len)
2941 		skb_zcopy_clear(skb, false);
2942 
2943 	return skb_tail_pointer(skb);
2944 }
2945 EXPORT_SYMBOL(__pskb_pull_tail);
2946 
2947 /**
2948  *	skb_copy_bits - copy bits from skb to kernel buffer
2949  *	@skb: source skb
2950  *	@offset: offset in source
2951  *	@to: destination buffer
2952  *	@len: number of bytes to copy
2953  *
2954  *	Copy the specified number of bytes from the source skb to the
2955  *	destination buffer.
2956  *
2957  *	CAUTION ! :
2958  *		If its prototype is ever changed,
2959  *		check arch/{*}/net/{*}.S files,
2960  *		since it is called from BPF assembly code.
2961  */
2962 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2963 {
2964 	int start = skb_headlen(skb);
2965 	struct sk_buff *frag_iter;
2966 	int i, copy;
2967 
2968 	if (offset > (int)skb->len - len)
2969 		goto fault;
2970 
2971 	/* Copy header. */
2972 	if ((copy = start - offset) > 0) {
2973 		if (copy > len)
2974 			copy = len;
2975 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
2976 		if ((len -= copy) == 0)
2977 			return 0;
2978 		offset += copy;
2979 		to     += copy;
2980 	}
2981 
2982 	if (!skb_frags_readable(skb))
2983 		goto fault;
2984 
2985 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2986 		int end;
2987 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2988 
2989 		WARN_ON(start > offset + len);
2990 
2991 		end = start + skb_frag_size(f);
2992 		if ((copy = end - offset) > 0) {
2993 			u32 p_off, p_len, copied;
2994 			struct page *p;
2995 			u8 *vaddr;
2996 
2997 			if (copy > len)
2998 				copy = len;
2999 
3000 			skb_frag_foreach_page(f,
3001 					      skb_frag_off(f) + offset - start,
3002 					      copy, p, p_off, p_len, copied) {
3003 				vaddr = kmap_atomic(p);
3004 				memcpy(to + copied, vaddr + p_off, p_len);
3005 				kunmap_atomic(vaddr);
3006 			}
3007 
3008 			if ((len -= copy) == 0)
3009 				return 0;
3010 			offset += copy;
3011 			to     += copy;
3012 		}
3013 		start = end;
3014 	}
3015 
3016 	skb_walk_frags(skb, frag_iter) {
3017 		int end;
3018 
3019 		WARN_ON(start > offset + len);
3020 
3021 		end = start + frag_iter->len;
3022 		if ((copy = end - offset) > 0) {
3023 			if (copy > len)
3024 				copy = len;
3025 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
3026 				goto fault;
3027 			if ((len -= copy) == 0)
3028 				return 0;
3029 			offset += copy;
3030 			to     += copy;
3031 		}
3032 		start = end;
3033 	}
3034 
3035 	if (!len)
3036 		return 0;
3037 
3038 fault:
3039 	return -EFAULT;
3040 }
3041 EXPORT_SYMBOL(skb_copy_bits);
3042 
3043 /*
3044  * Callback from splice_to_pipe(), if we need to release some pages
3045  * at the end of the spd in case we error'ed out in filling the pipe.
3046  */
3047 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3048 {
3049 	put_page(spd->pages[i]);
3050 }
3051 
3052 static struct page *linear_to_page(struct page *page, unsigned int *len,
3053 				   unsigned int *offset,
3054 				   struct sock *sk)
3055 {
3056 	struct page_frag *pfrag = sk_page_frag(sk);
3057 
3058 	if (!sk_page_frag_refill(sk, pfrag))
3059 		return NULL;
3060 
3061 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
3062 
3063 	memcpy(page_address(pfrag->page) + pfrag->offset,
3064 	       page_address(page) + *offset, *len);
3065 	*offset = pfrag->offset;
3066 	pfrag->offset += *len;
3067 
3068 	return pfrag->page;
3069 }
3070 
3071 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
3072 			     struct page *page,
3073 			     unsigned int offset)
3074 {
3075 	return	spd->nr_pages &&
3076 		spd->pages[spd->nr_pages - 1] == page &&
3077 		(spd->partial[spd->nr_pages - 1].offset +
3078 		 spd->partial[spd->nr_pages - 1].len == offset);
3079 }
3080 
3081 /*
3082  * Fill page/offset/length into spd, if it can hold more pages.
3083  */
3084 static bool spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
3085 			  unsigned int *len, unsigned int offset, bool linear,
3086 			  struct sock *sk)
3087 {
3088 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
3089 		return true;
3090 
3091 	if (linear) {
3092 		page = linear_to_page(page, len, &offset, sk);
3093 		if (!page)
3094 			return true;
3095 	}
3096 	if (spd_can_coalesce(spd, page, offset)) {
3097 		spd->partial[spd->nr_pages - 1].len += *len;
3098 		return false;
3099 	}
3100 	get_page(page);
3101 	spd->pages[spd->nr_pages] = page;
3102 	spd->partial[spd->nr_pages].len = *len;
3103 	spd->partial[spd->nr_pages].offset = offset;
3104 	spd->nr_pages++;
3105 
3106 	return false;
3107 }
3108 
3109 static bool __splice_segment(struct page *page, unsigned int poff,
3110 			     unsigned int plen, unsigned int *off,
3111 			     unsigned int *len,
3112 			     struct splice_pipe_desc *spd, bool linear,
3113 			     struct sock *sk)
3114 {
3115 	if (!*len)
3116 		return true;
3117 
3118 	/* skip this segment if already processed */
3119 	if (*off >= plen) {
3120 		*off -= plen;
3121 		return false;
3122 	}
3123 
3124 	/* ignore any bits we already processed */
3125 	poff += *off;
3126 	plen -= *off;
3127 	*off = 0;
3128 
3129 	do {
3130 		unsigned int flen = min(*len, plen);
3131 
3132 		if (spd_fill_page(spd, page, &flen, poff, linear, sk))
3133 			return true;
3134 		poff += flen;
3135 		plen -= flen;
3136 		*len -= flen;
3137 		if (!*len)
3138 			return true;
3139 	} while (plen);
3140 
3141 	return false;
3142 }
3143 
3144 /*
3145  * Map linear and fragment data from the skb to spd. It reports true if the
3146  * pipe is full or if we already spliced the requested length.
3147  */
3148 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
3149 			      unsigned int *offset, unsigned int *len,
3150 			      struct splice_pipe_desc *spd, struct sock *sk)
3151 {
3152 	struct sk_buff *iter;
3153 	int seg;
3154 
3155 	/* map the linear part :
3156 	 * If skb->head_frag is set, this 'linear' part is backed by a
3157 	 * fragment, and if the head is not shared with any clones then
3158 	 * we can avoid a copy since we own the head portion of this page.
3159 	 */
3160 	if (__splice_segment(virt_to_page(skb->data),
3161 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
3162 			     skb_headlen(skb),
3163 			     offset, len, spd,
3164 			     skb_head_is_locked(skb),
3165 			     sk))
3166 		return true;
3167 
3168 	/*
3169 	 * then map the fragments
3170 	 */
3171 	if (!skb_frags_readable(skb))
3172 		return false;
3173 
3174 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
3175 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
3176 
3177 		if (WARN_ON_ONCE(!skb_frag_page(f)))
3178 			return false;
3179 
3180 		if (__splice_segment(skb_frag_page(f),
3181 				     skb_frag_off(f), skb_frag_size(f),
3182 				     offset, len, spd, false, sk))
3183 			return true;
3184 	}
3185 
3186 	skb_walk_frags(skb, iter) {
3187 		if (*offset >= iter->len) {
3188 			*offset -= iter->len;
3189 			continue;
3190 		}
3191 		/* __skb_splice_bits() only fails if the output has no room
3192 		 * left, so no point in going over the frag_list for the error
3193 		 * case.
3194 		 */
3195 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
3196 			return true;
3197 	}
3198 
3199 	return false;
3200 }
3201 
3202 /*
3203  * Map data from the skb to a pipe. Should handle both the linear part,
3204  * the fragments, and the frag list.
3205  */
3206 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3207 		    struct pipe_inode_info *pipe, unsigned int tlen,
3208 		    unsigned int flags)
3209 {
3210 	struct partial_page partial[MAX_SKB_FRAGS];
3211 	struct page *pages[MAX_SKB_FRAGS];
3212 	struct splice_pipe_desc spd = {
3213 		.pages = pages,
3214 		.partial = partial,
3215 		.nr_pages_max = MAX_SKB_FRAGS,
3216 		.ops = &nosteal_pipe_buf_ops,
3217 		.spd_release = sock_spd_release,
3218 	};
3219 	int ret = 0;
3220 
3221 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
3222 
3223 	if (spd.nr_pages)
3224 		ret = splice_to_pipe(pipe, &spd);
3225 
3226 	return ret;
3227 }
3228 EXPORT_SYMBOL_GPL(skb_splice_bits);
3229 
3230 static int sendmsg_locked(struct sock *sk, struct msghdr *msg)
3231 {
3232 	struct socket *sock = sk->sk_socket;
3233 	size_t size = msg_data_left(msg);
3234 
3235 	if (!sock)
3236 		return -EINVAL;
3237 
3238 	if (!sock->ops->sendmsg_locked)
3239 		return sock_no_sendmsg_locked(sk, msg, size);
3240 
3241 	return sock->ops->sendmsg_locked(sk, msg, size);
3242 }
3243 
3244 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
3245 {
3246 	struct socket *sock = sk->sk_socket;
3247 
3248 	if (!sock)
3249 		return -EINVAL;
3250 	return sock_sendmsg(sock, msg);
3251 }
3252 
3253 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
3254 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
3255 			   int len, sendmsg_func sendmsg, int flags)
3256 {
3257 	int more_hint = sk_is_tcp(sk) ? MSG_MORE : 0;
3258 	unsigned int orig_len = len;
3259 	struct sk_buff *head = skb;
3260 	unsigned short fragidx;
3261 	int slen, ret;
3262 
3263 do_frag_list:
3264 
3265 	/* Deal with head data */
3266 	while (offset < skb_headlen(skb) && len) {
3267 		struct kvec kv;
3268 		struct msghdr msg;
3269 
3270 		slen = min_t(int, len, skb_headlen(skb) - offset);
3271 		kv.iov_base = skb->data + offset;
3272 		kv.iov_len = slen;
3273 		memset(&msg, 0, sizeof(msg));
3274 		msg.msg_flags = MSG_DONTWAIT | flags;
3275 		if (slen < len)
3276 			msg.msg_flags |= more_hint;
3277 
3278 		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
3279 		ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
3280 				      sendmsg_unlocked, sk, &msg);
3281 		if (ret <= 0)
3282 			goto error;
3283 
3284 		offset += ret;
3285 		len -= ret;
3286 	}
3287 
3288 	/* All the data was skb head? */
3289 	if (!len)
3290 		goto out;
3291 
3292 	/* Make offset relative to start of frags */
3293 	offset -= skb_headlen(skb);
3294 
3295 	/* Find where we are in frag list */
3296 	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3297 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
3298 
3299 		if (offset < skb_frag_size(frag))
3300 			break;
3301 
3302 		offset -= skb_frag_size(frag);
3303 	}
3304 
3305 	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3306 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
3307 
3308 		slen = min_t(size_t, len, skb_frag_size(frag) - offset);
3309 
3310 		while (slen) {
3311 			struct bio_vec bvec;
3312 			struct msghdr msg = {
3313 				.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT |
3314 					     flags,
3315 			};
3316 
3317 			if (slen < len)
3318 				msg.msg_flags |= more_hint;
3319 			bvec_set_page(&bvec, skb_frag_page(frag), slen,
3320 				      skb_frag_off(frag) + offset);
3321 			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
3322 				      slen);
3323 
3324 			ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
3325 					      sendmsg_unlocked, sk, &msg);
3326 			if (ret <= 0)
3327 				goto error;
3328 
3329 			len -= ret;
3330 			offset += ret;
3331 			slen -= ret;
3332 		}
3333 
3334 		offset = 0;
3335 	}
3336 
3337 	if (len) {
3338 		/* Process any frag lists */
3339 
3340 		if (skb == head) {
3341 			if (skb_has_frag_list(skb)) {
3342 				skb = skb_shinfo(skb)->frag_list;
3343 				goto do_frag_list;
3344 			}
3345 		} else if (skb->next) {
3346 			skb = skb->next;
3347 			goto do_frag_list;
3348 		}
3349 	}
3350 
3351 out:
3352 	return orig_len - len;
3353 
3354 error:
3355 	return orig_len == len ? ret : orig_len - len;
3356 }
3357 
3358 /* Send skb data on a socket. Socket must be locked. */
3359 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3360 			 int len)
3361 {
3362 	return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0);
3363 }
3364 EXPORT_SYMBOL_GPL(skb_send_sock_locked);
3365 
3366 int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
3367 				    int offset, int len, int flags)
3368 {
3369 	return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags);
3370 }
3371 EXPORT_SYMBOL_GPL(skb_send_sock_locked_with_flags);
3372 
3373 /* Send skb data on a socket. Socket must be unlocked. */
3374 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
3375 {
3376 	return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0);
3377 }
3378 
3379 /**
3380  *	skb_store_bits - store bits from kernel buffer to skb
3381  *	@skb: destination buffer
3382  *	@offset: offset in destination
3383  *	@from: source buffer
3384  *	@len: number of bytes to copy
3385  *
3386  *	Copy the specified number of bytes from the source buffer to the
3387  *	destination skb.  This function handles all the messy bits of
3388  *	traversing fragment lists and such.
3389  */
3390 
3391 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
3392 {
3393 	int start = skb_headlen(skb);
3394 	struct sk_buff *frag_iter;
3395 	int i, copy;
3396 
3397 	if (offset > (int)skb->len - len)
3398 		goto fault;
3399 
3400 	if ((copy = start - offset) > 0) {
3401 		if (copy > len)
3402 			copy = len;
3403 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
3404 		if ((len -= copy) == 0)
3405 			return 0;
3406 		offset += copy;
3407 		from += copy;
3408 	}
3409 
3410 	if (!skb_frags_readable(skb))
3411 		goto fault;
3412 
3413 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3414 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3415 		int end;
3416 
3417 		WARN_ON(start > offset + len);
3418 
3419 		end = start + skb_frag_size(frag);
3420 		if ((copy = end - offset) > 0) {
3421 			u32 p_off, p_len, copied;
3422 			struct page *p;
3423 			u8 *vaddr;
3424 
3425 			if (copy > len)
3426 				copy = len;
3427 
3428 			skb_frag_foreach_page(frag,
3429 					      skb_frag_off(frag) + offset - start,
3430 					      copy, p, p_off, p_len, copied) {
3431 				vaddr = kmap_atomic(p);
3432 				memcpy(vaddr + p_off, from + copied, p_len);
3433 				kunmap_atomic(vaddr);
3434 			}
3435 
3436 			if ((len -= copy) == 0)
3437 				return 0;
3438 			offset += copy;
3439 			from += copy;
3440 		}
3441 		start = end;
3442 	}
3443 
3444 	skb_walk_frags(skb, frag_iter) {
3445 		int end;
3446 
3447 		WARN_ON(start > offset + len);
3448 
3449 		end = start + frag_iter->len;
3450 		if ((copy = end - offset) > 0) {
3451 			if (copy > len)
3452 				copy = len;
3453 			if (skb_store_bits(frag_iter, offset - start,
3454 					   from, copy))
3455 				goto fault;
3456 			if ((len -= copy) == 0)
3457 				return 0;
3458 			offset += copy;
3459 			from += copy;
3460 		}
3461 		start = end;
3462 	}
3463 	if (!len)
3464 		return 0;
3465 
3466 fault:
3467 	return -EFAULT;
3468 }
3469 EXPORT_SYMBOL(skb_store_bits);
3470 
3471 /* Checksum skb data. */
3472 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum)
3473 {
3474 	int start = skb_headlen(skb);
3475 	int i, copy = start - offset;
3476 	struct sk_buff *frag_iter;
3477 	int pos = 0;
3478 
3479 	/* Checksum header. */
3480 	if (copy > 0) {
3481 		if (copy > len)
3482 			copy = len;
3483 		csum = csum_partial(skb->data + offset, copy, csum);
3484 		if ((len -= copy) == 0)
3485 			return csum;
3486 		offset += copy;
3487 		pos	= copy;
3488 	}
3489 
3490 	if (WARN_ON_ONCE(!skb_frags_readable(skb)))
3491 		return 0;
3492 
3493 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3494 		int end;
3495 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3496 
3497 		WARN_ON(start > offset + len);
3498 
3499 		end = start + skb_frag_size(frag);
3500 		if ((copy = end - offset) > 0) {
3501 			u32 p_off, p_len, copied;
3502 			struct page *p;
3503 			__wsum csum2;
3504 			u8 *vaddr;
3505 
3506 			if (copy > len)
3507 				copy = len;
3508 
3509 			skb_frag_foreach_page(frag,
3510 					      skb_frag_off(frag) + offset - start,
3511 					      copy, p, p_off, p_len, copied) {
3512 				vaddr = kmap_atomic(p);
3513 				csum2 = csum_partial(vaddr + p_off, p_len, 0);
3514 				kunmap_atomic(vaddr);
3515 				csum = csum_block_add(csum, csum2, pos);
3516 				pos += p_len;
3517 			}
3518 
3519 			if (!(len -= copy))
3520 				return csum;
3521 			offset += copy;
3522 		}
3523 		start = end;
3524 	}
3525 
3526 	skb_walk_frags(skb, frag_iter) {
3527 		int end;
3528 
3529 		WARN_ON(start > offset + len);
3530 
3531 		end = start + frag_iter->len;
3532 		if ((copy = end - offset) > 0) {
3533 			__wsum csum2;
3534 			if (copy > len)
3535 				copy = len;
3536 			csum2 = skb_checksum(frag_iter, offset - start, copy,
3537 					     0);
3538 			csum = csum_block_add(csum, csum2, pos);
3539 			if ((len -= copy) == 0)
3540 				return csum;
3541 			offset += copy;
3542 			pos    += copy;
3543 		}
3544 		start = end;
3545 	}
3546 	BUG_ON(len);
3547 
3548 	return csum;
3549 }
3550 EXPORT_SYMBOL(skb_checksum);
3551 
3552 /* Both of above in one bottle. */
3553 
3554 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
3555 				    u8 *to, int len)
3556 {
3557 	int start = skb_headlen(skb);
3558 	int i, copy = start - offset;
3559 	struct sk_buff *frag_iter;
3560 	int pos = 0;
3561 	__wsum csum = 0;
3562 
3563 	/* Copy header. */
3564 	if (copy > 0) {
3565 		if (copy > len)
3566 			copy = len;
3567 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
3568 						 copy);
3569 		if ((len -= copy) == 0)
3570 			return csum;
3571 		offset += copy;
3572 		to     += copy;
3573 		pos	= copy;
3574 	}
3575 
3576 	if (!skb_frags_readable(skb))
3577 		return 0;
3578 
3579 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3580 		int end;
3581 
3582 		WARN_ON(start > offset + len);
3583 
3584 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3585 		if ((copy = end - offset) > 0) {
3586 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3587 			u32 p_off, p_len, copied;
3588 			struct page *p;
3589 			__wsum csum2;
3590 			u8 *vaddr;
3591 
3592 			if (copy > len)
3593 				copy = len;
3594 
3595 			skb_frag_foreach_page(frag,
3596 					      skb_frag_off(frag) + offset - start,
3597 					      copy, p, p_off, p_len, copied) {
3598 				vaddr = kmap_atomic(p);
3599 				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
3600 								  to + copied,
3601 								  p_len);
3602 				kunmap_atomic(vaddr);
3603 				csum = csum_block_add(csum, csum2, pos);
3604 				pos += p_len;
3605 			}
3606 
3607 			if (!(len -= copy))
3608 				return csum;
3609 			offset += copy;
3610 			to     += copy;
3611 		}
3612 		start = end;
3613 	}
3614 
3615 	skb_walk_frags(skb, frag_iter) {
3616 		__wsum csum2;
3617 		int end;
3618 
3619 		WARN_ON(start > offset + len);
3620 
3621 		end = start + frag_iter->len;
3622 		if ((copy = end - offset) > 0) {
3623 			if (copy > len)
3624 				copy = len;
3625 			csum2 = skb_copy_and_csum_bits(frag_iter,
3626 						       offset - start,
3627 						       to, copy);
3628 			csum = csum_block_add(csum, csum2, pos);
3629 			if ((len -= copy) == 0)
3630 				return csum;
3631 			offset += copy;
3632 			to     += copy;
3633 			pos    += copy;
3634 		}
3635 		start = end;
3636 	}
3637 	BUG_ON(len);
3638 	return csum;
3639 }
3640 EXPORT_SYMBOL(skb_copy_and_csum_bits);
3641 
3642 #ifdef CONFIG_NET_CRC32C
3643 u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc)
3644 {
3645 	int start = skb_headlen(skb);
3646 	int i, copy = start - offset;
3647 	struct sk_buff *frag_iter;
3648 
3649 	if (copy > 0) {
3650 		copy = min(copy, len);
3651 		crc = crc32c(crc, skb->data + offset, copy);
3652 		len -= copy;
3653 		if (len == 0)
3654 			return crc;
3655 		offset += copy;
3656 	}
3657 
3658 	if (WARN_ON_ONCE(!skb_frags_readable(skb)))
3659 		return 0;
3660 
3661 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3662 		int end;
3663 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3664 
3665 		WARN_ON(start > offset + len);
3666 
3667 		end = start + skb_frag_size(frag);
3668 		copy = end - offset;
3669 		if (copy > 0) {
3670 			u32 p_off, p_len, copied;
3671 			struct page *p;
3672 			u8 *vaddr;
3673 
3674 			copy = min(copy, len);
3675 			skb_frag_foreach_page(frag,
3676 					      skb_frag_off(frag) + offset - start,
3677 					      copy, p, p_off, p_len, copied) {
3678 				vaddr = kmap_atomic(p);
3679 				crc = crc32c(crc, vaddr + p_off, p_len);
3680 				kunmap_atomic(vaddr);
3681 			}
3682 			len -= copy;
3683 			if (len == 0)
3684 				return crc;
3685 			offset += copy;
3686 		}
3687 		start = end;
3688 	}
3689 
3690 	skb_walk_frags(skb, frag_iter) {
3691 		int end;
3692 
3693 		WARN_ON(start > offset + len);
3694 
3695 		end = start + frag_iter->len;
3696 		copy = end - offset;
3697 		if (copy > 0) {
3698 			copy = min(copy, len);
3699 			crc = skb_crc32c(frag_iter, offset - start, copy, crc);
3700 			len -= copy;
3701 			if (len == 0)
3702 				return crc;
3703 			offset += copy;
3704 		}
3705 		start = end;
3706 	}
3707 	BUG_ON(len);
3708 
3709 	return crc;
3710 }
3711 EXPORT_SYMBOL(skb_crc32c);
3712 #endif /* CONFIG_NET_CRC32C */
3713 
3714 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
3715 {
3716 	__sum16 sum;
3717 
3718 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
3719 	/* See comments in __skb_checksum_complete(). */
3720 	if (likely(!sum)) {
3721 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3722 		    !skb->csum_complete_sw)
3723 			netdev_rx_csum_fault(skb->dev, skb);
3724 	}
3725 	if (!skb_shared(skb))
3726 		skb->csum_valid = !sum;
3727 	return sum;
3728 }
3729 EXPORT_SYMBOL(__skb_checksum_complete_head);
3730 
3731 /* This function assumes skb->csum already holds pseudo header's checksum,
3732  * which has been changed from the hardware checksum, for example, by
3733  * __skb_checksum_validate_complete(). And, the original skb->csum must
3734  * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
3735  *
3736  * It returns non-zero if the recomputed checksum is still invalid, otherwise
3737  * zero. The new checksum is stored back into skb->csum unless the skb is
3738  * shared.
3739  */
3740 __sum16 __skb_checksum_complete(struct sk_buff *skb)
3741 {
3742 	__wsum csum;
3743 	__sum16 sum;
3744 
3745 	csum = skb_checksum(skb, 0, skb->len, 0);
3746 
3747 	sum = csum_fold(csum_add(skb->csum, csum));
3748 	/* This check is inverted, because we already knew the hardware
3749 	 * checksum is invalid before calling this function. So, if the
3750 	 * re-computed checksum is valid instead, then we have a mismatch
3751 	 * between the original skb->csum and skb_checksum(). This means either
3752 	 * the original hardware checksum is incorrect or we screw up skb->csum
3753 	 * when moving skb->data around.
3754 	 */
3755 	if (likely(!sum)) {
3756 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3757 		    !skb->csum_complete_sw)
3758 			netdev_rx_csum_fault(skb->dev, skb);
3759 	}
3760 
3761 	if (!skb_shared(skb)) {
3762 		/* Save full packet checksum */
3763 		skb->csum = csum;
3764 		skb->ip_summed = CHECKSUM_COMPLETE;
3765 		skb->csum_complete_sw = 1;
3766 		skb->csum_valid = !sum;
3767 	}
3768 
3769 	return sum;
3770 }
3771 EXPORT_SYMBOL(__skb_checksum_complete);
3772 
3773  /**
3774  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3775  *	@from: source buffer
3776  *
3777  *	Calculates the amount of linear headroom needed in the 'to' skb passed
3778  *	into skb_zerocopy().
3779  */
3780 unsigned int
3781 skb_zerocopy_headlen(const struct sk_buff *from)
3782 {
3783 	unsigned int hlen = 0;
3784 
3785 	if (!from->head_frag ||
3786 	    skb_headlen(from) < L1_CACHE_BYTES ||
3787 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
3788 		hlen = skb_headlen(from);
3789 		if (!hlen)
3790 			hlen = from->len;
3791 	}
3792 
3793 	if (skb_has_frag_list(from))
3794 		hlen = from->len;
3795 
3796 	return hlen;
3797 }
3798 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
3799 
3800 /**
3801  *	skb_zerocopy - Zero copy skb to skb
3802  *	@to: destination buffer
3803  *	@from: source buffer
3804  *	@len: number of bytes to copy from source buffer
3805  *	@hlen: size of linear headroom in destination buffer
3806  *
3807  *	Copies up to `len` bytes from `from` to `to` by creating references
3808  *	to the frags in the source buffer.
3809  *
3810  *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
3811  *	headroom in the `to` buffer.
3812  *
3813  *	Return value:
3814  *	0: everything is OK
3815  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
3816  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
3817  */
3818 int
3819 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
3820 {
3821 	int i, j = 0;
3822 	int plen = 0; /* length of skb->head fragment */
3823 	int ret;
3824 	struct page *page;
3825 	unsigned int offset;
3826 
3827 	BUG_ON(!from->head_frag && !hlen);
3828 
3829 	/* dont bother with small payloads */
3830 	if (len <= skb_tailroom(to))
3831 		return skb_copy_bits(from, 0, skb_put(to, len), len);
3832 
3833 	if (hlen) {
3834 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
3835 		if (unlikely(ret))
3836 			return ret;
3837 		len -= hlen;
3838 	} else {
3839 		plen = min_t(int, skb_headlen(from), len);
3840 		if (plen) {
3841 			page = virt_to_head_page(from->head);
3842 			offset = from->data - (unsigned char *)page_address(page);
3843 			__skb_fill_netmem_desc(to, 0, page_to_netmem(page),
3844 					       offset, plen);
3845 			get_page(page);
3846 			j = 1;
3847 			len -= plen;
3848 		}
3849 	}
3850 
3851 	skb_len_add(to, len + plen);
3852 
3853 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
3854 		skb_tx_error(from);
3855 		return -ENOMEM;
3856 	}
3857 	skb_zerocopy_clone(to, from, GFP_ATOMIC);
3858 
3859 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
3860 		int size;
3861 
3862 		if (!len)
3863 			break;
3864 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
3865 		size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
3866 					len);
3867 		skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
3868 		len -= size;
3869 		skb_frag_ref(to, j);
3870 		j++;
3871 	}
3872 	skb_shinfo(to)->nr_frags = j;
3873 
3874 	return 0;
3875 }
3876 EXPORT_SYMBOL_GPL(skb_zerocopy);
3877 
3878 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3879 {
3880 	__wsum csum;
3881 	long csstart;
3882 
3883 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3884 		csstart = skb_checksum_start_offset(skb);
3885 	else
3886 		csstart = skb_headlen(skb);
3887 
3888 	BUG_ON(csstart > skb_headlen(skb));
3889 
3890 	skb_copy_from_linear_data(skb, to, csstart);
3891 
3892 	csum = 0;
3893 	if (csstart != skb->len)
3894 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3895 					      skb->len - csstart);
3896 
3897 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3898 		long csstuff = csstart + skb->csum_offset;
3899 
3900 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
3901 	}
3902 }
3903 EXPORT_SYMBOL(skb_copy_and_csum_dev);
3904 
3905 /**
3906  *	skb_dequeue - remove from the head of the queue
3907  *	@list: list to dequeue from
3908  *
3909  *	Remove the head of the list. The list lock is taken so the function
3910  *	may be used safely with other locking list functions. The head item is
3911  *	returned or %NULL if the list is empty.
3912  */
3913 
3914 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3915 {
3916 	unsigned long flags;
3917 	struct sk_buff *result;
3918 
3919 	spin_lock_irqsave(&list->lock, flags);
3920 	result = __skb_dequeue(list);
3921 	spin_unlock_irqrestore(&list->lock, flags);
3922 	return result;
3923 }
3924 EXPORT_SYMBOL(skb_dequeue);
3925 
3926 /**
3927  *	skb_dequeue_tail - remove from the tail of the queue
3928  *	@list: list to dequeue from
3929  *
3930  *	Remove the tail of the list. The list lock is taken so the function
3931  *	may be used safely with other locking list functions. The tail item is
3932  *	returned or %NULL if the list is empty.
3933  */
3934 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3935 {
3936 	unsigned long flags;
3937 	struct sk_buff *result;
3938 
3939 	spin_lock_irqsave(&list->lock, flags);
3940 	result = __skb_dequeue_tail(list);
3941 	spin_unlock_irqrestore(&list->lock, flags);
3942 	return result;
3943 }
3944 EXPORT_SYMBOL(skb_dequeue_tail);
3945 
3946 /**
3947  *	skb_queue_purge_reason - empty a list
3948  *	@list: list to empty
3949  *	@reason: drop reason
3950  *
3951  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
3952  *	the list and one reference dropped. This function takes the list
3953  *	lock and is atomic with respect to other list locking functions.
3954  */
3955 void skb_queue_purge_reason(struct sk_buff_head *list,
3956 			    enum skb_drop_reason reason)
3957 {
3958 	struct sk_buff_head tmp;
3959 	unsigned long flags;
3960 
3961 	if (skb_queue_empty_lockless(list))
3962 		return;
3963 
3964 	__skb_queue_head_init(&tmp);
3965 
3966 	spin_lock_irqsave(&list->lock, flags);
3967 	skb_queue_splice_init(list, &tmp);
3968 	spin_unlock_irqrestore(&list->lock, flags);
3969 
3970 	__skb_queue_purge_reason(&tmp, reason);
3971 }
3972 EXPORT_SYMBOL(skb_queue_purge_reason);
3973 
3974 /**
3975  *	skb_rbtree_purge - empty a skb rbtree
3976  *	@root: root of the rbtree to empty
3977  *	Return value: the sum of truesizes of all purged skbs.
3978  *
3979  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3980  *	the list and one reference dropped. This function does not take
3981  *	any lock. Synchronization should be handled by the caller (e.g., TCP
3982  *	out-of-order queue is protected by the socket lock).
3983  */
3984 unsigned int skb_rbtree_purge(struct rb_root *root)
3985 {
3986 	struct rb_node *p = rb_first(root);
3987 	unsigned int sum = 0;
3988 
3989 	while (p) {
3990 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3991 
3992 		p = rb_next(p);
3993 		rb_erase(&skb->rbnode, root);
3994 		sum += skb->truesize;
3995 		kfree_skb(skb);
3996 	}
3997 	return sum;
3998 }
3999 
4000 void skb_errqueue_purge(struct sk_buff_head *list)
4001 {
4002 	struct sk_buff *skb, *next;
4003 	struct sk_buff_head kill;
4004 	unsigned long flags;
4005 
4006 	__skb_queue_head_init(&kill);
4007 
4008 	spin_lock_irqsave(&list->lock, flags);
4009 	skb_queue_walk_safe(list, skb, next) {
4010 		if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY ||
4011 		    SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
4012 			continue;
4013 		__skb_unlink(skb, list);
4014 		__skb_queue_tail(&kill, skb);
4015 	}
4016 	spin_unlock_irqrestore(&list->lock, flags);
4017 	__skb_queue_purge(&kill);
4018 }
4019 EXPORT_SYMBOL(skb_errqueue_purge);
4020 
4021 /**
4022  *	skb_queue_head - queue a buffer at the list head
4023  *	@list: list to use
4024  *	@newsk: buffer to queue
4025  *
4026  *	Queue a buffer at the start of the list. This function takes the
4027  *	list lock and can be used safely with other locking &sk_buff functions
4028  *	safely.
4029  *
4030  *	A buffer cannot be placed on two lists at the same time.
4031  */
4032 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
4033 {
4034 	unsigned long flags;
4035 
4036 	spin_lock_irqsave(&list->lock, flags);
4037 	__skb_queue_head(list, newsk);
4038 	spin_unlock_irqrestore(&list->lock, flags);
4039 }
4040 EXPORT_SYMBOL(skb_queue_head);
4041 
4042 /**
4043  *	skb_queue_tail - queue a buffer at the list tail
4044  *	@list: list to use
4045  *	@newsk: buffer to queue
4046  *
4047  *	Queue a buffer at the tail of the list. This function takes the
4048  *	list lock and can be used safely with other locking &sk_buff functions
4049  *	safely.
4050  *
4051  *	A buffer cannot be placed on two lists at the same time.
4052  */
4053 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
4054 {
4055 	unsigned long flags;
4056 
4057 	spin_lock_irqsave(&list->lock, flags);
4058 	__skb_queue_tail(list, newsk);
4059 	spin_unlock_irqrestore(&list->lock, flags);
4060 }
4061 EXPORT_SYMBOL(skb_queue_tail);
4062 
4063 /**
4064  *	skb_unlink	-	remove a buffer from a list
4065  *	@skb: buffer to remove
4066  *	@list: list to use
4067  *
4068  *	Remove a packet from a list. The list locks are taken and this
4069  *	function is atomic with respect to other list locked calls
4070  *
4071  *	You must know what list the SKB is on.
4072  */
4073 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
4074 {
4075 	unsigned long flags;
4076 
4077 	spin_lock_irqsave(&list->lock, flags);
4078 	__skb_unlink(skb, list);
4079 	spin_unlock_irqrestore(&list->lock, flags);
4080 }
4081 EXPORT_SYMBOL(skb_unlink);
4082 
4083 /**
4084  *	skb_append	-	append a buffer
4085  *	@old: buffer to insert after
4086  *	@newsk: buffer to insert
4087  *	@list: list to use
4088  *
4089  *	Place a packet after a given packet in a list. The list locks are taken
4090  *	and this function is atomic with respect to other list locked calls.
4091  *	A buffer cannot be placed on two lists at the same time.
4092  */
4093 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
4094 {
4095 	unsigned long flags;
4096 
4097 	spin_lock_irqsave(&list->lock, flags);
4098 	__skb_queue_after(list, old, newsk);
4099 	spin_unlock_irqrestore(&list->lock, flags);
4100 }
4101 EXPORT_SYMBOL(skb_append);
4102 
4103 static inline void skb_split_inside_header(struct sk_buff *skb,
4104 					   struct sk_buff* skb1,
4105 					   const u32 len, const int pos)
4106 {
4107 	int i;
4108 
4109 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
4110 					 pos - len);
4111 	/* And move data appendix as is. */
4112 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
4113 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
4114 
4115 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
4116 	skb1->unreadable	   = skb->unreadable;
4117 	skb_shinfo(skb)->nr_frags  = 0;
4118 	skb1->data_len		   = skb->data_len;
4119 	skb1->len		   += skb1->data_len;
4120 	skb->data_len		   = 0;
4121 	skb->len		   = len;
4122 	skb_set_tail_pointer(skb, len);
4123 }
4124 
4125 static inline void skb_split_no_header(struct sk_buff *skb,
4126 				       struct sk_buff* skb1,
4127 				       const u32 len, int pos)
4128 {
4129 	int i, k = 0;
4130 	const int nfrags = skb_shinfo(skb)->nr_frags;
4131 
4132 	skb_shinfo(skb)->nr_frags = 0;
4133 	skb1->len		  = skb1->data_len = skb->len - len;
4134 	skb->len		  = len;
4135 	skb->data_len		  = len - pos;
4136 
4137 	for (i = 0; i < nfrags; i++) {
4138 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
4139 
4140 		if (pos + size > len) {
4141 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
4142 
4143 			if (pos < len) {
4144 				/* Split frag.
4145 				 * We have two variants in this case:
4146 				 * 1. Move all the frag to the second
4147 				 *    part, if it is possible. F.e.
4148 				 *    this approach is mandatory for TUX,
4149 				 *    where splitting is expensive.
4150 				 * 2. Split is accurately. We make this.
4151 				 */
4152 				skb_frag_ref(skb, i);
4153 				skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
4154 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
4155 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
4156 				skb_shinfo(skb)->nr_frags++;
4157 			}
4158 			k++;
4159 		} else
4160 			skb_shinfo(skb)->nr_frags++;
4161 		pos += size;
4162 	}
4163 	skb_shinfo(skb1)->nr_frags = k;
4164 
4165 	skb1->unreadable = skb->unreadable;
4166 }
4167 
4168 /**
4169  * skb_split - Split fragmented skb to two parts at length len.
4170  * @skb: the buffer to split
4171  * @skb1: the buffer to receive the second part
4172  * @len: new length for skb
4173  */
4174 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
4175 {
4176 	int pos = skb_headlen(skb);
4177 	const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
4178 
4179 	skb_zcopy_downgrade_managed(skb);
4180 
4181 	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
4182 	skb_zerocopy_clone(skb1, skb, 0);
4183 	if (len < pos)	/* Split line is inside header. */
4184 		skb_split_inside_header(skb, skb1, len, pos);
4185 	else		/* Second chunk has no header, nothing to copy. */
4186 		skb_split_no_header(skb, skb1, len, pos);
4187 }
4188 EXPORT_SYMBOL(skb_split);
4189 
4190 /* Shifting from/to a cloned skb is a no-go.
4191  *
4192  * Caller cannot keep skb_shinfo related pointers past calling here!
4193  */
4194 static int skb_prepare_for_shift(struct sk_buff *skb)
4195 {
4196 	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
4197 }
4198 
4199 /**
4200  * skb_shift - Shifts paged data partially from skb to another
4201  * @tgt: buffer into which tail data gets added
4202  * @skb: buffer from which the paged data comes from
4203  * @shiftlen: shift up to this many bytes
4204  *
4205  * Attempts to shift up to shiftlen worth of bytes, which may be less than
4206  * the length of the skb, from skb to tgt. Returns number bytes shifted.
4207  * It's up to caller to free skb if everything was shifted.
4208  *
4209  * If @tgt runs out of frags, the whole operation is aborted.
4210  *
4211  * Skb cannot include anything else but paged data while tgt is allowed
4212  * to have non-paged data as well.
4213  *
4214  * TODO: full sized shift could be optimized but that would need
4215  * specialized skb free'er to handle frags without up-to-date nr_frags.
4216  */
4217 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
4218 {
4219 	int from, to, merge, todo;
4220 	skb_frag_t *fragfrom, *fragto;
4221 
4222 	BUG_ON(shiftlen > skb->len);
4223 
4224 	if (skb_headlen(skb))
4225 		return 0;
4226 	if (skb_zcopy(tgt) || skb_zcopy(skb))
4227 		return 0;
4228 
4229 	DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle);
4230 	DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb));
4231 
4232 	todo = shiftlen;
4233 	from = 0;
4234 	to = skb_shinfo(tgt)->nr_frags;
4235 	fragfrom = &skb_shinfo(skb)->frags[from];
4236 
4237 	/* Actual merge is delayed until the point when we know we can
4238 	 * commit all, so that we don't have to undo partial changes
4239 	 */
4240 	if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
4241 			      skb_frag_off(fragfrom))) {
4242 		merge = -1;
4243 	} else {
4244 		merge = to - 1;
4245 
4246 		todo -= skb_frag_size(fragfrom);
4247 		if (todo < 0) {
4248 			if (skb_prepare_for_shift(skb) ||
4249 			    skb_prepare_for_shift(tgt))
4250 				return 0;
4251 
4252 			/* All previous frag pointers might be stale! */
4253 			fragfrom = &skb_shinfo(skb)->frags[from];
4254 			fragto = &skb_shinfo(tgt)->frags[merge];
4255 
4256 			skb_frag_size_add(fragto, shiftlen);
4257 			skb_frag_size_sub(fragfrom, shiftlen);
4258 			skb_frag_off_add(fragfrom, shiftlen);
4259 
4260 			goto onlymerged;
4261 		}
4262 
4263 		from++;
4264 	}
4265 
4266 	/* Skip full, not-fitting skb to avoid expensive operations */
4267 	if ((shiftlen == skb->len) &&
4268 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
4269 		return 0;
4270 
4271 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
4272 		return 0;
4273 
4274 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
4275 		if (to == MAX_SKB_FRAGS)
4276 			return 0;
4277 
4278 		fragfrom = &skb_shinfo(skb)->frags[from];
4279 		fragto = &skb_shinfo(tgt)->frags[to];
4280 
4281 		if (todo >= skb_frag_size(fragfrom)) {
4282 			*fragto = *fragfrom;
4283 			todo -= skb_frag_size(fragfrom);
4284 			from++;
4285 			to++;
4286 
4287 		} else {
4288 			__skb_frag_ref(fragfrom);
4289 			skb_frag_page_copy(fragto, fragfrom);
4290 			skb_frag_off_copy(fragto, fragfrom);
4291 			skb_frag_size_set(fragto, todo);
4292 
4293 			skb_frag_off_add(fragfrom, todo);
4294 			skb_frag_size_sub(fragfrom, todo);
4295 			todo = 0;
4296 
4297 			to++;
4298 			break;
4299 		}
4300 	}
4301 
4302 	/* Ready to "commit" this state change to tgt */
4303 	skb_shinfo(tgt)->nr_frags = to;
4304 
4305 	if (merge >= 0) {
4306 		fragfrom = &skb_shinfo(skb)->frags[0];
4307 		fragto = &skb_shinfo(tgt)->frags[merge];
4308 
4309 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
4310 		__skb_frag_unref(fragfrom, skb->pp_recycle);
4311 	}
4312 
4313 	/* Reposition in the original skb */
4314 	to = 0;
4315 	while (from < skb_shinfo(skb)->nr_frags)
4316 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
4317 	skb_shinfo(skb)->nr_frags = to;
4318 
4319 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
4320 
4321 onlymerged:
4322 	/* Most likely the tgt won't ever need its checksum anymore, skb on
4323 	 * the other hand might need it if it needs to be resent
4324 	 */
4325 	tgt->ip_summed = CHECKSUM_PARTIAL;
4326 	skb->ip_summed = CHECKSUM_PARTIAL;
4327 
4328 	skb_len_add(skb, -shiftlen);
4329 	skb_len_add(tgt, shiftlen);
4330 
4331 	return shiftlen;
4332 }
4333 
4334 /**
4335  * skb_prepare_seq_read - Prepare a sequential read of skb data
4336  * @skb: the buffer to read
4337  * @from: lower offset of data to be read
4338  * @to: upper offset of data to be read
4339  * @st: state variable
4340  *
4341  * Initializes the specified state variable. Must be called before
4342  * invoking skb_seq_read() for the first time.
4343  */
4344 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
4345 			  unsigned int to, struct skb_seq_state *st)
4346 {
4347 	st->lower_offset = from;
4348 	st->upper_offset = to;
4349 	st->root_skb = st->cur_skb = skb;
4350 	st->frag_idx = st->stepped_offset = 0;
4351 	st->frag_data = NULL;
4352 	st->frag_off = 0;
4353 }
4354 EXPORT_SYMBOL(skb_prepare_seq_read);
4355 
4356 /**
4357  * skb_seq_read - Sequentially read skb data
4358  * @consumed: number of bytes consumed by the caller so far
4359  * @data: destination pointer for data to be returned
4360  * @st: state variable
4361  *
4362  * Reads a block of skb data at @consumed relative to the
4363  * lower offset specified to skb_prepare_seq_read(). Assigns
4364  * the head of the data block to @data and returns the length
4365  * of the block or 0 if the end of the skb data or the upper
4366  * offset has been reached.
4367  *
4368  * The caller is not required to consume all of the data
4369  * returned, i.e. @consumed is typically set to the number
4370  * of bytes already consumed and the next call to
4371  * skb_seq_read() will return the remaining part of the block.
4372  *
4373  * Note 1: The size of each block of data returned can be arbitrary,
4374  *       this limitation is the cost for zerocopy sequential
4375  *       reads of potentially non linear data.
4376  *
4377  * Note 2: Fragment lists within fragments are not implemented
4378  *       at the moment, state->root_skb could be replaced with
4379  *       a stack for this purpose.
4380  */
4381 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
4382 			  struct skb_seq_state *st)
4383 {
4384 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
4385 	skb_frag_t *frag;
4386 
4387 	if (unlikely(abs_offset >= st->upper_offset)) {
4388 		if (st->frag_data) {
4389 			kunmap_atomic(st->frag_data);
4390 			st->frag_data = NULL;
4391 		}
4392 		return 0;
4393 	}
4394 
4395 next_skb:
4396 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
4397 
4398 	if (abs_offset < block_limit && !st->frag_data) {
4399 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
4400 		return block_limit - abs_offset;
4401 	}
4402 
4403 	if (!skb_frags_readable(st->cur_skb))
4404 		return 0;
4405 
4406 	if (st->frag_idx == 0 && !st->frag_data)
4407 		st->stepped_offset += skb_headlen(st->cur_skb);
4408 
4409 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
4410 		unsigned int pg_idx, pg_off, pg_sz;
4411 
4412 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
4413 
4414 		pg_idx = 0;
4415 		pg_off = skb_frag_off(frag);
4416 		pg_sz = skb_frag_size(frag);
4417 
4418 		if (skb_frag_must_loop(skb_frag_page(frag))) {
4419 			pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
4420 			pg_off = offset_in_page(pg_off + st->frag_off);
4421 			pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
4422 						    PAGE_SIZE - pg_off);
4423 		}
4424 
4425 		block_limit = pg_sz + st->stepped_offset;
4426 		if (abs_offset < block_limit) {
4427 			if (!st->frag_data)
4428 				st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
4429 
4430 			*data = (u8 *)st->frag_data + pg_off +
4431 				(abs_offset - st->stepped_offset);
4432 
4433 			return block_limit - abs_offset;
4434 		}
4435 
4436 		if (st->frag_data) {
4437 			kunmap_atomic(st->frag_data);
4438 			st->frag_data = NULL;
4439 		}
4440 
4441 		st->stepped_offset += pg_sz;
4442 		st->frag_off += pg_sz;
4443 		if (st->frag_off == skb_frag_size(frag)) {
4444 			st->frag_off = 0;
4445 			st->frag_idx++;
4446 		}
4447 	}
4448 
4449 	if (st->frag_data) {
4450 		kunmap_atomic(st->frag_data);
4451 		st->frag_data = NULL;
4452 	}
4453 
4454 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
4455 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
4456 		st->frag_idx = 0;
4457 		goto next_skb;
4458 	} else if (st->cur_skb->next) {
4459 		st->cur_skb = st->cur_skb->next;
4460 		st->frag_idx = 0;
4461 		goto next_skb;
4462 	}
4463 
4464 	return 0;
4465 }
4466 EXPORT_SYMBOL(skb_seq_read);
4467 
4468 /**
4469  * skb_abort_seq_read - Abort a sequential read of skb data
4470  * @st: state variable
4471  *
4472  * Must be called if skb_seq_read() was not called until it
4473  * returned 0.
4474  */
4475 void skb_abort_seq_read(struct skb_seq_state *st)
4476 {
4477 	if (st->frag_data)
4478 		kunmap_atomic(st->frag_data);
4479 }
4480 EXPORT_SYMBOL(skb_abort_seq_read);
4481 
4482 /**
4483  * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
4484  * @st: source skb_seq_state
4485  * @offset: offset in source
4486  * @to: destination buffer
4487  * @len: number of bytes to copy
4488  *
4489  * Copy @len bytes from @offset bytes into the source @st to the destination
4490  * buffer @to. `offset` should increase (or be unchanged) with each subsequent
4491  * call to this function. If offset needs to decrease from the previous use `st`
4492  * should be reset first.
4493  *
4494  * Return: 0 on success or -EINVAL if the copy ended early
4495  */
4496 int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len)
4497 {
4498 	const u8 *data;
4499 	u32 sqlen;
4500 
4501 	for (;;) {
4502 		sqlen = skb_seq_read(offset, &data, st);
4503 		if (sqlen == 0)
4504 			return -EINVAL;
4505 		if (sqlen >= len) {
4506 			memcpy(to, data, len);
4507 			return 0;
4508 		}
4509 		memcpy(to, data, sqlen);
4510 		to += sqlen;
4511 		offset += sqlen;
4512 		len -= sqlen;
4513 	}
4514 }
4515 EXPORT_SYMBOL(skb_copy_seq_read);
4516 
4517 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
4518 
4519 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
4520 					  struct ts_config *conf,
4521 					  struct ts_state *state)
4522 {
4523 	return skb_seq_read(offset, text, TS_SKB_CB(state));
4524 }
4525 
4526 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
4527 {
4528 	skb_abort_seq_read(TS_SKB_CB(state));
4529 }
4530 
4531 /**
4532  * skb_find_text - Find a text pattern in skb data
4533  * @skb: the buffer to look in
4534  * @from: search offset
4535  * @to: search limit
4536  * @config: textsearch configuration
4537  *
4538  * Finds a pattern in the skb data according to the specified
4539  * textsearch configuration. Use textsearch_next() to retrieve
4540  * subsequent occurrences of the pattern. Returns the offset
4541  * to the first occurrence or UINT_MAX if no match was found.
4542  */
4543 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
4544 			   unsigned int to, struct ts_config *config)
4545 {
4546 	unsigned int patlen = config->ops->get_pattern_len(config);
4547 	struct ts_state state;
4548 	unsigned int ret;
4549 
4550 	BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
4551 
4552 	config->get_next_block = skb_ts_get_next_block;
4553 	config->finish = skb_ts_finish;
4554 
4555 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
4556 
4557 	ret = textsearch_find(config, &state);
4558 	return (ret + patlen <= to - from ? ret : UINT_MAX);
4559 }
4560 EXPORT_SYMBOL(skb_find_text);
4561 
4562 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
4563 			 int offset, size_t size, size_t max_frags)
4564 {
4565 	int i = skb_shinfo(skb)->nr_frags;
4566 
4567 	if (skb_can_coalesce(skb, i, page, offset)) {
4568 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
4569 	} else if (i < max_frags) {
4570 		skb_zcopy_downgrade_managed(skb);
4571 		get_page(page);
4572 		skb_fill_page_desc_noacc(skb, i, page, offset, size);
4573 	} else {
4574 		return -EMSGSIZE;
4575 	}
4576 
4577 	return 0;
4578 }
4579 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
4580 
4581 /**
4582  *	skb_pull_rcsum - pull skb and update receive checksum
4583  *	@skb: buffer to update
4584  *	@len: length of data pulled
4585  *
4586  *	This function performs an skb_pull on the packet and updates
4587  *	the CHECKSUM_COMPLETE checksum.  It should be used on
4588  *	receive path processing instead of skb_pull unless you know
4589  *	that the checksum difference is zero (e.g., a valid IP header)
4590  *	or you are setting ip_summed to CHECKSUM_NONE.
4591  */
4592 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
4593 {
4594 	unsigned char *data = skb->data;
4595 
4596 	BUG_ON(len > skb->len);
4597 	__skb_pull(skb, len);
4598 	skb_postpull_rcsum(skb, data, len);
4599 	return skb->data;
4600 }
4601 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
4602 
4603 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
4604 {
4605 	skb_frag_t head_frag;
4606 	struct page *page;
4607 
4608 	page = virt_to_head_page(frag_skb->head);
4609 	skb_frag_fill_page_desc(&head_frag, page, frag_skb->data -
4610 				(unsigned char *)page_address(page),
4611 				skb_headlen(frag_skb));
4612 	return head_frag;
4613 }
4614 
4615 struct sk_buff *skb_segment_list(struct sk_buff *skb,
4616 				 netdev_features_t features,
4617 				 unsigned int offset)
4618 {
4619 	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
4620 	unsigned int tnl_hlen = skb_tnl_header_len(skb);
4621 	unsigned int delta_truesize = 0;
4622 	unsigned int delta_len = 0;
4623 	struct sk_buff *tail = NULL;
4624 	struct sk_buff *nskb, *tmp;
4625 	int len_diff, err;
4626 
4627 	skb_push(skb, -skb_network_offset(skb) + offset);
4628 
4629 	/* Ensure the head is writeable before touching the shared info */
4630 	err = skb_unclone(skb, GFP_ATOMIC);
4631 	if (err)
4632 		goto err_linearize;
4633 
4634 	skb_shinfo(skb)->frag_list = NULL;
4635 
4636 	while (list_skb) {
4637 		nskb = list_skb;
4638 		list_skb = list_skb->next;
4639 
4640 		err = 0;
4641 		delta_truesize += nskb->truesize;
4642 		if (skb_shared(nskb)) {
4643 			tmp = skb_clone(nskb, GFP_ATOMIC);
4644 			if (tmp) {
4645 				consume_skb(nskb);
4646 				nskb = tmp;
4647 				err = skb_unclone(nskb, GFP_ATOMIC);
4648 			} else {
4649 				err = -ENOMEM;
4650 			}
4651 		}
4652 
4653 		if (!tail)
4654 			skb->next = nskb;
4655 		else
4656 			tail->next = nskb;
4657 
4658 		if (unlikely(err)) {
4659 			nskb->next = list_skb;
4660 			goto err_linearize;
4661 		}
4662 
4663 		tail = nskb;
4664 
4665 		delta_len += nskb->len;
4666 
4667 		skb_push(nskb, -skb_network_offset(nskb) + offset);
4668 
4669 		skb_release_head_state(nskb);
4670 		len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
4671 		__copy_skb_header(nskb, skb);
4672 
4673 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
4674 		nskb->transport_header += len_diff;
4675 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
4676 						 nskb->data - tnl_hlen,
4677 						 offset + tnl_hlen);
4678 
4679 		if (skb_needs_linearize(nskb, features) &&
4680 		    __skb_linearize(nskb))
4681 			goto err_linearize;
4682 	}
4683 
4684 	skb->truesize = skb->truesize - delta_truesize;
4685 	skb->data_len = skb->data_len - delta_len;
4686 	skb->len = skb->len - delta_len;
4687 
4688 	skb_gso_reset(skb);
4689 
4690 	skb->prev = tail;
4691 
4692 	if (skb_needs_linearize(skb, features) &&
4693 	    __skb_linearize(skb))
4694 		goto err_linearize;
4695 
4696 	skb_get(skb);
4697 
4698 	return skb;
4699 
4700 err_linearize:
4701 	kfree_skb_list(skb->next);
4702 	skb->next = NULL;
4703 	return ERR_PTR(-ENOMEM);
4704 }
4705 EXPORT_SYMBOL_GPL(skb_segment_list);
4706 
4707 /**
4708  *	skb_segment - Perform protocol segmentation on skb.
4709  *	@head_skb: buffer to segment
4710  *	@features: features for the output path (see dev->features)
4711  *
4712  *	This function performs segmentation on the given skb.  It returns
4713  *	a pointer to the first in a list of new skbs for the segments.
4714  *	In case of error it returns ERR_PTR(err).
4715  */
4716 struct sk_buff *skb_segment(struct sk_buff *head_skb,
4717 			    netdev_features_t features)
4718 {
4719 	struct sk_buff *segs = NULL;
4720 	struct sk_buff *tail = NULL;
4721 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
4722 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
4723 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
4724 	unsigned int offset = doffset;
4725 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
4726 	unsigned int partial_segs = 0;
4727 	unsigned int headroom;
4728 	unsigned int len = head_skb->len;
4729 	struct sk_buff *frag_skb;
4730 	skb_frag_t *frag;
4731 	__be16 proto;
4732 	bool csum, sg;
4733 	int err = -ENOMEM;
4734 	int i = 0;
4735 	int nfrags, pos;
4736 
4737 	if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
4738 	    mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
4739 		struct sk_buff *check_skb;
4740 
4741 		for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
4742 			if (skb_headlen(check_skb) && !check_skb->head_frag) {
4743 				/* gso_size is untrusted, and we have a frag_list with
4744 				 * a linear non head_frag item.
4745 				 *
4746 				 * If head_skb's headlen does not fit requested gso_size,
4747 				 * it means that the frag_list members do NOT terminate
4748 				 * on exact gso_size boundaries. Hence we cannot perform
4749 				 * skb_frag_t page sharing. Therefore we must fallback to
4750 				 * copying the frag_list skbs; we do so by disabling SG.
4751 				 */
4752 				features &= ~NETIF_F_SG;
4753 				break;
4754 			}
4755 		}
4756 	}
4757 
4758 	__skb_push(head_skb, doffset);
4759 	proto = skb_network_protocol(head_skb, NULL);
4760 	if (unlikely(!proto))
4761 		return ERR_PTR(-EINVAL);
4762 
4763 	sg = !!(features & NETIF_F_SG);
4764 	csum = !!can_checksum_protocol(features, proto);
4765 
4766 	if (sg && csum && (mss != GSO_BY_FRAGS))  {
4767 		if (!(features & NETIF_F_GSO_PARTIAL)) {
4768 			struct sk_buff *iter;
4769 			unsigned int frag_len;
4770 
4771 			if (!list_skb ||
4772 			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
4773 				goto normal;
4774 
4775 			/* If we get here then all the required
4776 			 * GSO features except frag_list are supported.
4777 			 * Try to split the SKB to multiple GSO SKBs
4778 			 * with no frag_list.
4779 			 * Currently we can do that only when the buffers don't
4780 			 * have a linear part and all the buffers except
4781 			 * the last are of the same length.
4782 			 */
4783 			frag_len = list_skb->len;
4784 			skb_walk_frags(head_skb, iter) {
4785 				if (frag_len != iter->len && iter->next)
4786 					goto normal;
4787 				if (skb_headlen(iter) && !iter->head_frag)
4788 					goto normal;
4789 
4790 				len -= iter->len;
4791 			}
4792 
4793 			if (len != frag_len)
4794 				goto normal;
4795 		}
4796 
4797 		/* GSO partial only requires that we trim off any excess that
4798 		 * doesn't fit into an MSS sized block, so take care of that
4799 		 * now.
4800 		 * Cap len to not accidentally hit GSO_BY_FRAGS.
4801 		 */
4802 		partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
4803 		if (partial_segs > 1)
4804 			mss *= partial_segs;
4805 		else
4806 			partial_segs = 0;
4807 	}
4808 
4809 normal:
4810 	headroom = skb_headroom(head_skb);
4811 	pos = skb_headlen(head_skb);
4812 
4813 	if (skb_orphan_frags(head_skb, GFP_ATOMIC))
4814 		return ERR_PTR(-ENOMEM);
4815 
4816 	nfrags = skb_shinfo(head_skb)->nr_frags;
4817 	frag = skb_shinfo(head_skb)->frags;
4818 	frag_skb = head_skb;
4819 
4820 	do {
4821 		struct sk_buff *nskb;
4822 		skb_frag_t *nskb_frag;
4823 		int hsize;
4824 		int size;
4825 
4826 		if (unlikely(mss == GSO_BY_FRAGS)) {
4827 			len = list_skb->len;
4828 		} else {
4829 			len = head_skb->len - offset;
4830 			if (len > mss)
4831 				len = mss;
4832 		}
4833 
4834 		hsize = skb_headlen(head_skb) - offset;
4835 
4836 		if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
4837 		    (skb_headlen(list_skb) == len || sg)) {
4838 			BUG_ON(skb_headlen(list_skb) > len);
4839 
4840 			nskb = skb_clone(list_skb, GFP_ATOMIC);
4841 			if (unlikely(!nskb))
4842 				goto err;
4843 
4844 			i = 0;
4845 			nfrags = skb_shinfo(list_skb)->nr_frags;
4846 			frag = skb_shinfo(list_skb)->frags;
4847 			frag_skb = list_skb;
4848 			pos += skb_headlen(list_skb);
4849 
4850 			while (pos < offset + len) {
4851 				BUG_ON(i >= nfrags);
4852 
4853 				size = skb_frag_size(frag);
4854 				if (pos + size > offset + len)
4855 					break;
4856 
4857 				i++;
4858 				pos += size;
4859 				frag++;
4860 			}
4861 
4862 			list_skb = list_skb->next;
4863 
4864 			if (unlikely(pskb_trim(nskb, len))) {
4865 				kfree_skb(nskb);
4866 				goto err;
4867 			}
4868 
4869 			hsize = skb_end_offset(nskb);
4870 			if (skb_cow_head(nskb, doffset + headroom)) {
4871 				kfree_skb(nskb);
4872 				goto err;
4873 			}
4874 
4875 			nskb->truesize += skb_end_offset(nskb) - hsize;
4876 			skb_release_head_state(nskb);
4877 			__skb_push(nskb, doffset);
4878 		} else {
4879 			if (hsize < 0)
4880 				hsize = 0;
4881 			if (hsize > len || !sg)
4882 				hsize = len;
4883 
4884 			nskb = __alloc_skb(hsize + doffset + headroom,
4885 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
4886 					   NUMA_NO_NODE);
4887 
4888 			if (unlikely(!nskb))
4889 				goto err;
4890 
4891 			skb_reserve(nskb, headroom);
4892 			__skb_put(nskb, doffset);
4893 		}
4894 
4895 		if (segs)
4896 			tail->next = nskb;
4897 		else
4898 			segs = nskb;
4899 		tail = nskb;
4900 
4901 		__copy_skb_header(nskb, head_skb);
4902 
4903 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
4904 		skb_reset_mac_len(nskb);
4905 
4906 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
4907 						 nskb->data - tnl_hlen,
4908 						 doffset + tnl_hlen);
4909 
4910 		if (nskb->len == len + doffset)
4911 			goto perform_csum_check;
4912 
4913 		if (!sg) {
4914 			if (!csum) {
4915 				if (!nskb->remcsum_offload)
4916 					nskb->ip_summed = CHECKSUM_NONE;
4917 				SKB_GSO_CB(nskb)->csum =
4918 					skb_copy_and_csum_bits(head_skb, offset,
4919 							       skb_put(nskb,
4920 								       len),
4921 							       len);
4922 				SKB_GSO_CB(nskb)->csum_start =
4923 					skb_headroom(nskb) + doffset;
4924 			} else {
4925 				if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
4926 					goto err;
4927 			}
4928 			continue;
4929 		}
4930 
4931 		nskb_frag = skb_shinfo(nskb)->frags;
4932 
4933 		skb_copy_from_linear_data_offset(head_skb, offset,
4934 						 skb_put(nskb, hsize), hsize);
4935 
4936 		skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
4937 					   SKBFL_SHARED_FRAG;
4938 
4939 		if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4940 			goto err;
4941 
4942 		while (pos < offset + len) {
4943 			if (i >= nfrags) {
4944 				if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
4945 				    skb_zerocopy_clone(nskb, list_skb,
4946 						       GFP_ATOMIC))
4947 					goto err;
4948 
4949 				i = 0;
4950 				nfrags = skb_shinfo(list_skb)->nr_frags;
4951 				frag = skb_shinfo(list_skb)->frags;
4952 				frag_skb = list_skb;
4953 				if (!skb_headlen(list_skb)) {
4954 					BUG_ON(!nfrags);
4955 				} else {
4956 					BUG_ON(!list_skb->head_frag);
4957 
4958 					/* to make room for head_frag. */
4959 					i--;
4960 					frag--;
4961 				}
4962 
4963 				list_skb = list_skb->next;
4964 			}
4965 
4966 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
4967 				     MAX_SKB_FRAGS)) {
4968 				net_warn_ratelimited(
4969 					"skb_segment: too many frags: %u %u\n",
4970 					pos, mss);
4971 				err = -EINVAL;
4972 				goto err;
4973 			}
4974 
4975 			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
4976 			__skb_frag_ref(nskb_frag);
4977 			size = skb_frag_size(nskb_frag);
4978 
4979 			if (pos < offset) {
4980 				skb_frag_off_add(nskb_frag, offset - pos);
4981 				skb_frag_size_sub(nskb_frag, offset - pos);
4982 			}
4983 
4984 			skb_shinfo(nskb)->nr_frags++;
4985 
4986 			if (pos + size <= offset + len) {
4987 				i++;
4988 				frag++;
4989 				pos += size;
4990 			} else {
4991 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4992 				goto skip_fraglist;
4993 			}
4994 
4995 			nskb_frag++;
4996 		}
4997 
4998 skip_fraglist:
4999 		nskb->data_len = len - hsize;
5000 		nskb->len += nskb->data_len;
5001 		nskb->truesize += nskb->data_len;
5002 
5003 perform_csum_check:
5004 		if (!csum) {
5005 			if (skb_has_shared_frag(nskb) &&
5006 			    __skb_linearize(nskb))
5007 				goto err;
5008 
5009 			if (!nskb->remcsum_offload)
5010 				nskb->ip_summed = CHECKSUM_NONE;
5011 			SKB_GSO_CB(nskb)->csum =
5012 				skb_checksum(nskb, doffset,
5013 					     nskb->len - doffset, 0);
5014 			SKB_GSO_CB(nskb)->csum_start =
5015 				skb_headroom(nskb) + doffset;
5016 		}
5017 	} while ((offset += len) < head_skb->len);
5018 
5019 	/* Some callers want to get the end of the list.
5020 	 * Put it in segs->prev to avoid walking the list.
5021 	 * (see validate_xmit_skb_list() for example)
5022 	 */
5023 	segs->prev = tail;
5024 
5025 	if (partial_segs) {
5026 		struct sk_buff *iter;
5027 		int type = skb_shinfo(head_skb)->gso_type;
5028 		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
5029 
5030 		/* Update type to add partial and then remove dodgy if set */
5031 		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
5032 		type &= ~SKB_GSO_DODGY;
5033 
5034 		/* Update GSO info and prepare to start updating headers on
5035 		 * our way back down the stack of protocols.
5036 		 */
5037 		for (iter = segs; iter; iter = iter->next) {
5038 			skb_shinfo(iter)->gso_size = gso_size;
5039 			skb_shinfo(iter)->gso_segs = partial_segs;
5040 			skb_shinfo(iter)->gso_type = type;
5041 			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
5042 		}
5043 
5044 		if (tail->len - doffset <= gso_size)
5045 			skb_shinfo(tail)->gso_size = 0;
5046 		else if (tail != segs)
5047 			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
5048 	}
5049 
5050 	/* Following permits correct backpressure, for protocols
5051 	 * using skb_set_owner_w().
5052 	 * Idea is to tranfert ownership from head_skb to last segment.
5053 	 */
5054 	if (head_skb->destructor == sock_wfree) {
5055 		swap(tail->truesize, head_skb->truesize);
5056 		swap(tail->destructor, head_skb->destructor);
5057 		swap(tail->sk, head_skb->sk);
5058 	}
5059 	return segs;
5060 
5061 err:
5062 	kfree_skb_list(segs);
5063 	return ERR_PTR(err);
5064 }
5065 EXPORT_SYMBOL_GPL(skb_segment);
5066 
5067 #ifdef CONFIG_SKB_EXTENSIONS
5068 #define SKB_EXT_ALIGN_VALUE	8
5069 #define SKB_EXT_CHUNKSIZEOF(x)	(ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
5070 
5071 static const u8 skb_ext_type_len[] = {
5072 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
5073 	[SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
5074 #endif
5075 #ifdef CONFIG_XFRM
5076 	[SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
5077 #endif
5078 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5079 	[TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
5080 #endif
5081 #if IS_ENABLED(CONFIG_MPTCP)
5082 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
5083 #endif
5084 #if IS_ENABLED(CONFIG_MCTP_FLOWS)
5085 	[SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
5086 #endif
5087 #if IS_ENABLED(CONFIG_INET_PSP)
5088 	[SKB_EXT_PSP] = SKB_EXT_CHUNKSIZEOF(struct psp_skb_ext),
5089 #endif
5090 };
5091 
5092 static __always_inline unsigned int skb_ext_total_length(void)
5093 {
5094 	unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext);
5095 	int i;
5096 
5097 	for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++)
5098 		l += skb_ext_type_len[i];
5099 
5100 	return l;
5101 }
5102 
5103 static void skb_extensions_init(void)
5104 {
5105 	BUILD_BUG_ON(SKB_EXT_NUM >= 8);
5106 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
5107 	BUILD_BUG_ON(skb_ext_total_length() > 255);
5108 #endif
5109 
5110 	skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
5111 					     SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
5112 					     0,
5113 					     SLAB_HWCACHE_ALIGN|SLAB_PANIC,
5114 					     NULL);
5115 }
5116 #else
5117 static void skb_extensions_init(void) {}
5118 #endif
5119 
5120 /* The SKB kmem_cache slab is critical for network performance.  Never
5121  * merge/alias the slab with similar sized objects.  This avoids fragmentation
5122  * that hurts performance of kmem_cache_{alloc,free}_bulk APIs.
5123  */
5124 #ifndef CONFIG_SLUB_TINY
5125 #define FLAG_SKB_NO_MERGE	SLAB_NO_MERGE
5126 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
5127 #define FLAG_SKB_NO_MERGE	0
5128 #endif
5129 
5130 void __init skb_init(void)
5131 {
5132 	net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
5133 					      sizeof(struct sk_buff),
5134 					      0,
5135 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC|
5136 						FLAG_SKB_NO_MERGE,
5137 					      offsetof(struct sk_buff, cb),
5138 					      sizeof_field(struct sk_buff, cb),
5139 					      NULL);
5140 	skbuff_cache_size = kmem_cache_size(net_hotdata.skbuff_cache);
5141 
5142 	net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
5143 						sizeof(struct sk_buff_fclones),
5144 						0,
5145 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
5146 						NULL);
5147 	/* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes.
5148 	 * struct skb_shared_info is located at the end of skb->head,
5149 	 * and should not be copied to/from user.
5150 	 */
5151 	net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
5152 						SKB_SMALL_HEAD_CACHE_SIZE,
5153 						0,
5154 						SLAB_HWCACHE_ALIGN | SLAB_PANIC,
5155 						0,
5156 						SKB_SMALL_HEAD_HEADROOM,
5157 						NULL);
5158 	skb_extensions_init();
5159 }
5160 
5161 static int
5162 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
5163 	       unsigned int recursion_level)
5164 {
5165 	int start = skb_headlen(skb);
5166 	int i, copy = start - offset;
5167 	struct sk_buff *frag_iter;
5168 	int elt = 0;
5169 
5170 	if (unlikely(recursion_level >= 24))
5171 		return -EMSGSIZE;
5172 
5173 	if (copy > 0) {
5174 		if (copy > len)
5175 			copy = len;
5176 		sg_set_buf(sg, skb->data + offset, copy);
5177 		elt++;
5178 		if ((len -= copy) == 0)
5179 			return elt;
5180 		offset += copy;
5181 	}
5182 
5183 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5184 		int end;
5185 
5186 		WARN_ON(start > offset + len);
5187 
5188 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
5189 		if ((copy = end - offset) > 0) {
5190 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5191 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
5192 				return -EMSGSIZE;
5193 
5194 			if (copy > len)
5195 				copy = len;
5196 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
5197 				    skb_frag_off(frag) + offset - start);
5198 			elt++;
5199 			if (!(len -= copy))
5200 				return elt;
5201 			offset += copy;
5202 		}
5203 		start = end;
5204 	}
5205 
5206 	skb_walk_frags(skb, frag_iter) {
5207 		int end, ret;
5208 
5209 		WARN_ON(start > offset + len);
5210 
5211 		end = start + frag_iter->len;
5212 		if ((copy = end - offset) > 0) {
5213 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
5214 				return -EMSGSIZE;
5215 
5216 			if (copy > len)
5217 				copy = len;
5218 			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
5219 					      copy, recursion_level + 1);
5220 			if (unlikely(ret < 0))
5221 				return ret;
5222 			elt += ret;
5223 			if ((len -= copy) == 0)
5224 				return elt;
5225 			offset += copy;
5226 		}
5227 		start = end;
5228 	}
5229 	BUG_ON(len);
5230 	return elt;
5231 }
5232 
5233 /**
5234  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
5235  *	@skb: Socket buffer containing the buffers to be mapped
5236  *	@sg: The scatter-gather list to map into
5237  *	@offset: The offset into the buffer's contents to start mapping
5238  *	@len: Length of buffer space to be mapped
5239  *
5240  *	Fill the specified scatter-gather list with mappings/pointers into a
5241  *	region of the buffer space attached to a socket buffer. Returns either
5242  *	the number of scatterlist items used, or -EMSGSIZE if the contents
5243  *	could not fit.
5244  */
5245 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
5246 {
5247 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
5248 
5249 	if (nsg <= 0)
5250 		return nsg;
5251 
5252 	sg_mark_end(&sg[nsg - 1]);
5253 
5254 	return nsg;
5255 }
5256 EXPORT_SYMBOL_GPL(skb_to_sgvec);
5257 
5258 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
5259  * sglist without mark the sg which contain last skb data as the end.
5260  * So the caller can mannipulate sg list as will when padding new data after
5261  * the first call without calling sg_unmark_end to expend sg list.
5262  *
5263  * Scenario to use skb_to_sgvec_nomark:
5264  * 1. sg_init_table
5265  * 2. skb_to_sgvec_nomark(payload1)
5266  * 3. skb_to_sgvec_nomark(payload2)
5267  *
5268  * This is equivalent to:
5269  * 1. sg_init_table
5270  * 2. skb_to_sgvec(payload1)
5271  * 3. sg_unmark_end
5272  * 4. skb_to_sgvec(payload2)
5273  *
5274  * When mapping multiple payload conditionally, skb_to_sgvec_nomark
5275  * is more preferable.
5276  */
5277 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
5278 			int offset, int len)
5279 {
5280 	return __skb_to_sgvec(skb, sg, offset, len, 0);
5281 }
5282 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
5283 
5284 
5285 
5286 /**
5287  *	skb_cow_data - Check that a socket buffer's data buffers are writable
5288  *	@skb: The socket buffer to check.
5289  *	@tailbits: Amount of trailing space to be added
5290  *	@trailer: Returned pointer to the skb where the @tailbits space begins
5291  *
5292  *	Make sure that the data buffers attached to a socket buffer are
5293  *	writable. If they are not, private copies are made of the data buffers
5294  *	and the socket buffer is set to use these instead.
5295  *
5296  *	If @tailbits is given, make sure that there is space to write @tailbits
5297  *	bytes of data beyond current end of socket buffer.  @trailer will be
5298  *	set to point to the skb in which this space begins.
5299  *
5300  *	The number of scatterlist elements required to completely map the
5301  *	COW'd and extended socket buffer will be returned.
5302  */
5303 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
5304 {
5305 	int copyflag;
5306 	int elt;
5307 	struct sk_buff *skb1, **skb_p;
5308 
5309 	/* If skb is cloned or its head is paged, reallocate
5310 	 * head pulling out all the pages (pages are considered not writable
5311 	 * at the moment even if they are anonymous).
5312 	 */
5313 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
5314 	    !__pskb_pull_tail(skb, __skb_pagelen(skb)))
5315 		return -ENOMEM;
5316 
5317 	/* Easy case. Most of packets will go this way. */
5318 	if (!skb_has_frag_list(skb)) {
5319 		/* A little of trouble, not enough of space for trailer.
5320 		 * This should not happen, when stack is tuned to generate
5321 		 * good frames. OK, on miss we reallocate and reserve even more
5322 		 * space, 128 bytes is fair. */
5323 
5324 		if (skb_tailroom(skb) < tailbits &&
5325 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
5326 			return -ENOMEM;
5327 
5328 		/* Voila! */
5329 		*trailer = skb;
5330 		return 1;
5331 	}
5332 
5333 	/* Misery. We are in troubles, going to mincer fragments... */
5334 
5335 	elt = 1;
5336 	skb_p = &skb_shinfo(skb)->frag_list;
5337 	copyflag = 0;
5338 
5339 	while ((skb1 = *skb_p) != NULL) {
5340 		int ntail = 0;
5341 
5342 		/* The fragment is partially pulled by someone,
5343 		 * this can happen on input. Copy it and everything
5344 		 * after it. */
5345 
5346 		if (skb_shared(skb1))
5347 			copyflag = 1;
5348 
5349 		/* If the skb is the last, worry about trailer. */
5350 
5351 		if (skb1->next == NULL && tailbits) {
5352 			if (skb_shinfo(skb1)->nr_frags ||
5353 			    skb_has_frag_list(skb1) ||
5354 			    skb_tailroom(skb1) < tailbits)
5355 				ntail = tailbits + 128;
5356 		}
5357 
5358 		if (copyflag ||
5359 		    skb_cloned(skb1) ||
5360 		    ntail ||
5361 		    skb_shinfo(skb1)->nr_frags ||
5362 		    skb_has_frag_list(skb1)) {
5363 			struct sk_buff *skb2;
5364 
5365 			/* Fuck, we are miserable poor guys... */
5366 			if (ntail == 0)
5367 				skb2 = skb_copy(skb1, GFP_ATOMIC);
5368 			else
5369 				skb2 = skb_copy_expand(skb1,
5370 						       skb_headroom(skb1),
5371 						       ntail,
5372 						       GFP_ATOMIC);
5373 			if (unlikely(skb2 == NULL))
5374 				return -ENOMEM;
5375 
5376 			if (skb1->sk)
5377 				skb_set_owner_w(skb2, skb1->sk);
5378 
5379 			/* Looking around. Are we still alive?
5380 			 * OK, link new skb, drop old one */
5381 
5382 			skb2->next = skb1->next;
5383 			*skb_p = skb2;
5384 			kfree_skb(skb1);
5385 			skb1 = skb2;
5386 		}
5387 		elt++;
5388 		*trailer = skb1;
5389 		skb_p = &skb1->next;
5390 	}
5391 
5392 	return elt;
5393 }
5394 EXPORT_SYMBOL_GPL(skb_cow_data);
5395 
5396 static void sock_rmem_free(struct sk_buff *skb)
5397 {
5398 	struct sock *sk = skb->sk;
5399 
5400 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
5401 }
5402 
5403 static void skb_set_err_queue(struct sk_buff *skb)
5404 {
5405 	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
5406 	 * So, it is safe to (mis)use it to mark skbs on the error queue.
5407 	 */
5408 	skb->pkt_type = PACKET_OUTGOING;
5409 	BUILD_BUG_ON(PACKET_OUTGOING == 0);
5410 }
5411 
5412 /*
5413  * Note: We dont mem charge error packets (no sk_forward_alloc changes)
5414  */
5415 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5416 {
5417 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
5418 	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
5419 		return -ENOMEM;
5420 
5421 	skb_orphan(skb);
5422 	skb->sk = sk;
5423 	skb->destructor = sock_rmem_free;
5424 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
5425 	skb_set_err_queue(skb);
5426 
5427 	/* before exiting rcu section, make sure dst is refcounted */
5428 	skb_dst_force(skb);
5429 
5430 	skb_queue_tail(&sk->sk_error_queue, skb);
5431 	if (!sock_flag(sk, SOCK_DEAD))
5432 		sk_error_report(sk);
5433 	return 0;
5434 }
5435 EXPORT_SYMBOL(sock_queue_err_skb);
5436 
5437 static bool is_icmp_err_skb(const struct sk_buff *skb)
5438 {
5439 	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
5440 		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
5441 }
5442 
5443 struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
5444 {
5445 	struct sk_buff_head *q = &sk->sk_error_queue;
5446 	struct sk_buff *skb, *skb_next = NULL;
5447 	bool icmp_next = false;
5448 	unsigned long flags;
5449 
5450 	if (skb_queue_empty_lockless(q))
5451 		return NULL;
5452 
5453 	spin_lock_irqsave(&q->lock, flags);
5454 	skb = __skb_dequeue(q);
5455 	if (skb && (skb_next = skb_peek(q))) {
5456 		icmp_next = is_icmp_err_skb(skb_next);
5457 		if (icmp_next)
5458 			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
5459 	}
5460 	spin_unlock_irqrestore(&q->lock, flags);
5461 
5462 	if (is_icmp_err_skb(skb) && !icmp_next)
5463 		sk->sk_err = 0;
5464 
5465 	if (skb_next)
5466 		sk_error_report(sk);
5467 
5468 	return skb;
5469 }
5470 EXPORT_SYMBOL(sock_dequeue_err_skb);
5471 
5472 /**
5473  * skb_clone_sk - create clone of skb, and take reference to socket
5474  * @skb: the skb to clone
5475  *
5476  * This function creates a clone of a buffer that holds a reference on
5477  * sk_refcnt.  Buffers created via this function are meant to be
5478  * returned using sock_queue_err_skb, or free via kfree_skb.
5479  *
5480  * When passing buffers allocated with this function to sock_queue_err_skb
5481  * it is necessary to wrap the call with sock_hold/sock_put in order to
5482  * prevent the socket from being released prior to being enqueued on
5483  * the sk_error_queue.
5484  */
5485 struct sk_buff *skb_clone_sk(struct sk_buff *skb)
5486 {
5487 	struct sock *sk = skb->sk;
5488 	struct sk_buff *clone;
5489 
5490 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
5491 		return NULL;
5492 
5493 	clone = skb_clone(skb, GFP_ATOMIC);
5494 	if (!clone) {
5495 		sock_put(sk);
5496 		return NULL;
5497 	}
5498 
5499 	clone->sk = sk;
5500 	clone->destructor = sock_efree;
5501 
5502 	return clone;
5503 }
5504 EXPORT_SYMBOL(skb_clone_sk);
5505 
5506 static void __skb_complete_tx_timestamp(struct sk_buff *skb,
5507 					struct sock *sk,
5508 					int tstype,
5509 					bool opt_stats)
5510 {
5511 	struct sock_exterr_skb *serr;
5512 	int err;
5513 
5514 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
5515 
5516 	serr = SKB_EXT_ERR(skb);
5517 	memset(serr, 0, sizeof(*serr));
5518 	serr->ee.ee_errno = ENOMSG;
5519 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
5520 	serr->ee.ee_info = tstype;
5521 	serr->opt_stats = opt_stats;
5522 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
5523 	if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
5524 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
5525 		if (sk_is_tcp(sk))
5526 			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
5527 	}
5528 
5529 	err = sock_queue_err_skb(sk, skb);
5530 
5531 	if (err)
5532 		kfree_skb(skb);
5533 }
5534 
5535 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
5536 {
5537 	bool ret;
5538 
5539 	if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data)))
5540 		return true;
5541 
5542 	read_lock_bh(&sk->sk_callback_lock);
5543 	ret = sk->sk_socket && sk->sk_socket->file &&
5544 	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
5545 	read_unlock_bh(&sk->sk_callback_lock);
5546 	return ret;
5547 }
5548 
5549 void skb_complete_tx_timestamp(struct sk_buff *skb,
5550 			       struct skb_shared_hwtstamps *hwtstamps)
5551 {
5552 	struct sock *sk = skb->sk;
5553 
5554 	if (!skb_may_tx_timestamp(sk, false))
5555 		goto err;
5556 
5557 	/* Take a reference to prevent skb_orphan() from freeing the socket,
5558 	 * but only if the socket refcount is not zero.
5559 	 */
5560 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5561 		*skb_hwtstamps(skb) = *hwtstamps;
5562 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
5563 		sock_put(sk);
5564 		return;
5565 	}
5566 
5567 err:
5568 	kfree_skb(skb);
5569 }
5570 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
5571 
5572 static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb,
5573 						 struct skb_shared_hwtstamps *hwtstamps,
5574 						 int tstype)
5575 {
5576 	switch (tstype) {
5577 	case SCM_TSTAMP_SCHED:
5578 		return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP;
5579 	case SCM_TSTAMP_SND:
5580 		return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF :
5581 						    SKBTX_SW_TSTAMP);
5582 	case SCM_TSTAMP_ACK:
5583 		return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK;
5584 	case SCM_TSTAMP_COMPLETION:
5585 		return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP;
5586 	}
5587 
5588 	return false;
5589 }
5590 
5591 static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb,
5592 						  struct skb_shared_hwtstamps *hwtstamps,
5593 						  struct sock *sk,
5594 						  int tstype)
5595 {
5596 	int op;
5597 
5598 	switch (tstype) {
5599 	case SCM_TSTAMP_SCHED:
5600 		op = BPF_SOCK_OPS_TSTAMP_SCHED_CB;
5601 		break;
5602 	case SCM_TSTAMP_SND:
5603 		if (hwtstamps) {
5604 			op = BPF_SOCK_OPS_TSTAMP_SND_HW_CB;
5605 			*skb_hwtstamps(skb) = *hwtstamps;
5606 		} else {
5607 			op = BPF_SOCK_OPS_TSTAMP_SND_SW_CB;
5608 		}
5609 		break;
5610 	case SCM_TSTAMP_ACK:
5611 		op = BPF_SOCK_OPS_TSTAMP_ACK_CB;
5612 		break;
5613 	default:
5614 		return;
5615 	}
5616 
5617 	bpf_skops_tx_timestamping(sk, skb, op);
5618 }
5619 
5620 void __skb_tstamp_tx(struct sk_buff *orig_skb,
5621 		     const struct sk_buff *ack_skb,
5622 		     struct skb_shared_hwtstamps *hwtstamps,
5623 		     struct sock *sk, int tstype)
5624 {
5625 	struct sk_buff *skb;
5626 	bool tsonly, opt_stats = false;
5627 	u32 tsflags;
5628 
5629 	if (!sk)
5630 		return;
5631 
5632 	if (skb_shinfo(orig_skb)->tx_flags & SKBTX_BPF)
5633 		skb_tstamp_tx_report_bpf_timestamping(orig_skb, hwtstamps,
5634 						      sk, tstype);
5635 
5636 	if (!skb_tstamp_tx_report_so_timestamping(orig_skb, hwtstamps, tstype))
5637 		return;
5638 
5639 	tsflags = READ_ONCE(sk->sk_tsflags);
5640 	if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
5641 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
5642 		return;
5643 
5644 	tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
5645 	if (!skb_may_tx_timestamp(sk, tsonly))
5646 		return;
5647 
5648 	if (tsonly) {
5649 #ifdef CONFIG_INET
5650 		if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
5651 		    sk_is_tcp(sk)) {
5652 			skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
5653 							     ack_skb);
5654 			opt_stats = true;
5655 		} else
5656 #endif
5657 			skb = alloc_skb(0, GFP_ATOMIC);
5658 	} else {
5659 		skb = skb_clone(orig_skb, GFP_ATOMIC);
5660 
5661 		if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
5662 			kfree_skb(skb);
5663 			return;
5664 		}
5665 	}
5666 	if (!skb)
5667 		return;
5668 
5669 	if (tsonly) {
5670 		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
5671 					     SKBTX_ANY_TSTAMP;
5672 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
5673 	}
5674 
5675 	if (hwtstamps)
5676 		*skb_hwtstamps(skb) = *hwtstamps;
5677 	else
5678 		__net_timestamp(skb);
5679 
5680 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
5681 }
5682 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
5683 
5684 void skb_tstamp_tx(struct sk_buff *orig_skb,
5685 		   struct skb_shared_hwtstamps *hwtstamps)
5686 {
5687 	return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
5688 			       SCM_TSTAMP_SND);
5689 }
5690 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
5691 
5692 #ifdef CONFIG_WIRELESS
5693 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
5694 {
5695 	struct sock *sk = skb->sk;
5696 	struct sock_exterr_skb *serr;
5697 	int err = 1;
5698 
5699 	skb->wifi_acked_valid = 1;
5700 	skb->wifi_acked = acked;
5701 
5702 	serr = SKB_EXT_ERR(skb);
5703 	memset(serr, 0, sizeof(*serr));
5704 	serr->ee.ee_errno = ENOMSG;
5705 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
5706 
5707 	/* Take a reference to prevent skb_orphan() from freeing the socket,
5708 	 * but only if the socket refcount is not zero.
5709 	 */
5710 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5711 		err = sock_queue_err_skb(sk, skb);
5712 		sock_put(sk);
5713 	}
5714 	if (err)
5715 		kfree_skb(skb);
5716 }
5717 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
5718 #endif /* CONFIG_WIRELESS */
5719 
5720 /**
5721  * skb_partial_csum_set - set up and verify partial csum values for packet
5722  * @skb: the skb to set
5723  * @start: the number of bytes after skb->data to start checksumming.
5724  * @off: the offset from start to place the checksum.
5725  *
5726  * For untrusted partially-checksummed packets, we need to make sure the values
5727  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5728  *
5729  * This function checks and sets those values and skb->ip_summed: if this
5730  * returns false you should drop the packet.
5731  */
5732 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
5733 {
5734 	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
5735 	u32 csum_start = skb_headroom(skb) + (u32)start;
5736 
5737 	if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
5738 		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
5739 				     start, off, skb_headroom(skb), skb_headlen(skb));
5740 		return false;
5741 	}
5742 	skb->ip_summed = CHECKSUM_PARTIAL;
5743 	skb->csum_start = csum_start;
5744 	skb->csum_offset = off;
5745 	skb->transport_header = csum_start;
5746 	return true;
5747 }
5748 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
5749 
5750 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
5751 			       unsigned int max)
5752 {
5753 	if (skb_headlen(skb) >= len)
5754 		return 0;
5755 
5756 	/* If we need to pullup then pullup to the max, so we
5757 	 * won't need to do it again.
5758 	 */
5759 	if (max > skb->len)
5760 		max = skb->len;
5761 
5762 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
5763 		return -ENOMEM;
5764 
5765 	if (skb_headlen(skb) < len)
5766 		return -EPROTO;
5767 
5768 	return 0;
5769 }
5770 
5771 #define MAX_TCP_HDR_LEN (15 * 4)
5772 
5773 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
5774 				      typeof(IPPROTO_IP) proto,
5775 				      unsigned int off)
5776 {
5777 	int err;
5778 
5779 	switch (proto) {
5780 	case IPPROTO_TCP:
5781 		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
5782 					  off + MAX_TCP_HDR_LEN);
5783 		if (!err && !skb_partial_csum_set(skb, off,
5784 						  offsetof(struct tcphdr,
5785 							   check)))
5786 			err = -EPROTO;
5787 		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
5788 
5789 	case IPPROTO_UDP:
5790 		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
5791 					  off + sizeof(struct udphdr));
5792 		if (!err && !skb_partial_csum_set(skb, off,
5793 						  offsetof(struct udphdr,
5794 							   check)))
5795 			err = -EPROTO;
5796 		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
5797 	}
5798 
5799 	return ERR_PTR(-EPROTO);
5800 }
5801 
5802 /* This value should be large enough to cover a tagged ethernet header plus
5803  * maximally sized IP and TCP or UDP headers.
5804  */
5805 #define MAX_IP_HDR_LEN 128
5806 
5807 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
5808 {
5809 	unsigned int off;
5810 	bool fragment;
5811 	__sum16 *csum;
5812 	int err;
5813 
5814 	fragment = false;
5815 
5816 	err = skb_maybe_pull_tail(skb,
5817 				  sizeof(struct iphdr),
5818 				  MAX_IP_HDR_LEN);
5819 	if (err < 0)
5820 		goto out;
5821 
5822 	if (ip_is_fragment(ip_hdr(skb)))
5823 		fragment = true;
5824 
5825 	off = ip_hdrlen(skb);
5826 
5827 	err = -EPROTO;
5828 
5829 	if (fragment)
5830 		goto out;
5831 
5832 	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5833 	if (IS_ERR(csum))
5834 		return PTR_ERR(csum);
5835 
5836 	if (recalculate)
5837 		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5838 					   ip_hdr(skb)->daddr,
5839 					   skb->len - off,
5840 					   ip_hdr(skb)->protocol, 0);
5841 	err = 0;
5842 
5843 out:
5844 	return err;
5845 }
5846 
5847 /* This value should be large enough to cover a tagged ethernet header plus
5848  * an IPv6 header, all options, and a maximal TCP or UDP header.
5849  */
5850 #define MAX_IPV6_HDR_LEN 256
5851 
5852 #define OPT_HDR(type, skb, off) \
5853 	(type *)(skb_network_header(skb) + (off))
5854 
5855 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5856 {
5857 	int err;
5858 	u8 nexthdr;
5859 	unsigned int off;
5860 	unsigned int len;
5861 	bool fragment;
5862 	bool done;
5863 	__sum16 *csum;
5864 
5865 	fragment = false;
5866 	done = false;
5867 
5868 	off = sizeof(struct ipv6hdr);
5869 
5870 	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5871 	if (err < 0)
5872 		goto out;
5873 
5874 	nexthdr = ipv6_hdr(skb)->nexthdr;
5875 
5876 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5877 	while (off <= len && !done) {
5878 		switch (nexthdr) {
5879 		case IPPROTO_DSTOPTS:
5880 		case IPPROTO_HOPOPTS:
5881 		case IPPROTO_ROUTING: {
5882 			struct ipv6_opt_hdr *hp;
5883 
5884 			err = skb_maybe_pull_tail(skb,
5885 						  off +
5886 						  sizeof(struct ipv6_opt_hdr),
5887 						  MAX_IPV6_HDR_LEN);
5888 			if (err < 0)
5889 				goto out;
5890 
5891 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5892 			nexthdr = hp->nexthdr;
5893 			off += ipv6_optlen(hp);
5894 			break;
5895 		}
5896 		case IPPROTO_AH: {
5897 			struct ip_auth_hdr *hp;
5898 
5899 			err = skb_maybe_pull_tail(skb,
5900 						  off +
5901 						  sizeof(struct ip_auth_hdr),
5902 						  MAX_IPV6_HDR_LEN);
5903 			if (err < 0)
5904 				goto out;
5905 
5906 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5907 			nexthdr = hp->nexthdr;
5908 			off += ipv6_authlen(hp);
5909 			break;
5910 		}
5911 		case IPPROTO_FRAGMENT: {
5912 			struct frag_hdr *hp;
5913 
5914 			err = skb_maybe_pull_tail(skb,
5915 						  off +
5916 						  sizeof(struct frag_hdr),
5917 						  MAX_IPV6_HDR_LEN);
5918 			if (err < 0)
5919 				goto out;
5920 
5921 			hp = OPT_HDR(struct frag_hdr, skb, off);
5922 
5923 			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5924 				fragment = true;
5925 
5926 			nexthdr = hp->nexthdr;
5927 			off += sizeof(struct frag_hdr);
5928 			break;
5929 		}
5930 		default:
5931 			done = true;
5932 			break;
5933 		}
5934 	}
5935 
5936 	err = -EPROTO;
5937 
5938 	if (!done || fragment)
5939 		goto out;
5940 
5941 	csum = skb_checksum_setup_ip(skb, nexthdr, off);
5942 	if (IS_ERR(csum))
5943 		return PTR_ERR(csum);
5944 
5945 	if (recalculate)
5946 		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5947 					 &ipv6_hdr(skb)->daddr,
5948 					 skb->len - off, nexthdr, 0);
5949 	err = 0;
5950 
5951 out:
5952 	return err;
5953 }
5954 
5955 /**
5956  * skb_checksum_setup - set up partial checksum offset
5957  * @skb: the skb to set up
5958  * @recalculate: if true the pseudo-header checksum will be recalculated
5959  */
5960 int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5961 {
5962 	int err;
5963 
5964 	switch (skb->protocol) {
5965 	case htons(ETH_P_IP):
5966 		err = skb_checksum_setup_ipv4(skb, recalculate);
5967 		break;
5968 
5969 	case htons(ETH_P_IPV6):
5970 		err = skb_checksum_setup_ipv6(skb, recalculate);
5971 		break;
5972 
5973 	default:
5974 		err = -EPROTO;
5975 		break;
5976 	}
5977 
5978 	return err;
5979 }
5980 EXPORT_SYMBOL(skb_checksum_setup);
5981 
5982 /**
5983  * skb_checksum_maybe_trim - maybe trims the given skb
5984  * @skb: the skb to check
5985  * @transport_len: the data length beyond the network header
5986  *
5987  * Checks whether the given skb has data beyond the given transport length.
5988  * If so, returns a cloned skb trimmed to this transport length.
5989  * Otherwise returns the provided skb. Returns NULL in error cases
5990  * (e.g. transport_len exceeds skb length or out-of-memory).
5991  *
5992  * Caller needs to set the skb transport header and free any returned skb if it
5993  * differs from the provided skb.
5994  */
5995 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5996 					       unsigned int transport_len)
5997 {
5998 	struct sk_buff *skb_chk;
5999 	unsigned int len = skb_transport_offset(skb) + transport_len;
6000 	int ret;
6001 
6002 	if (skb->len < len)
6003 		return NULL;
6004 	else if (skb->len == len)
6005 		return skb;
6006 
6007 	skb_chk = skb_clone(skb, GFP_ATOMIC);
6008 	if (!skb_chk)
6009 		return NULL;
6010 
6011 	ret = pskb_trim_rcsum(skb_chk, len);
6012 	if (ret) {
6013 		kfree_skb(skb_chk);
6014 		return NULL;
6015 	}
6016 
6017 	return skb_chk;
6018 }
6019 
6020 /**
6021  * skb_checksum_trimmed - validate checksum of an skb
6022  * @skb: the skb to check
6023  * @transport_len: the data length beyond the network header
6024  * @skb_chkf: checksum function to use
6025  *
6026  * Applies the given checksum function skb_chkf to the provided skb.
6027  * Returns a checked and maybe trimmed skb. Returns NULL on error.
6028  *
6029  * If the skb has data beyond the given transport length, then a
6030  * trimmed & cloned skb is checked and returned.
6031  *
6032  * Caller needs to set the skb transport header and free any returned skb if it
6033  * differs from the provided skb.
6034  */
6035 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
6036 				     unsigned int transport_len,
6037 				     __sum16(*skb_chkf)(struct sk_buff *skb))
6038 {
6039 	struct sk_buff *skb_chk;
6040 	unsigned int offset = skb_transport_offset(skb);
6041 	__sum16 ret;
6042 
6043 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
6044 	if (!skb_chk)
6045 		goto err;
6046 
6047 	if (!pskb_may_pull(skb_chk, offset))
6048 		goto err;
6049 
6050 	skb_pull_rcsum(skb_chk, offset);
6051 	ret = skb_chkf(skb_chk);
6052 	skb_push_rcsum(skb_chk, offset);
6053 
6054 	if (ret)
6055 		goto err;
6056 
6057 	return skb_chk;
6058 
6059 err:
6060 	if (skb_chk && skb_chk != skb)
6061 		kfree_skb(skb_chk);
6062 
6063 	return NULL;
6064 
6065 }
6066 EXPORT_SYMBOL(skb_checksum_trimmed);
6067 
6068 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
6069 {
6070 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
6071 			     skb->dev->name);
6072 }
6073 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
6074 
6075 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
6076 {
6077 	if (head_stolen) {
6078 		skb_release_head_state(skb);
6079 		kmem_cache_free(net_hotdata.skbuff_cache, skb);
6080 	} else {
6081 		__kfree_skb(skb);
6082 	}
6083 }
6084 EXPORT_SYMBOL(kfree_skb_partial);
6085 
6086 /**
6087  * skb_try_coalesce - try to merge skb to prior one
6088  * @to: prior buffer
6089  * @from: buffer to add
6090  * @fragstolen: pointer to boolean
6091  * @delta_truesize: how much more was allocated than was requested
6092  */
6093 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
6094 		      bool *fragstolen, int *delta_truesize)
6095 {
6096 	struct skb_shared_info *to_shinfo, *from_shinfo;
6097 	int i, delta, len = from->len;
6098 
6099 	*fragstolen = false;
6100 
6101 	if (skb_cloned(to))
6102 		return false;
6103 
6104 	/* In general, avoid mixing page_pool and non-page_pool allocated
6105 	 * pages within the same SKB. In theory we could take full
6106 	 * references if @from is cloned and !@to->pp_recycle but its
6107 	 * tricky (due to potential race with the clone disappearing) and
6108 	 * rare, so not worth dealing with.
6109 	 */
6110 	if (to->pp_recycle != from->pp_recycle)
6111 		return false;
6112 
6113 	if (skb_frags_readable(from) != skb_frags_readable(to))
6114 		return false;
6115 
6116 	if (len <= skb_tailroom(to) && skb_frags_readable(from)) {
6117 		if (len)
6118 			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
6119 		*delta_truesize = 0;
6120 		return true;
6121 	}
6122 
6123 	to_shinfo = skb_shinfo(to);
6124 	from_shinfo = skb_shinfo(from);
6125 	if (to_shinfo->frag_list || from_shinfo->frag_list)
6126 		return false;
6127 	if (skb_zcopy(to) || skb_zcopy(from))
6128 		return false;
6129 
6130 	if (skb_headlen(from) != 0) {
6131 		struct page *page;
6132 		unsigned int offset;
6133 
6134 		if (to_shinfo->nr_frags +
6135 		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
6136 			return false;
6137 
6138 		if (skb_head_is_locked(from))
6139 			return false;
6140 
6141 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
6142 
6143 		page = virt_to_head_page(from->head);
6144 		offset = from->data - (unsigned char *)page_address(page);
6145 
6146 		skb_fill_page_desc(to, to_shinfo->nr_frags,
6147 				   page, offset, skb_headlen(from));
6148 		*fragstolen = true;
6149 	} else {
6150 		if (to_shinfo->nr_frags +
6151 		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
6152 			return false;
6153 
6154 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
6155 	}
6156 
6157 	WARN_ON_ONCE(delta < len);
6158 
6159 	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
6160 	       from_shinfo->frags,
6161 	       from_shinfo->nr_frags * sizeof(skb_frag_t));
6162 	to_shinfo->nr_frags += from_shinfo->nr_frags;
6163 
6164 	if (!skb_cloned(from))
6165 		from_shinfo->nr_frags = 0;
6166 
6167 	/* if the skb is not cloned this does nothing
6168 	 * since we set nr_frags to 0.
6169 	 */
6170 	if (skb_pp_frag_ref(from)) {
6171 		for (i = 0; i < from_shinfo->nr_frags; i++)
6172 			__skb_frag_ref(&from_shinfo->frags[i]);
6173 	}
6174 
6175 	to->truesize += delta;
6176 	to->len += len;
6177 	to->data_len += len;
6178 
6179 	*delta_truesize = delta;
6180 	return true;
6181 }
6182 EXPORT_SYMBOL(skb_try_coalesce);
6183 
6184 /**
6185  * skb_scrub_packet - scrub an skb
6186  *
6187  * @skb: buffer to clean
6188  * @xnet: packet is crossing netns
6189  *
6190  * skb_scrub_packet can be used after encapsulating or decapsulating a packet
6191  * into/from a tunnel. Some information have to be cleared during these
6192  * operations.
6193  * skb_scrub_packet can also be used to clean a skb before injecting it in
6194  * another namespace (@xnet == true). We have to clear all information in the
6195  * skb that could impact namespace isolation.
6196  */
6197 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
6198 {
6199 	skb->pkt_type = PACKET_HOST;
6200 	skb->skb_iif = 0;
6201 	skb->ignore_df = 0;
6202 	skb_dst_drop(skb);
6203 	skb_ext_reset(skb);
6204 	nf_reset_ct(skb);
6205 	nf_reset_trace(skb);
6206 
6207 #ifdef CONFIG_NET_SWITCHDEV
6208 	skb->offload_fwd_mark = 0;
6209 	skb->offload_l3_fwd_mark = 0;
6210 #endif
6211 	ipvs_reset(skb);
6212 
6213 	if (!xnet)
6214 		return;
6215 
6216 	skb->mark = 0;
6217 	skb_clear_tstamp(skb);
6218 }
6219 EXPORT_SYMBOL_GPL(skb_scrub_packet);
6220 
6221 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
6222 {
6223 	int mac_len, meta_len;
6224 	void *meta;
6225 
6226 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
6227 		kfree_skb(skb);
6228 		return NULL;
6229 	}
6230 
6231 	mac_len = skb->data - skb_mac_header(skb);
6232 	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
6233 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
6234 			mac_len - VLAN_HLEN - ETH_TLEN);
6235 	}
6236 
6237 	meta_len = skb_metadata_len(skb);
6238 	if (meta_len) {
6239 		meta = skb_metadata_end(skb) - meta_len;
6240 		memmove(meta + VLAN_HLEN, meta, meta_len);
6241 	}
6242 
6243 	skb->mac_header += VLAN_HLEN;
6244 	return skb;
6245 }
6246 
6247 struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
6248 {
6249 	struct vlan_hdr *vhdr;
6250 	u16 vlan_tci;
6251 
6252 	if (unlikely(skb_vlan_tag_present(skb))) {
6253 		/* vlan_tci is already set-up so leave this for another time */
6254 		return skb;
6255 	}
6256 
6257 	skb = skb_share_check(skb, GFP_ATOMIC);
6258 	if (unlikely(!skb))
6259 		goto err_free;
6260 	/* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
6261 	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
6262 		goto err_free;
6263 
6264 	vhdr = (struct vlan_hdr *)skb->data;
6265 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
6266 	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
6267 
6268 	skb_pull_rcsum(skb, VLAN_HLEN);
6269 	vlan_set_encap_proto(skb, vhdr);
6270 
6271 	skb = skb_reorder_vlan_header(skb);
6272 	if (unlikely(!skb))
6273 		goto err_free;
6274 
6275 	skb_reset_network_header(skb);
6276 	if (!skb_transport_header_was_set(skb))
6277 		skb_reset_transport_header(skb);
6278 	skb_reset_mac_len(skb);
6279 
6280 	return skb;
6281 
6282 err_free:
6283 	kfree_skb(skb);
6284 	return NULL;
6285 }
6286 EXPORT_SYMBOL(skb_vlan_untag);
6287 
6288 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
6289 {
6290 	if (!pskb_may_pull(skb, write_len))
6291 		return -ENOMEM;
6292 
6293 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
6294 		return 0;
6295 
6296 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
6297 }
6298 EXPORT_SYMBOL(skb_ensure_writable);
6299 
6300 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev)
6301 {
6302 	int needed_headroom = dev->needed_headroom;
6303 	int needed_tailroom = dev->needed_tailroom;
6304 
6305 	/* For tail taggers, we need to pad short frames ourselves, to ensure
6306 	 * that the tail tag does not fail at its role of being at the end of
6307 	 * the packet, once the conduit interface pads the frame. Account for
6308 	 * that pad length here, and pad later.
6309 	 */
6310 	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
6311 		needed_tailroom += ETH_ZLEN - skb->len;
6312 	/* skb_headroom() returns unsigned int... */
6313 	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
6314 	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
6315 
6316 	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
6317 		/* No reallocation needed, yay! */
6318 		return 0;
6319 
6320 	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
6321 				GFP_ATOMIC);
6322 }
6323 EXPORT_SYMBOL(skb_ensure_writable_head_tail);
6324 
6325 /* remove VLAN header from packet and update csum accordingly.
6326  * expects a non skb_vlan_tag_present skb with a vlan tag payload
6327  */
6328 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
6329 {
6330 	int offset = skb->data - skb_mac_header(skb);
6331 	int err;
6332 
6333 	if (WARN_ONCE(offset,
6334 		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
6335 		      offset)) {
6336 		return -EINVAL;
6337 	}
6338 
6339 	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
6340 	if (unlikely(err))
6341 		return err;
6342 
6343 	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6344 
6345 	vlan_remove_tag(skb, vlan_tci);
6346 
6347 	skb->mac_header += VLAN_HLEN;
6348 
6349 	if (skb_network_offset(skb) < ETH_HLEN)
6350 		skb_set_network_header(skb, ETH_HLEN);
6351 
6352 	skb_reset_mac_len(skb);
6353 
6354 	return err;
6355 }
6356 EXPORT_SYMBOL(__skb_vlan_pop);
6357 
6358 /* Pop a vlan tag either from hwaccel or from payload.
6359  * Expects skb->data at mac header.
6360  */
6361 int skb_vlan_pop(struct sk_buff *skb)
6362 {
6363 	u16 vlan_tci;
6364 	__be16 vlan_proto;
6365 	int err;
6366 
6367 	if (likely(skb_vlan_tag_present(skb))) {
6368 		__vlan_hwaccel_clear_tag(skb);
6369 	} else {
6370 		if (unlikely(!eth_type_vlan(skb->protocol)))
6371 			return 0;
6372 
6373 		err = __skb_vlan_pop(skb, &vlan_tci);
6374 		if (err)
6375 			return err;
6376 	}
6377 	/* move next vlan tag to hw accel tag */
6378 	if (likely(!eth_type_vlan(skb->protocol)))
6379 		return 0;
6380 
6381 	vlan_proto = skb->protocol;
6382 	err = __skb_vlan_pop(skb, &vlan_tci);
6383 	if (unlikely(err))
6384 		return err;
6385 
6386 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6387 	return 0;
6388 }
6389 EXPORT_SYMBOL(skb_vlan_pop);
6390 
6391 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
6392  * Expects skb->data at mac header.
6393  */
6394 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
6395 {
6396 	if (skb_vlan_tag_present(skb)) {
6397 		int offset = skb->data - skb_mac_header(skb);
6398 		int err;
6399 
6400 		if (WARN_ONCE(offset,
6401 			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
6402 			      offset)) {
6403 			return -EINVAL;
6404 		}
6405 
6406 		err = __vlan_insert_tag(skb, skb->vlan_proto,
6407 					skb_vlan_tag_get(skb));
6408 		if (err)
6409 			return err;
6410 
6411 		skb->protocol = skb->vlan_proto;
6412 		skb->network_header -= VLAN_HLEN;
6413 
6414 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6415 	}
6416 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6417 	return 0;
6418 }
6419 EXPORT_SYMBOL(skb_vlan_push);
6420 
6421 /**
6422  * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6423  *
6424  * @skb: Socket buffer to modify
6425  *
6426  * Drop the Ethernet header of @skb.
6427  *
6428  * Expects that skb->data points to the mac header and that no VLAN tags are
6429  * present.
6430  *
6431  * Returns 0 on success, -errno otherwise.
6432  */
6433 int skb_eth_pop(struct sk_buff *skb)
6434 {
6435 	if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
6436 	    skb_network_offset(skb) < ETH_HLEN)
6437 		return -EPROTO;
6438 
6439 	skb_pull_rcsum(skb, ETH_HLEN);
6440 	skb_reset_mac_header(skb);
6441 	skb_reset_mac_len(skb);
6442 
6443 	return 0;
6444 }
6445 EXPORT_SYMBOL(skb_eth_pop);
6446 
6447 /**
6448  * skb_eth_push() - Add a new Ethernet header at the head of a packet
6449  *
6450  * @skb: Socket buffer to modify
6451  * @dst: Destination MAC address of the new header
6452  * @src: Source MAC address of the new header
6453  *
6454  * Prepend @skb with a new Ethernet header.
6455  *
6456  * Expects that skb->data points to the mac header, which must be empty.
6457  *
6458  * Returns 0 on success, -errno otherwise.
6459  */
6460 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
6461 		 const unsigned char *src)
6462 {
6463 	struct ethhdr *eth;
6464 	int err;
6465 
6466 	if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
6467 		return -EPROTO;
6468 
6469 	err = skb_cow_head(skb, sizeof(*eth));
6470 	if (err < 0)
6471 		return err;
6472 
6473 	skb_push(skb, sizeof(*eth));
6474 	skb_reset_mac_header(skb);
6475 	skb_reset_mac_len(skb);
6476 
6477 	eth = eth_hdr(skb);
6478 	ether_addr_copy(eth->h_dest, dst);
6479 	ether_addr_copy(eth->h_source, src);
6480 	eth->h_proto = skb->protocol;
6481 
6482 	skb_postpush_rcsum(skb, eth, sizeof(*eth));
6483 
6484 	return 0;
6485 }
6486 EXPORT_SYMBOL(skb_eth_push);
6487 
6488 /* Update the ethertype of hdr and the skb csum value if required. */
6489 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
6490 			     __be16 ethertype)
6491 {
6492 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
6493 		__be16 diff[] = { ~hdr->h_proto, ethertype };
6494 
6495 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6496 	}
6497 
6498 	hdr->h_proto = ethertype;
6499 }
6500 
6501 /**
6502  * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6503  *                   the packet
6504  *
6505  * @skb: buffer
6506  * @mpls_lse: MPLS label stack entry to push
6507  * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
6508  * @mac_len: length of the MAC header
6509  * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
6510  *            ethernet
6511  *
6512  * Expects skb->data at mac header.
6513  *
6514  * Returns 0 on success, -errno otherwise.
6515  */
6516 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
6517 		  int mac_len, bool ethernet)
6518 {
6519 	struct mpls_shim_hdr *lse;
6520 	int err;
6521 
6522 	if (unlikely(!eth_p_mpls(mpls_proto)))
6523 		return -EINVAL;
6524 
6525 	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
6526 	if (skb->encapsulation)
6527 		return -EINVAL;
6528 
6529 	err = skb_cow_head(skb, MPLS_HLEN);
6530 	if (unlikely(err))
6531 		return err;
6532 
6533 	if (!skb->inner_protocol) {
6534 		skb_set_inner_network_header(skb, skb_network_offset(skb));
6535 		skb_set_inner_protocol(skb, skb->protocol);
6536 	}
6537 
6538 	skb_push(skb, MPLS_HLEN);
6539 	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
6540 		mac_len);
6541 	skb_reset_mac_header(skb);
6542 	skb_set_network_header(skb, mac_len);
6543 	skb_reset_mac_len(skb);
6544 
6545 	lse = mpls_hdr(skb);
6546 	lse->label_stack_entry = mpls_lse;
6547 	skb_postpush_rcsum(skb, lse, MPLS_HLEN);
6548 
6549 	if (ethernet && mac_len >= ETH_HLEN)
6550 		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
6551 	skb->protocol = mpls_proto;
6552 
6553 	return 0;
6554 }
6555 EXPORT_SYMBOL_GPL(skb_mpls_push);
6556 
6557 /**
6558  * skb_mpls_pop() - pop the outermost MPLS header
6559  *
6560  * @skb: buffer
6561  * @next_proto: ethertype of header after popped MPLS header
6562  * @mac_len: length of the MAC header
6563  * @ethernet: flag to indicate if the packet is ethernet
6564  *
6565  * Expects skb->data at mac header.
6566  *
6567  * Returns 0 on success, -errno otherwise.
6568  */
6569 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
6570 		 bool ethernet)
6571 {
6572 	int err;
6573 
6574 	if (unlikely(!eth_p_mpls(skb->protocol)))
6575 		return 0;
6576 
6577 	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
6578 	if (unlikely(err))
6579 		return err;
6580 
6581 	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
6582 	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
6583 		mac_len);
6584 
6585 	__skb_pull(skb, MPLS_HLEN);
6586 	skb_reset_mac_header(skb);
6587 	skb_set_network_header(skb, mac_len);
6588 
6589 	if (ethernet && mac_len >= ETH_HLEN) {
6590 		struct ethhdr *hdr;
6591 
6592 		/* use mpls_hdr() to get ethertype to account for VLANs. */
6593 		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
6594 		skb_mod_eth_type(skb, hdr, next_proto);
6595 	}
6596 	skb->protocol = next_proto;
6597 
6598 	return 0;
6599 }
6600 EXPORT_SYMBOL_GPL(skb_mpls_pop);
6601 
6602 /**
6603  * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6604  *
6605  * @skb: buffer
6606  * @mpls_lse: new MPLS label stack entry to update to
6607  *
6608  * Expects skb->data at mac header.
6609  *
6610  * Returns 0 on success, -errno otherwise.
6611  */
6612 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
6613 {
6614 	int err;
6615 
6616 	if (unlikely(!eth_p_mpls(skb->protocol)))
6617 		return -EINVAL;
6618 
6619 	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
6620 	if (unlikely(err))
6621 		return err;
6622 
6623 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
6624 		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
6625 
6626 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6627 	}
6628 
6629 	mpls_hdr(skb)->label_stack_entry = mpls_lse;
6630 
6631 	return 0;
6632 }
6633 EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
6634 
6635 /**
6636  * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6637  *
6638  * @skb: buffer
6639  *
6640  * Expects skb->data at mac header.
6641  *
6642  * Returns 0 on success, -errno otherwise.
6643  */
6644 int skb_mpls_dec_ttl(struct sk_buff *skb)
6645 {
6646 	u32 lse;
6647 	u8 ttl;
6648 
6649 	if (unlikely(!eth_p_mpls(skb->protocol)))
6650 		return -EINVAL;
6651 
6652 	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
6653 		return -ENOMEM;
6654 
6655 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
6656 	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
6657 	if (!--ttl)
6658 		return -EINVAL;
6659 
6660 	lse &= ~MPLS_LS_TTL_MASK;
6661 	lse |= ttl << MPLS_LS_TTL_SHIFT;
6662 
6663 	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
6664 }
6665 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
6666 
6667 /**
6668  * alloc_skb_with_frags - allocate skb with page frags
6669  *
6670  * @header_len: size of linear part
6671  * @data_len: needed length in frags
6672  * @order: max page order desired.
6673  * @errcode: pointer to error code if any
6674  * @gfp_mask: allocation mask
6675  *
6676  * This can be used to allocate a paged skb, given a maximal order for frags.
6677  */
6678 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
6679 				     unsigned long data_len,
6680 				     int order,
6681 				     int *errcode,
6682 				     gfp_t gfp_mask)
6683 {
6684 	unsigned long chunk;
6685 	struct sk_buff *skb;
6686 	struct page *page;
6687 	int nr_frags = 0;
6688 
6689 	*errcode = -EMSGSIZE;
6690 	if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
6691 		return NULL;
6692 
6693 	*errcode = -ENOBUFS;
6694 	skb = alloc_skb(header_len, gfp_mask);
6695 	if (!skb)
6696 		return NULL;
6697 
6698 	while (data_len) {
6699 		if (nr_frags == MAX_SKB_FRAGS)
6700 			goto failure;
6701 		while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
6702 			order--;
6703 
6704 		if (order) {
6705 			page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
6706 					   __GFP_COMP |
6707 					   __GFP_NOWARN,
6708 					   order);
6709 			if (!page) {
6710 				order--;
6711 				continue;
6712 			}
6713 		} else {
6714 			page = alloc_page(gfp_mask);
6715 			if (!page)
6716 				goto failure;
6717 		}
6718 		chunk = min_t(unsigned long, data_len,
6719 			      PAGE_SIZE << order);
6720 		skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
6721 		nr_frags++;
6722 		skb->truesize += (PAGE_SIZE << order);
6723 		data_len -= chunk;
6724 	}
6725 	return skb;
6726 
6727 failure:
6728 	kfree_skb(skb);
6729 	return NULL;
6730 }
6731 EXPORT_SYMBOL(alloc_skb_with_frags);
6732 
6733 /* carve out the first off bytes from skb when off < headlen */
6734 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
6735 				    const int headlen, gfp_t gfp_mask)
6736 {
6737 	int i;
6738 	unsigned int size = skb_end_offset(skb);
6739 	int new_hlen = headlen - off;
6740 	u8 *data;
6741 
6742 	if (skb_pfmemalloc(skb))
6743 		gfp_mask |= __GFP_MEMALLOC;
6744 
6745 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
6746 	if (!data)
6747 		return -ENOMEM;
6748 	size = SKB_WITH_OVERHEAD(size);
6749 
6750 	/* Copy real data, and all frags */
6751 	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
6752 	skb->len -= off;
6753 
6754 	memcpy((struct skb_shared_info *)(data + size),
6755 	       skb_shinfo(skb),
6756 	       offsetof(struct skb_shared_info,
6757 			frags[skb_shinfo(skb)->nr_frags]));
6758 	if (skb_cloned(skb)) {
6759 		/* drop the old head gracefully */
6760 		if (skb_orphan_frags(skb, gfp_mask)) {
6761 			skb_kfree_head(data, size);
6762 			return -ENOMEM;
6763 		}
6764 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
6765 			skb_frag_ref(skb, i);
6766 		if (skb_has_frag_list(skb))
6767 			skb_clone_fraglist(skb);
6768 		skb_release_data(skb, SKB_CONSUMED);
6769 	} else {
6770 		/* we can reuse existing recount- all we did was
6771 		 * relocate values
6772 		 */
6773 		skb_free_head(skb);
6774 	}
6775 
6776 	skb->head = data;
6777 	skb->data = data;
6778 	skb->head_frag = 0;
6779 	skb_set_end_offset(skb, size);
6780 	skb_set_tail_pointer(skb, skb_headlen(skb));
6781 	skb_headers_offset_update(skb, 0);
6782 	skb->cloned = 0;
6783 	skb->hdr_len = 0;
6784 	skb->nohdr = 0;
6785 	atomic_set(&skb_shinfo(skb)->dataref, 1);
6786 
6787 	return 0;
6788 }
6789 
6790 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6791 
6792 /* carve out the first eat bytes from skb's frag_list. May recurse into
6793  * pskb_carve()
6794  */
6795 static int pskb_carve_frag_list(struct skb_shared_info *shinfo, int eat,
6796 				gfp_t gfp_mask)
6797 {
6798 	struct sk_buff *list = shinfo->frag_list;
6799 	struct sk_buff *clone = NULL;
6800 	struct sk_buff *insp = NULL;
6801 
6802 	do {
6803 		if (!list) {
6804 			pr_err("Not enough bytes to eat. Want %d\n", eat);
6805 			return -EFAULT;
6806 		}
6807 		if (list->len <= eat) {
6808 			/* Eaten as whole. */
6809 			eat -= list->len;
6810 			list = list->next;
6811 			insp = list;
6812 		} else {
6813 			/* Eaten partially. */
6814 			if (skb_shared(list)) {
6815 				clone = skb_clone(list, gfp_mask);
6816 				if (!clone)
6817 					return -ENOMEM;
6818 				insp = list->next;
6819 				list = clone;
6820 			} else {
6821 				/* This may be pulled without problems. */
6822 				insp = list;
6823 			}
6824 			if (pskb_carve(list, eat, gfp_mask) < 0) {
6825 				kfree_skb(clone);
6826 				return -ENOMEM;
6827 			}
6828 			break;
6829 		}
6830 	} while (eat);
6831 
6832 	/* Free pulled out fragments. */
6833 	while ((list = shinfo->frag_list) != insp) {
6834 		shinfo->frag_list = list->next;
6835 		consume_skb(list);
6836 	}
6837 	/* And insert new clone at head. */
6838 	if (clone) {
6839 		clone->next = list;
6840 		shinfo->frag_list = clone;
6841 	}
6842 	return 0;
6843 }
6844 
6845 /* carve off first len bytes from skb. Split line (off) is in the
6846  * non-linear part of skb
6847  */
6848 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
6849 				       int pos, gfp_t gfp_mask)
6850 {
6851 	int i, k = 0;
6852 	unsigned int size = skb_end_offset(skb);
6853 	u8 *data;
6854 	const int nfrags = skb_shinfo(skb)->nr_frags;
6855 	struct skb_shared_info *shinfo;
6856 
6857 	if (skb_pfmemalloc(skb))
6858 		gfp_mask |= __GFP_MEMALLOC;
6859 
6860 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
6861 	if (!data)
6862 		return -ENOMEM;
6863 	size = SKB_WITH_OVERHEAD(size);
6864 
6865 	memcpy((struct skb_shared_info *)(data + size),
6866 	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
6867 	if (skb_orphan_frags(skb, gfp_mask)) {
6868 		skb_kfree_head(data, size);
6869 		return -ENOMEM;
6870 	}
6871 	shinfo = (struct skb_shared_info *)(data + size);
6872 	for (i = 0; i < nfrags; i++) {
6873 		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
6874 
6875 		if (pos + fsize > off) {
6876 			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
6877 
6878 			if (pos < off) {
6879 				/* Split frag.
6880 				 * We have two variants in this case:
6881 				 * 1. Move all the frag to the second
6882 				 *    part, if it is possible. F.e.
6883 				 *    this approach is mandatory for TUX,
6884 				 *    where splitting is expensive.
6885 				 * 2. Split is accurately. We make this.
6886 				 */
6887 				skb_frag_off_add(&shinfo->frags[0], off - pos);
6888 				skb_frag_size_sub(&shinfo->frags[0], off - pos);
6889 			}
6890 			skb_frag_ref(skb, i);
6891 			k++;
6892 		}
6893 		pos += fsize;
6894 	}
6895 	shinfo->nr_frags = k;
6896 	if (skb_has_frag_list(skb))
6897 		skb_clone_fraglist(skb);
6898 
6899 	/* split line is in frag list */
6900 	if (k == 0 && pskb_carve_frag_list(shinfo, off - pos, gfp_mask)) {
6901 		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6902 		if (skb_has_frag_list(skb))
6903 			kfree_skb_list(skb_shinfo(skb)->frag_list);
6904 		skb_kfree_head(data, size);
6905 		return -ENOMEM;
6906 	}
6907 	skb_release_data(skb, SKB_CONSUMED);
6908 
6909 	skb->head = data;
6910 	skb->head_frag = 0;
6911 	skb->data = data;
6912 	skb_set_end_offset(skb, size);
6913 	skb_reset_tail_pointer(skb);
6914 	skb_headers_offset_update(skb, 0);
6915 	skb->cloned   = 0;
6916 	skb->hdr_len  = 0;
6917 	skb->nohdr    = 0;
6918 	skb->len -= off;
6919 	skb->data_len = skb->len;
6920 	atomic_set(&skb_shinfo(skb)->dataref, 1);
6921 	return 0;
6922 }
6923 
6924 /* remove len bytes from the beginning of the skb */
6925 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6926 {
6927 	int headlen = skb_headlen(skb);
6928 
6929 	if (len < headlen)
6930 		return pskb_carve_inside_header(skb, len, headlen, gfp);
6931 	else
6932 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6933 }
6934 
6935 /* Extract to_copy bytes starting at off from skb, and return this in
6936  * a new skb
6937  */
6938 struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6939 			     int to_copy, gfp_t gfp)
6940 {
6941 	struct sk_buff  *clone = skb_clone(skb, gfp);
6942 
6943 	if (!clone)
6944 		return NULL;
6945 
6946 	if (pskb_carve(clone, off, gfp) < 0 ||
6947 	    pskb_trim(clone, to_copy)) {
6948 		kfree_skb(clone);
6949 		return NULL;
6950 	}
6951 	return clone;
6952 }
6953 EXPORT_SYMBOL(pskb_extract);
6954 
6955 /**
6956  * skb_condense - try to get rid of fragments/frag_list if possible
6957  * @skb: buffer
6958  *
6959  * Can be used to save memory before skb is added to a busy queue.
6960  * If packet has bytes in frags and enough tail room in skb->head,
6961  * pull all of them, so that we can free the frags right now and adjust
6962  * truesize.
6963  * Notes:
6964  *	We do not reallocate skb->head thus can not fail.
6965  *	Caller must re-evaluate skb->truesize if needed.
6966  */
6967 void skb_condense(struct sk_buff *skb)
6968 {
6969 	if (skb->data_len) {
6970 		if (skb->data_len > skb->end - skb->tail ||
6971 		    skb_cloned(skb) || !skb_frags_readable(skb))
6972 			return;
6973 
6974 		/* Nice, we can free page frag(s) right now */
6975 		__pskb_pull_tail(skb, skb->data_len);
6976 	}
6977 	/* At this point, skb->truesize might be over estimated,
6978 	 * because skb had a fragment, and fragments do not tell
6979 	 * their truesize.
6980 	 * When we pulled its content into skb->head, fragment
6981 	 * was freed, but __pskb_pull_tail() could not possibly
6982 	 * adjust skb->truesize, not knowing the frag truesize.
6983 	 */
6984 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6985 }
6986 EXPORT_SYMBOL(skb_condense);
6987 
6988 #ifdef CONFIG_SKB_EXTENSIONS
6989 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6990 {
6991 	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6992 }
6993 
6994 /**
6995  * __skb_ext_alloc - allocate a new skb extensions storage
6996  *
6997  * @flags: See kmalloc().
6998  *
6999  * Returns the newly allocated pointer. The pointer can later attached to a
7000  * skb via __skb_ext_set().
7001  * Note: caller must handle the skb_ext as an opaque data.
7002  */
7003 struct skb_ext *__skb_ext_alloc(gfp_t flags)
7004 {
7005 	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
7006 
7007 	if (new) {
7008 		memset(new->offset, 0, sizeof(new->offset));
7009 		refcount_set(&new->refcnt, 1);
7010 	}
7011 
7012 	return new;
7013 }
7014 
7015 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
7016 					 unsigned int old_active)
7017 {
7018 	struct skb_ext *new;
7019 
7020 	if (refcount_read(&old->refcnt) == 1)
7021 		return old;
7022 
7023 	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
7024 	if (!new)
7025 		return NULL;
7026 
7027 	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
7028 	refcount_set(&new->refcnt, 1);
7029 
7030 #ifdef CONFIG_XFRM
7031 	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
7032 		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
7033 		unsigned int i;
7034 
7035 		for (i = 0; i < sp->len; i++)
7036 			xfrm_state_hold(sp->xvec[i]);
7037 	}
7038 #endif
7039 #ifdef CONFIG_MCTP_FLOWS
7040 	if (old_active & (1 << SKB_EXT_MCTP)) {
7041 		struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
7042 
7043 		if (flow->key)
7044 			refcount_inc(&flow->key->refs);
7045 	}
7046 #endif
7047 	__skb_ext_put(old);
7048 	return new;
7049 }
7050 
7051 /**
7052  * __skb_ext_set - attach the specified extension storage to this skb
7053  * @skb: buffer
7054  * @id: extension id
7055  * @ext: extension storage previously allocated via __skb_ext_alloc()
7056  *
7057  * Existing extensions, if any, are cleared.
7058  *
7059  * Returns the pointer to the extension.
7060  */
7061 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
7062 		    struct skb_ext *ext)
7063 {
7064 	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
7065 
7066 	skb_ext_put(skb);
7067 	newlen = newoff + skb_ext_type_len[id];
7068 	ext->chunks = newlen;
7069 	ext->offset[id] = newoff;
7070 	skb->extensions = ext;
7071 	skb->active_extensions = 1 << id;
7072 	return skb_ext_get_ptr(ext, id);
7073 }
7074 EXPORT_SYMBOL_NS_GPL(__skb_ext_set, "NETDEV_INTERNAL");
7075 
7076 /**
7077  * skb_ext_add - allocate space for given extension, COW if needed
7078  * @skb: buffer
7079  * @id: extension to allocate space for
7080  *
7081  * Allocates enough space for the given extension.
7082  * If the extension is already present, a pointer to that extension
7083  * is returned.
7084  *
7085  * If the skb was cloned, COW applies and the returned memory can be
7086  * modified without changing the extension space of clones buffers.
7087  *
7088  * Returns pointer to the extension or NULL on allocation failure.
7089  */
7090 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
7091 {
7092 	struct skb_ext *new, *old = NULL;
7093 	unsigned int newlen, newoff;
7094 
7095 	if (skb->active_extensions) {
7096 		old = skb->extensions;
7097 
7098 		new = skb_ext_maybe_cow(old, skb->active_extensions);
7099 		if (!new)
7100 			return NULL;
7101 
7102 		if (__skb_ext_exist(new, id))
7103 			goto set_active;
7104 
7105 		newoff = new->chunks;
7106 	} else {
7107 		newoff = SKB_EXT_CHUNKSIZEOF(*new);
7108 
7109 		new = __skb_ext_alloc(GFP_ATOMIC);
7110 		if (!new)
7111 			return NULL;
7112 	}
7113 
7114 	newlen = newoff + skb_ext_type_len[id];
7115 	new->chunks = newlen;
7116 	new->offset[id] = newoff;
7117 set_active:
7118 	skb->slow_gro = 1;
7119 	skb->extensions = new;
7120 	skb->active_extensions |= 1 << id;
7121 	return skb_ext_get_ptr(new, id);
7122 }
7123 EXPORT_SYMBOL(skb_ext_add);
7124 
7125 #ifdef CONFIG_XFRM
7126 static void skb_ext_put_sp(struct sec_path *sp)
7127 {
7128 	unsigned int i;
7129 
7130 	for (i = 0; i < sp->len; i++)
7131 		xfrm_state_put(sp->xvec[i]);
7132 }
7133 #endif
7134 
7135 #ifdef CONFIG_MCTP_FLOWS
7136 static void skb_ext_put_mctp(struct mctp_flow *flow)
7137 {
7138 	if (flow->key)
7139 		mctp_key_unref(flow->key);
7140 }
7141 #endif
7142 
7143 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
7144 {
7145 	struct skb_ext *ext = skb->extensions;
7146 
7147 	skb->active_extensions &= ~(1 << id);
7148 	if (skb->active_extensions == 0) {
7149 		skb->extensions = NULL;
7150 		__skb_ext_put(ext);
7151 #ifdef CONFIG_XFRM
7152 	} else if (id == SKB_EXT_SEC_PATH &&
7153 		   refcount_read(&ext->refcnt) == 1) {
7154 		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
7155 
7156 		skb_ext_put_sp(sp);
7157 		sp->len = 0;
7158 #endif
7159 	}
7160 }
7161 EXPORT_SYMBOL(__skb_ext_del);
7162 
7163 void __skb_ext_put(struct skb_ext *ext)
7164 {
7165 	/* If this is last clone, nothing can increment
7166 	 * it after check passes.  Avoids one atomic op.
7167 	 */
7168 	if (refcount_read(&ext->refcnt) == 1)
7169 		goto free_now;
7170 
7171 	if (!refcount_dec_and_test(&ext->refcnt))
7172 		return;
7173 free_now:
7174 #ifdef CONFIG_XFRM
7175 	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
7176 		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
7177 #endif
7178 #ifdef CONFIG_MCTP_FLOWS
7179 	if (__skb_ext_exist(ext, SKB_EXT_MCTP))
7180 		skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
7181 #endif
7182 
7183 	kmem_cache_free(skbuff_ext_cache, ext);
7184 }
7185 EXPORT_SYMBOL(__skb_ext_put);
7186 #endif /* CONFIG_SKB_EXTENSIONS */
7187 
7188 static void kfree_skb_napi_cache(struct sk_buff *skb)
7189 {
7190 	/* if SKB is a clone, don't handle this case */
7191 	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
7192 		__kfree_skb(skb);
7193 		return;
7194 	}
7195 
7196 	local_bh_disable();
7197 	__napi_kfree_skb(skb, SKB_CONSUMED);
7198 	local_bh_enable();
7199 }
7200 
7201 /**
7202  * skb_attempt_defer_free - queue skb for remote freeing
7203  * @skb: buffer
7204  *
7205  * Put @skb in a per-cpu list, using the cpu which
7206  * allocated the skb/pages to reduce false sharing
7207  * and memory zone spinlock contention.
7208  */
7209 void skb_attempt_defer_free(struct sk_buff *skb)
7210 {
7211 	struct skb_defer_node *sdn;
7212 	unsigned long defer_count;
7213 	int cpu = skb->alloc_cpu;
7214 	unsigned int defer_max;
7215 	bool kick;
7216 
7217 	if (cpu == raw_smp_processor_id() ||
7218 	    WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
7219 	    !cpu_online(cpu)) {
7220 nodefer:	kfree_skb_napi_cache(skb);
7221 		return;
7222 	}
7223 
7224 	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
7225 	DEBUG_NET_WARN_ON_ONCE(skb->destructor);
7226 	DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb));
7227 
7228 	sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
7229 
7230 	defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
7231 	defer_count = atomic_long_inc_return(&sdn->defer_count);
7232 
7233 	if (defer_count >= defer_max)
7234 		goto nodefer;
7235 
7236 	llist_add(&skb->ll_node, &sdn->defer_list);
7237 
7238 	/* Send an IPI every time queue reaches half capacity. */
7239 	kick = (defer_count - 1) == (defer_max >> 1);
7240 
7241 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
7242 	 * if we are unlucky enough (this seems very unlikely).
7243 	 */
7244 	if (unlikely(kick))
7245 		kick_defer_list_purge(cpu);
7246 }
7247 
7248 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
7249 				 size_t offset, size_t len)
7250 {
7251 	const char *kaddr;
7252 	__wsum csum;
7253 
7254 	kaddr = kmap_local_page(page);
7255 	csum = csum_partial(kaddr + offset, len, 0);
7256 	kunmap_local(kaddr);
7257 	skb->csum = csum_block_add(skb->csum, csum, skb->len);
7258 }
7259 
7260 /**
7261  * skb_splice_from_iter - Splice (or copy) pages to skbuff
7262  * @skb: The buffer to add pages to
7263  * @iter: Iterator representing the pages to be added
7264  * @maxsize: Maximum amount of pages to be added
7265  *
7266  * This is a common helper function for supporting MSG_SPLICE_PAGES.  It
7267  * extracts pages from an iterator and adds them to the socket buffer if
7268  * possible, copying them to fragments if not possible (such as if they're slab
7269  * pages).
7270  *
7271  * Returns the amount of data spliced/copied or -EMSGSIZE if there's
7272  * insufficient space in the buffer to transfer anything.
7273  */
7274 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
7275 			     ssize_t maxsize)
7276 {
7277 	size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags);
7278 	struct page *pages[8], **ppages = pages;
7279 	ssize_t spliced = 0, ret = 0;
7280 	unsigned int i;
7281 
7282 	while (iter->count > 0) {
7283 		ssize_t space, nr, len;
7284 		size_t off;
7285 
7286 		ret = -EMSGSIZE;
7287 		space = frag_limit - skb_shinfo(skb)->nr_frags;
7288 		if (space < 0)
7289 			break;
7290 
7291 		/* We might be able to coalesce without increasing nr_frags */
7292 		nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages));
7293 
7294 		len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
7295 		if (len <= 0) {
7296 			ret = len ?: -EIO;
7297 			break;
7298 		}
7299 
7300 		i = 0;
7301 		do {
7302 			struct page *page = pages[i++];
7303 			size_t part = min_t(size_t, PAGE_SIZE - off, len);
7304 
7305 			ret = -EIO;
7306 			if (WARN_ON_ONCE(!sendpage_ok(page)))
7307 				goto out;
7308 
7309 			ret = skb_append_pagefrags(skb, page, off, part,
7310 						   frag_limit);
7311 			if (ret < 0) {
7312 				iov_iter_revert(iter, len);
7313 				goto out;
7314 			}
7315 
7316 			if (skb->ip_summed == CHECKSUM_NONE)
7317 				skb_splice_csum_page(skb, page, off, part);
7318 
7319 			off = 0;
7320 			spliced += part;
7321 			maxsize -= part;
7322 			len -= part;
7323 		} while (len > 0);
7324 
7325 		if (maxsize <= 0)
7326 			break;
7327 	}
7328 
7329 out:
7330 	skb_len_add(skb, spliced);
7331 	return spliced ?: ret;
7332 }
7333 EXPORT_SYMBOL(skb_splice_from_iter);
7334 
7335 static __always_inline
7336 size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
7337 			     size_t len, void *to, void *priv2)
7338 {
7339 	__wsum *csum = priv2;
7340 	__wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
7341 
7342 	*csum = csum_block_add(*csum, next, progress);
7343 	return 0;
7344 }
7345 
7346 static __always_inline
7347 size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
7348 				size_t len, void *to, void *priv2)
7349 {
7350 	__wsum next, *csum = priv2;
7351 
7352 	next = csum_and_copy_from_user(iter_from, to + progress, len);
7353 	*csum = csum_block_add(*csum, next, progress);
7354 	return next ? 0 : len;
7355 }
7356 
7357 bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
7358 				  __wsum *csum, struct iov_iter *i)
7359 {
7360 	size_t copied;
7361 
7362 	if (WARN_ON_ONCE(!i->data_source))
7363 		return false;
7364 	copied = iterate_and_advance2(i, bytes, addr, csum,
7365 				      copy_from_user_iter_csum,
7366 				      memcpy_from_iter_csum);
7367 	if (likely(copied == bytes))
7368 		return true;
7369 	iov_iter_revert(i, copied);
7370 	return false;
7371 }
7372 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
7373 
7374 void get_netmem(netmem_ref netmem)
7375 {
7376 	struct net_iov *niov;
7377 
7378 	if (netmem_is_net_iov(netmem)) {
7379 		niov = netmem_to_net_iov(netmem);
7380 		if (net_is_devmem_iov(niov))
7381 			net_devmem_get_net_iov(netmem_to_net_iov(netmem));
7382 		return;
7383 	}
7384 	get_page(netmem_to_page(netmem));
7385 }
7386 EXPORT_SYMBOL(get_netmem);
7387 
7388 void put_netmem(netmem_ref netmem)
7389 {
7390 	struct net_iov *niov;
7391 
7392 	if (netmem_is_net_iov(netmem)) {
7393 		niov = netmem_to_net_iov(netmem);
7394 		if (net_is_devmem_iov(niov))
7395 			net_devmem_put_net_iov(netmem_to_net_iov(netmem));
7396 		return;
7397 	}
7398 
7399 	put_page(netmem_to_page(netmem));
7400 }
7401 EXPORT_SYMBOL(put_netmem);
7402