1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 
41 #include <linux/module.h>
42 #include <linux/types.h>
43 #include <linux/kernel.h>
44 #include <linux/kmemcheck.h>
45 #include <linux/mm.h>
46 #include <linux/interrupt.h>
47 #include <linux/in.h>
48 #include <linux/inet.h>
49 #include <linux/slab.h>
50 #include <linux/netdevice.h>
51 #ifdef CONFIG_NET_CLS_ACT
52 #include <net/pkt_sched.h>
53 #endif
54 #include <linux/string.h>
55 #include <linux/skbuff.h>
56 #include <linux/splice.h>
57 #include <linux/cache.h>
58 #include <linux/rtnetlink.h>
59 #include <linux/init.h>
60 #include <linux/scatterlist.h>
61 #include <linux/errqueue.h>
62 #include <linux/prefetch.h>
63 
64 #include <net/protocol.h>
65 #include <net/dst.h>
66 #include <net/sock.h>
67 #include <net/checksum.h>
68 #include <net/xfrm.h>
69 
70 #include <asm/uaccess.h>
71 #include <trace/events/skb.h>
72 #include <linux/highmem.h>
73 
74 struct kmem_cache *skbuff_head_cache __read_mostly;
75 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
76 
77 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
78 				  struct pipe_buffer *buf)
79 {
80 	put_page(buf->page);
81 }
82 
83 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
84 				struct pipe_buffer *buf)
85 {
86 	get_page(buf->page);
87 }
88 
89 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
90 			       struct pipe_buffer *buf)
91 {
92 	return 1;
93 }
94 
95 
96 /* Pipe buffer operations for a socket. */
97 static const struct pipe_buf_operations sock_pipe_buf_ops = {
98 	.can_merge = 0,
99 	.map = generic_pipe_buf_map,
100 	.unmap = generic_pipe_buf_unmap,
101 	.confirm = generic_pipe_buf_confirm,
102 	.release = sock_pipe_buf_release,
103 	.steal = sock_pipe_buf_steal,
104 	.get = sock_pipe_buf_get,
105 };
106 
107 /**
108  *	skb_panic - private function for out-of-line support
109  *	@skb:	buffer
110  *	@sz:	size
111  *	@addr:	address
112  *	@msg:	skb_over_panic or skb_under_panic
113  *
114  *	Out-of-line support for skb_put() and skb_push().
115  *	Called via the wrapper skb_over_panic() or skb_under_panic().
116  *	Keep out of line to prevent kernel bloat.
117  *	__builtin_return_address is not used because it is not always reliable.
118  */
119 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
120 		      const char msg[])
121 {
122 	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
123 		 msg, addr, skb->len, sz, skb->head, skb->data,
124 		 (unsigned long)skb->tail, (unsigned long)skb->end,
125 		 skb->dev ? skb->dev->name : "<NULL>");
126 	BUG();
127 }
128 
129 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
130 {
131 	skb_panic(skb, sz, addr, __func__);
132 }
133 
134 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
135 {
136 	skb_panic(skb, sz, addr, __func__);
137 }
138 
139 /*
140  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
141  * the caller if emergency pfmemalloc reserves are being used. If it is and
142  * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
143  * may be used. Otherwise, the packet data may be discarded until enough
144  * memory is free
145  */
146 #define kmalloc_reserve(size, gfp, node, pfmemalloc) \
147 	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
148 
149 static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
150 			       unsigned long ip, bool *pfmemalloc)
151 {
152 	void *obj;
153 	bool ret_pfmemalloc = false;
154 
155 	/*
156 	 * Try a regular allocation, when that fails and we're not entitled
157 	 * to the reserves, fail.
158 	 */
159 	obj = kmalloc_node_track_caller(size,
160 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
161 					node);
162 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
163 		goto out;
164 
165 	/* Try again but now we are using pfmemalloc reserves */
166 	ret_pfmemalloc = true;
167 	obj = kmalloc_node_track_caller(size, flags, node);
168 
169 out:
170 	if (pfmemalloc)
171 		*pfmemalloc = ret_pfmemalloc;
172 
173 	return obj;
174 }
175 
176 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
177  *	'private' fields and also do memory statistics to find all the
178  *	[BEEP] leaks.
179  *
180  */
181 
182 /**
183  *	__alloc_skb	-	allocate a network buffer
184  *	@size: size to allocate
185  *	@gfp_mask: allocation mask
186  *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
187  *		instead of head cache and allocate a cloned (child) skb.
188  *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
189  *		allocations in case the data is required for writeback
190  *	@node: numa node to allocate memory on
191  *
192  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
193  *	tail room of at least size bytes. The object has a reference count
194  *	of one. The return is the buffer. On a failure the return is %NULL.
195  *
196  *	Buffers may only be allocated from interrupts using a @gfp_mask of
197  *	%GFP_ATOMIC.
198  */
199 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
200 			    int flags, int node)
201 {
202 	struct kmem_cache *cache;
203 	struct skb_shared_info *shinfo;
204 	struct sk_buff *skb;
205 	u8 *data;
206 	bool pfmemalloc;
207 
208 	cache = (flags & SKB_ALLOC_FCLONE)
209 		? skbuff_fclone_cache : skbuff_head_cache;
210 
211 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
212 		gfp_mask |= __GFP_MEMALLOC;
213 
214 	/* Get the HEAD */
215 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
216 	if (!skb)
217 		goto out;
218 	prefetchw(skb);
219 
220 	/* We do our best to align skb_shared_info on a separate cache
221 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
222 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
223 	 * Both skb->head and skb_shared_info are cache line aligned.
224 	 */
225 	size = SKB_DATA_ALIGN(size);
226 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
227 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
228 	if (!data)
229 		goto nodata;
230 	/* kmalloc(size) might give us more room than requested.
231 	 * Put skb_shared_info exactly at the end of allocated zone,
232 	 * to allow max possible filling before reallocation.
233 	 */
234 	size = SKB_WITH_OVERHEAD(ksize(data));
235 	prefetchw(data + size);
236 
237 	/*
238 	 * Only clear those fields we need to clear, not those that we will
239 	 * actually initialise below. Hence, don't put any more fields after
240 	 * the tail pointer in struct sk_buff!
241 	 */
242 	memset(skb, 0, offsetof(struct sk_buff, tail));
243 	/* Account for allocated memory : skb + skb->head */
244 	skb->truesize = SKB_TRUESIZE(size);
245 	skb->pfmemalloc = pfmemalloc;
246 	atomic_set(&skb->users, 1);
247 	skb->head = data;
248 	skb->data = data;
249 	skb_reset_tail_pointer(skb);
250 	skb->end = skb->tail + size;
251 #ifdef NET_SKBUFF_DATA_USES_OFFSET
252 	skb->mac_header = ~0U;
253 	skb->transport_header = ~0U;
254 #endif
255 
256 	/* make sure we initialize shinfo sequentially */
257 	shinfo = skb_shinfo(skb);
258 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
259 	atomic_set(&shinfo->dataref, 1);
260 	kmemcheck_annotate_variable(shinfo->destructor_arg);
261 
262 	if (flags & SKB_ALLOC_FCLONE) {
263 		struct sk_buff *child = skb + 1;
264 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
265 
266 		kmemcheck_annotate_bitfield(child, flags1);
267 		kmemcheck_annotate_bitfield(child, flags2);
268 		skb->fclone = SKB_FCLONE_ORIG;
269 		atomic_set(fclone_ref, 1);
270 
271 		child->fclone = SKB_FCLONE_UNAVAILABLE;
272 		child->pfmemalloc = pfmemalloc;
273 	}
274 out:
275 	return skb;
276 nodata:
277 	kmem_cache_free(cache, skb);
278 	skb = NULL;
279 	goto out;
280 }
281 EXPORT_SYMBOL(__alloc_skb);
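
/* Illustrative only: most callers reach __alloc_skb() through the alloc_skb()
 * wrapper (flags == 0, node == NUMA_NO_NODE) and then carve the buffer up
 * with skb_reserve()/skb_put().  A minimal sketch, where payload and len are
 * stand-ins for caller data:
 *
 *	struct sk_buff *skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);		(headroom for later pushes)
 *	memcpy(skb_put(skb, len), payload, len);
 */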
282 
283 /**
284  * build_skb - build a network buffer
285  * @data: data buffer provided by caller
286  * @frag_size: size of fragment, or 0 if head was kmalloced
287  *
288  * Allocate a new &sk_buff. Caller provides space holding head and
289  * skb_shared_info. @data must have been allocated by kmalloc()
290  * The return is the new skb buffer.
291  * On a failure the return is %NULL, and @data is not freed.
292  * Notes :
293  *  Before IO, driver allocates only data buffer where NIC puts incoming frame
294  *  Driver should add room at head (NET_SKB_PAD) and
295  *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
296  *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
297  *  before giving packet to stack.
298  *  RX rings only contain data buffers, not full skbs.
299  */
300 struct sk_buff *build_skb(void *data, unsigned int frag_size)
301 {
302 	struct skb_shared_info *shinfo;
303 	struct sk_buff *skb;
304 	unsigned int size = frag_size ? : ksize(data);
305 
306 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
307 	if (!skb)
308 		return NULL;
309 
310 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
311 
312 	memset(skb, 0, offsetof(struct sk_buff, tail));
313 	skb->truesize = SKB_TRUESIZE(size);
314 	skb->head_frag = frag_size != 0;
315 	atomic_set(&skb->users, 1);
316 	skb->head = data;
317 	skb->data = data;
318 	skb_reset_tail_pointer(skb);
319 	skb->end = skb->tail + size;
320 #ifdef NET_SKBUFF_DATA_USES_OFFSET
321 	skb->mac_header = ~0U;
322 	skb->transport_header = ~0U;
323 #endif
324 
325 	/* make sure we initialize shinfo sequentially */
326 	shinfo = skb_shinfo(skb);
327 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
328 	atomic_set(&shinfo->dataref, 1);
329 	kmemcheck_annotate_variable(shinfo->destructor_arg);
330 
331 	return skb;
332 }
333 EXPORT_SYMBOL(build_skb);
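
/* Illustrative only: the intended driver usage, mirroring __netdev_alloc_skb()
 * below.  The driver sizes its receive buffer so that skb_shared_info fits at
 * the end, lets the NIC DMA into it, and only then builds the skb (rx_len and
 * frame_len are stand-ins for driver-specific values):
 *
 *	buflen = SKB_DATA_ALIGN(NET_SKB_PAD + rx_len) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	data = netdev_alloc_frag(buflen);
 *	... NIC writes the frame at data + NET_SKB_PAD ...
 *	skb = build_skb(data, buflen);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */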
334 
335 struct netdev_alloc_cache {
336 	struct page_frag	frag;
337 	/* We maintain a pagecount bias, so that we don't dirty the cache line
338 	 * containing page->_count every time we allocate a fragment.
339 	 */
340 	unsigned int		pagecnt_bias;
341 };
342 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
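
/* Illustrative accounting sketch: the page starts with NETDEV_PAGECNT_MAX_BIAS
 * references pre-charged to page->_count and mirrored in pagecnt_bias, so
 * handing out a fragment is just "offset += fragsz; pagecnt_bias--" with no
 * atomic operation on the shared page->_count.  Only when the page is
 * exhausted does a single atomic_sub_and_test() of the unused bias decide
 * whether the page can be recycled (we were the last holder) or must be
 * replaced (consumers still hold references).
 */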
343 
344 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
345 {
346 	struct netdev_alloc_cache *nc;
347 	void *data = NULL;
348 	int order;
349 	unsigned long flags;
350 
351 	local_irq_save(flags);
352 	nc = &__get_cpu_var(netdev_alloc_cache);
353 	if (unlikely(!nc->frag.page)) {
354 refill:
355 		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
356 			gfp_t gfp = gfp_mask;
357 
358 			if (order)
359 				gfp |= __GFP_COMP | __GFP_NOWARN;
360 			nc->frag.page = alloc_pages(gfp, order);
361 			if (likely(nc->frag.page))
362 				break;
363 			if (--order < 0)
364 				goto end;
365 		}
366 		nc->frag.size = PAGE_SIZE << order;
367 recycle:
368 		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
369 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
370 		nc->frag.offset = 0;
371 	}
372 
373 	if (nc->frag.offset + fragsz > nc->frag.size) {
374 		/* avoid unnecessary locked operations if possible */
375 		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
376 		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
377 			goto recycle;
378 		goto refill;
379 	}
380 
381 	data = page_address(nc->frag.page) + nc->frag.offset;
382 	nc->frag.offset += fragsz;
383 	nc->pagecnt_bias--;
384 end:
385 	local_irq_restore(flags);
386 	return data;
387 }
388 
389 /**
390  * netdev_alloc_frag - allocate a page fragment
391  * @fragsz: fragment size
392  *
393  * Allocates a frag from a page for receive buffer.
394  * Uses GFP_ATOMIC allocations.
395  */
396 void *netdev_alloc_frag(unsigned int fragsz)
397 {
398 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
399 }
400 EXPORT_SYMBOL(netdev_alloc_frag);
401 
402 /**
403  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
404  *	@dev: network device to receive on
405  *	@length: length to allocate
406  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
407  *
408  *	Allocate a new &sk_buff and assign it a usage count of one. The
409  *	buffer has unspecified headroom built in. Users should allocate
410  *	the headroom they think they need without accounting for the
411  *	built in space. The built in space is used for optimisations.
412  *
413  *	%NULL is returned if there is no free memory.
414  */
415 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
416 				   unsigned int length, gfp_t gfp_mask)
417 {
418 	struct sk_buff *skb = NULL;
419 	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
420 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
421 
422 	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
423 		void *data;
424 
425 		if (sk_memalloc_socks())
426 			gfp_mask |= __GFP_MEMALLOC;
427 
428 		data = __netdev_alloc_frag(fragsz, gfp_mask);
429 
430 		if (likely(data)) {
431 			skb = build_skb(data, fragsz);
432 			if (unlikely(!skb))
433 				put_page(virt_to_head_page(data));
434 		}
435 	} else {
436 		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
437 				  SKB_ALLOC_RX, NUMA_NO_NODE);
438 	}
439 	if (likely(skb)) {
440 		skb_reserve(skb, NET_SKB_PAD);
441 		skb->dev = dev;
442 	}
443 	return skb;
444 }
445 EXPORT_SYMBOL(__netdev_alloc_skb);
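
/* Illustrative only: a typical driver RX path uses the netdev_alloc_skb()
 * wrapper (GFP_ATOMIC) and only reserves its own alignment, not NET_SKB_PAD,
 * which is already accounted for here (rx_buf and pkt_len are stand-ins):
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */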
446 
447 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
448 		     int size, unsigned int truesize)
449 {
450 	skb_fill_page_desc(skb, i, page, off, size);
451 	skb->len += size;
452 	skb->data_len += size;
453 	skb->truesize += truesize;
454 }
455 EXPORT_SYMBOL(skb_add_rx_frag);
456 
457 static void skb_drop_list(struct sk_buff **listp)
458 {
459 	struct sk_buff *list = *listp;
460 
461 	*listp = NULL;
462 
463 	do {
464 		struct sk_buff *this = list;
465 		list = list->next;
466 		kfree_skb(this);
467 	} while (list);
468 }
469 
470 static inline void skb_drop_fraglist(struct sk_buff *skb)
471 {
472 	skb_drop_list(&skb_shinfo(skb)->frag_list);
473 }
474 
475 static void skb_clone_fraglist(struct sk_buff *skb)
476 {
477 	struct sk_buff *list;
478 
479 	skb_walk_frags(skb, list)
480 		skb_get(list);
481 }
482 
483 static void skb_free_head(struct sk_buff *skb)
484 {
485 	if (skb->head_frag)
486 		put_page(virt_to_head_page(skb->head));
487 	else
488 		kfree(skb->head);
489 }
490 
491 static void skb_release_data(struct sk_buff *skb)
492 {
493 	if (!skb->cloned ||
494 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
495 			       &skb_shinfo(skb)->dataref)) {
496 		if (skb_shinfo(skb)->nr_frags) {
497 			int i;
498 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
499 				skb_frag_unref(skb, i);
500 		}
501 
502 		/*
503 		 * If the skb's buffers are from userspace, we need to notify the
504 		 * caller that the lower device's DMA has completed.
505 		 */
506 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
507 			struct ubuf_info *uarg;
508 
509 			uarg = skb_shinfo(skb)->destructor_arg;
510 			if (uarg->callback)
511 				uarg->callback(uarg, true);
512 		}
513 
514 		if (skb_has_frag_list(skb))
515 			skb_drop_fraglist(skb);
516 
517 		skb_free_head(skb);
518 	}
519 }
520 
521 /*
522  *	Free the memory of an skbuff without cleaning the state.
523  */
524 static void kfree_skbmem(struct sk_buff *skb)
525 {
526 	struct sk_buff *other;
527 	atomic_t *fclone_ref;
528 
529 	switch (skb->fclone) {
530 	case SKB_FCLONE_UNAVAILABLE:
531 		kmem_cache_free(skbuff_head_cache, skb);
532 		break;
533 
534 	case SKB_FCLONE_ORIG:
535 		fclone_ref = (atomic_t *) (skb + 2);
536 		if (atomic_dec_and_test(fclone_ref))
537 			kmem_cache_free(skbuff_fclone_cache, skb);
538 		break;
539 
540 	case SKB_FCLONE_CLONE:
541 		fclone_ref = (atomic_t *) (skb + 1);
542 		other = skb - 1;
543 
544 		/* The clone portion is available for
545 		 * fast-cloning again.
546 		 */
547 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
548 
549 		if (atomic_dec_and_test(fclone_ref))
550 			kmem_cache_free(skbuff_fclone_cache, other);
551 		break;
552 	}
553 }
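
/* Illustrative layout of an skbuff_fclone_cache object, as assumed by the
 * pointer arithmetic above and in __alloc_skb()/skb_clone():
 *
 *	+------------------+------------------+------------+
 *	| parent sk_buff   | child sk_buff    | atomic_t   |
 *	| (FCLONE_ORIG)    | (FCLONE_CLONE or | fclone_ref |
 *	|                  |  UNAVAILABLE)    |            |
 *	+------------------+------------------+------------+
 *	skb                skb + 1            skb + 2
 */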
554 
555 static void skb_release_head_state(struct sk_buff *skb)
556 {
557 	skb_dst_drop(skb);
558 #ifdef CONFIG_XFRM
559 	secpath_put(skb->sp);
560 #endif
561 	if (skb->destructor) {
562 		WARN_ON(in_irq());
563 		skb->destructor(skb);
564 	}
565 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
566 	nf_conntrack_put(skb->nfct);
567 #endif
568 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
569 	nf_conntrack_put_reasm(skb->nfct_reasm);
570 #endif
571 #ifdef CONFIG_BRIDGE_NETFILTER
572 	nf_bridge_put(skb->nf_bridge);
573 #endif
574 /* XXX: Is this still necessary? - JHS */
575 #ifdef CONFIG_NET_SCHED
576 	skb->tc_index = 0;
577 #ifdef CONFIG_NET_CLS_ACT
578 	skb->tc_verd = 0;
579 #endif
580 #endif
581 }
582 
583 /* Free everything but the sk_buff shell. */
584 static void skb_release_all(struct sk_buff *skb)
585 {
586 	skb_release_head_state(skb);
587 	skb_release_data(skb);
588 }
589 
590 /**
591  *	__kfree_skb - private function
592  *	@skb: buffer
593  *
594  *	Free an sk_buff. Release anything attached to the buffer.
595  *	Clean the state. This is an internal helper function. Users should
596  *	always call kfree_skb
597  */
598 
599 void __kfree_skb(struct sk_buff *skb)
600 {
601 	skb_release_all(skb);
602 	kfree_skbmem(skb);
603 }
604 EXPORT_SYMBOL(__kfree_skb);
605 
606 /**
607  *	kfree_skb - free an sk_buff
608  *	@skb: buffer to free
609  *
610  *	Drop a reference to the buffer and free it if the usage count has
611  *	hit zero.
612  */
613 void kfree_skb(struct sk_buff *skb)
614 {
615 	if (unlikely(!skb))
616 		return;
617 	if (likely(atomic_read(&skb->users) == 1))
618 		smp_rmb();
619 	else if (likely(!atomic_dec_and_test(&skb->users)))
620 		return;
621 	trace_kfree_skb(skb, __builtin_return_address(0));
622 	__kfree_skb(skb);
623 }
624 EXPORT_SYMBOL(kfree_skb);
625 
626 /**
627  *	skb_tx_error - report an sk_buff xmit error
628  *	@skb: buffer that triggered an error
629  *
630  *	Report xmit error if a device callback is tracking this skb.
631  *	skb must be freed afterwards.
632  */
633 void skb_tx_error(struct sk_buff *skb)
634 {
635 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
636 		struct ubuf_info *uarg;
637 
638 		uarg = skb_shinfo(skb)->destructor_arg;
639 		if (uarg->callback)
640 			uarg->callback(uarg, false);
641 		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
642 	}
643 }
644 EXPORT_SYMBOL(skb_tx_error);
645 
646 /**
647  *	consume_skb - free an skbuff
648  *	@skb: buffer to free
649  *
650  *	Drop a ref to the buffer and free it if the usage count has hit zero.
651  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
652  *	is being dropped after a failure and notes that.
653  */
654 void consume_skb(struct sk_buff *skb)
655 {
656 	if (unlikely(!skb))
657 		return;
658 	if (likely(atomic_read(&skb->users) == 1))
659 		smp_rmb();
660 	else if (likely(!atomic_dec_and_test(&skb->users)))
661 		return;
662 	trace_consume_skb(skb);
663 	__kfree_skb(skb);
664 }
665 EXPORT_SYMBOL(consume_skb);
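
/* Illustrative only: kfree_skb() and consume_skb() free the buffer the same
 * way; the split only matters for tracing and drop accounting.  E.g. in a
 * hypothetical xmit path:
 *
 *	if (unlikely(err)) {
 *		kfree_skb(skb);		(dropped: shows up as a drop event)
 *		return NETDEV_TX_OK;
 *	}
 *	...
 *	consume_skb(skb);		(transmitted: normal end of life)
 */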
666 
667 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
668 {
669 	new->tstamp		= old->tstamp;
670 	new->dev		= old->dev;
671 	new->transport_header	= old->transport_header;
672 	new->network_header	= old->network_header;
673 	new->mac_header		= old->mac_header;
674 	new->inner_transport_header = old->inner_transport_header;
675 	new->inner_network_header = old->inner_network_header;
676 	new->inner_mac_header = old->inner_mac_header;
677 	skb_dst_copy(new, old);
678 	new->rxhash		= old->rxhash;
679 	new->ooo_okay		= old->ooo_okay;
680 	new->l4_rxhash		= old->l4_rxhash;
681 	new->no_fcs		= old->no_fcs;
682 	new->encapsulation	= old->encapsulation;
683 #ifdef CONFIG_XFRM
684 	new->sp			= secpath_get(old->sp);
685 #endif
686 	memcpy(new->cb, old->cb, sizeof(old->cb));
687 	new->csum		= old->csum;
688 	new->local_df		= old->local_df;
689 	new->pkt_type		= old->pkt_type;
690 	new->ip_summed		= old->ip_summed;
691 	skb_copy_queue_mapping(new, old);
692 	new->priority		= old->priority;
693 #if IS_ENABLED(CONFIG_IP_VS)
694 	new->ipvs_property	= old->ipvs_property;
695 #endif
696 	new->pfmemalloc		= old->pfmemalloc;
697 	new->protocol		= old->protocol;
698 	new->mark		= old->mark;
699 	new->skb_iif		= old->skb_iif;
700 	__nf_copy(new, old);
701 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
702 	new->nf_trace		= old->nf_trace;
703 #endif
704 #ifdef CONFIG_NET_SCHED
705 	new->tc_index		= old->tc_index;
706 #ifdef CONFIG_NET_CLS_ACT
707 	new->tc_verd		= old->tc_verd;
708 #endif
709 #endif
710 	new->vlan_tci		= old->vlan_tci;
711 
712 	skb_copy_secmark(new, old);
713 }
714 
715 /*
716  * You should not add any new code to this function.  Add it to
717  * __copy_skb_header above instead.
718  */
719 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
720 {
721 #define C(x) n->x = skb->x
722 
723 	n->next = n->prev = NULL;
724 	n->sk = NULL;
725 	__copy_skb_header(n, skb);
726 
727 	C(len);
728 	C(data_len);
729 	C(mac_len);
730 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
731 	n->cloned = 1;
732 	n->nohdr = 0;
733 	n->destructor = NULL;
734 	C(tail);
735 	C(end);
736 	C(head);
737 	C(head_frag);
738 	C(data);
739 	C(truesize);
740 	atomic_set(&n->users, 1);
741 
742 	atomic_inc(&(skb_shinfo(skb)->dataref));
743 	skb->cloned = 1;
744 
745 	return n;
746 #undef C
747 }
748 
749 /**
750  *	skb_morph	-	morph one skb into another
751  *	@dst: the skb to receive the contents
752  *	@src: the skb to supply the contents
753  *
754  *	This is identical to skb_clone except that the target skb is
755  *	supplied by the user.
756  *
757  *	The target skb is returned upon exit.
758  */
759 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
760 {
761 	skb_release_all(dst);
762 	return __skb_clone(dst, src);
763 }
764 EXPORT_SYMBOL_GPL(skb_morph);
765 
766 /**
767  *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
768  *	@skb: the skb to modify
769  *	@gfp_mask: allocation priority
770  *
771  *	This must be called on SKBTX_DEV_ZEROCOPY skb.
772  *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
773  *	It will copy all frags into kernel memory and drop the reference
774  *
775  *	If this function is called from an interrupt gfp_mask() must be
776  *	%GFP_ATOMIC.
777  *
778  *	Returns 0 on success or a negative error code on failure
779  *	to allocate kernel memory to copy to.
780  */
781 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
782 {
783 	int i;
784 	int num_frags = skb_shinfo(skb)->nr_frags;
785 	struct page *page, *head = NULL;
786 	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
787 
788 	for (i = 0; i < num_frags; i++) {
789 		u8 *vaddr;
790 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
791 
792 		page = alloc_page(gfp_mask);
793 		if (!page) {
794 			while (head) {
795 				struct page *next = (struct page *)head->private;
796 				put_page(head);
797 				head = next;
798 			}
799 			return -ENOMEM;
800 		}
801 		vaddr = kmap_atomic(skb_frag_page(f));
802 		memcpy(page_address(page),
803 		       vaddr + f->page_offset, skb_frag_size(f));
804 		kunmap_atomic(vaddr);
805 		page->private = (unsigned long)head;
806 		head = page;
807 	}
808 
809 	/* skb frags release userspace buffers */
810 	for (i = 0; i < num_frags; i++)
811 		skb_frag_unref(skb, i);
812 
813 	uarg->callback(uarg, false);
814 
815 	/* skb frags point to kernel buffers */
816 	for (i = num_frags - 1; i >= 0; i--) {
817 		__skb_fill_page_desc(skb, i, head, 0,
818 				     skb_shinfo(skb)->frags[i].size);
819 		head = (struct page *)head->private;
820 	}
821 
822 	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
823 	return 0;
824 }
825 EXPORT_SYMBOL_GPL(skb_copy_ubufs);
826 
827 /**
828  *	skb_clone	-	duplicate an sk_buff
829  *	@skb: buffer to clone
830  *	@gfp_mask: allocation priority
831  *
832  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
833  *	copies share the same packet data but not structure. The new
834  *	buffer has a reference count of 1. If the allocation fails the
835  *	function returns %NULL otherwise the new buffer is returned.
836  *
837  *	If this function is called from an interrupt gfp_mask() must be
838  *	%GFP_ATOMIC.
839  */
840 
841 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
842 {
843 	struct sk_buff *n;
844 
845 	if (skb_orphan_frags(skb, gfp_mask))
846 		return NULL;
847 
848 	n = skb + 1;
849 	if (skb->fclone == SKB_FCLONE_ORIG &&
850 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
851 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
852 		n->fclone = SKB_FCLONE_CLONE;
853 		atomic_inc(fclone_ref);
854 	} else {
855 		if (skb_pfmemalloc(skb))
856 			gfp_mask |= __GFP_MEMALLOC;
857 
858 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
859 		if (!n)
860 			return NULL;
861 
862 		kmemcheck_annotate_bitfield(n, flags1);
863 		kmemcheck_annotate_bitfield(n, flags2);
864 		n->fclone = SKB_FCLONE_UNAVAILABLE;
865 	}
866 
867 	return __skb_clone(n, skb);
868 }
869 EXPORT_SYMBOL(skb_clone);
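
/* Illustrative only: a clone shares the data area with the original, so it
 * is fine for read-only consumers, but a writer must first get a private,
 * writable head (hdr_len is a stand-in for the caller's header size):
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *	if (nskb && skb_cow_head(nskb, hdr_len) == 0)
 *		... safe to skb_push() a new header on nskb ...
 */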
870 
871 static void skb_headers_offset_update(struct sk_buff *skb, int off)
872 {
873 	/* {transport,network,mac}_header and tail are relative to skb->head */
874 	skb->transport_header += off;
875 	skb->network_header   += off;
876 	if (skb_mac_header_was_set(skb))
877 		skb->mac_header += off;
878 	skb->inner_transport_header += off;
879 	skb->inner_network_header += off;
880 	skb->inner_mac_header += off;
881 }
882 
883 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
884 {
885 #ifndef NET_SKBUFF_DATA_USES_OFFSET
886 	/*
887 	 *	Shift between the two data areas in bytes
888 	 */
889 	unsigned long offset = new->data - old->data;
890 #endif
891 
892 	__copy_skb_header(new, old);
893 
894 #ifndef NET_SKBUFF_DATA_USES_OFFSET
895 	skb_headers_offset_update(new, offset);
896 #endif
897 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
898 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
899 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
900 }
901 
902 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
903 {
904 	if (skb_pfmemalloc(skb))
905 		return SKB_ALLOC_RX;
906 	return 0;
907 }
908 
909 /**
910  *	skb_copy	-	create private copy of an sk_buff
911  *	@skb: buffer to copy
912  *	@gfp_mask: allocation priority
913  *
914  *	Make a copy of both an &sk_buff and its data. This is used when the
915  *	caller wishes to modify the data and needs a private copy of the
916  *	data to alter. Returns %NULL on failure or the pointer to the buffer
917  *	on success. The returned buffer has a reference count of 1.
918  *
919  *	As a by-product this function converts a non-linear &sk_buff to a
920  *	linear one, so that the &sk_buff becomes completely private and the
921  *	caller is allowed to modify all the data of the returned buffer. This
922  *	means that this function is not recommended for use in circumstances
923  *	when only the header is going to be modified. Use pskb_copy() instead.
924  */
925 
926 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
927 {
928 	int headerlen = skb_headroom(skb);
929 	unsigned int size = skb_end_offset(skb) + skb->data_len;
930 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
931 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
932 
933 	if (!n)
934 		return NULL;
935 
936 	/* Set the data pointer */
937 	skb_reserve(n, headerlen);
938 	/* Set the tail pointer and length */
939 	skb_put(n, skb->len);
940 
941 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
942 		BUG();
943 
944 	copy_skb_header(n, skb);
945 	return n;
946 }
947 EXPORT_SYMBOL(skb_copy);
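
/* Illustrative only: when only the headers will be rewritten, pskb_copy()
 * (private linear head, frags still shared) is the cheaper choice:
 *
 *	new = skb_copy(skb, GFP_ATOMIC);	(fully private, linearized)
 *	new = pskb_copy(skb, GFP_ATOMIC);	(private header, shared frags)
 */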
948 
949 /**
950  *	__pskb_copy	-	create copy of an sk_buff with private head.
951  *	@skb: buffer to copy
952  *	@headroom: headroom of new skb
953  *	@gfp_mask: allocation priority
954  *
955  *	Make a copy of both an &sk_buff and part of its data, located
956  *	in the header. Fragmented data remain shared. This is used when
957  *	the caller wishes to modify only the header of the &sk_buff and needs
958  *	a private copy of the header to alter. Returns %NULL on failure
959  *	or the pointer to the buffer on success.
960  *	The returned buffer has a reference count of 1.
961  */
962 
963 struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
964 {
965 	unsigned int size = skb_headlen(skb) + headroom;
966 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
967 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
968 
969 	if (!n)
970 		goto out;
971 
972 	/* Set the data pointer */
973 	skb_reserve(n, headroom);
974 	/* Set the tail pointer and length */
975 	skb_put(n, skb_headlen(skb));
976 	/* Copy the bytes */
977 	skb_copy_from_linear_data(skb, n->data, n->len);
978 
979 	n->truesize += skb->data_len;
980 	n->data_len  = skb->data_len;
981 	n->len	     = skb->len;
982 
983 	if (skb_shinfo(skb)->nr_frags) {
984 		int i;
985 
986 		if (skb_orphan_frags(skb, gfp_mask)) {
987 			kfree_skb(n);
988 			n = NULL;
989 			goto out;
990 		}
991 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
992 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
993 			skb_frag_ref(skb, i);
994 		}
995 		skb_shinfo(n)->nr_frags = i;
996 	}
997 
998 	if (skb_has_frag_list(skb)) {
999 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1000 		skb_clone_fraglist(n);
1001 	}
1002 
1003 	copy_skb_header(n, skb);
1004 out:
1005 	return n;
1006 }
1007 EXPORT_SYMBOL(__pskb_copy);
1008 
1009 /**
1010  *	pskb_expand_head - reallocate header of &sk_buff
1011  *	@skb: buffer to reallocate
1012  *	@nhead: room to add at head
1013  *	@ntail: room to add at tail
1014  *	@gfp_mask: allocation priority
1015  *
1016  *	Expands (or creates identical copy, if &nhead and &ntail are zero)
1017  *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
1018  *	reference count of 1. Returns zero on success or a negative error
1019  *	code if expansion failed. In the latter case, &sk_buff is not changed.
1020  *
1021  *	All the pointers pointing into skb header may change and must be
1022  *	reloaded after call to this function.
1023  */
1024 
1025 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1026 		     gfp_t gfp_mask)
1027 {
1028 	int i;
1029 	u8 *data;
1030 	int size = nhead + skb_end_offset(skb) + ntail;
1031 	long off;
1032 
1033 	BUG_ON(nhead < 0);
1034 
1035 	if (skb_shared(skb))
1036 		BUG();
1037 
1038 	size = SKB_DATA_ALIGN(size);
1039 
1040 	if (skb_pfmemalloc(skb))
1041 		gfp_mask |= __GFP_MEMALLOC;
1042 	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1043 			       gfp_mask, NUMA_NO_NODE, NULL);
1044 	if (!data)
1045 		goto nodata;
1046 	size = SKB_WITH_OVERHEAD(ksize(data));
1047 
1048 	/* Copy only real data... and, alas, header. This should be
1049 	 * optimized for the cases when header is void.
1050 	 */
1051 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1052 
1053 	memcpy((struct skb_shared_info *)(data + size),
1054 	       skb_shinfo(skb),
1055 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1056 
1057 	/*
1058 	 * If shinfo is shared we must drop the old head gracefully, but if it
1059 	 * is not we can just drop the old head and let the existing refcount
1060 	 * be, since all we did was relocate the values.
1061 	 */
1062 	if (skb_cloned(skb)) {
1063 		/* copy this zero copy skb frags */
1064 		if (skb_orphan_frags(skb, gfp_mask))
1065 			goto nofrags;
1066 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1067 			skb_frag_ref(skb, i);
1068 
1069 		if (skb_has_frag_list(skb))
1070 			skb_clone_fraglist(skb);
1071 
1072 		skb_release_data(skb);
1073 	} else {
1074 		skb_free_head(skb);
1075 	}
1076 	off = (data + nhead) - skb->head;
1077 
1078 	skb->head     = data;
1079 	skb->head_frag = 0;
1080 	skb->data    += off;
1081 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1082 	skb->end      = size;
1083 	off           = nhead;
1084 #else
1085 	skb->end      = skb->head + size;
1086 #endif
1087 	skb->tail	      += off;
1088 	skb_headers_offset_update(skb, off);
1089 	/* Only adjust this if it actually is csum_start rather than csum */
1090 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1091 		skb->csum_start += nhead;
1092 	skb->cloned   = 0;
1093 	skb->hdr_len  = 0;
1094 	skb->nohdr    = 0;
1095 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1096 	return 0;
1097 
1098 nofrags:
1099 	kfree(data);
1100 nodata:
1101 	return -ENOMEM;
1102 }
1103 EXPORT_SYMBOL(pskb_expand_head);
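
/* Illustrative only: a common pattern when a header must be pushed but the
 * headroom may be too small or the head may be shared (hdr_len is a
 * stand-in; skb_cow_head() wraps essentially this check):
 *
 *	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
 *		if (pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len), 0,
 *				     GFP_ATOMIC))
 *			goto drop;
 *	}
 *	hdr = skb_push(skb, hdr_len);
 */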
1104 
1105 /* Make private copy of skb with writable head and some headroom */
1106 
1107 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1108 {
1109 	struct sk_buff *skb2;
1110 	int delta = headroom - skb_headroom(skb);
1111 
1112 	if (delta <= 0)
1113 		skb2 = pskb_copy(skb, GFP_ATOMIC);
1114 	else {
1115 		skb2 = skb_clone(skb, GFP_ATOMIC);
1116 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1117 					     GFP_ATOMIC)) {
1118 			kfree_skb(skb2);
1119 			skb2 = NULL;
1120 		}
1121 	}
1122 	return skb2;
1123 }
1124 EXPORT_SYMBOL(skb_realloc_headroom);
1125 
1126 /**
1127  *	skb_copy_expand	-	copy and expand sk_buff
1128  *	@skb: buffer to copy
1129  *	@newheadroom: new free bytes at head
1130  *	@newtailroom: new free bytes at tail
1131  *	@gfp_mask: allocation priority
1132  *
1133  *	Make a copy of both an &sk_buff and its data and while doing so
1134  *	allocate additional space.
1135  *
1136  *	This is used when the caller wishes to modify the data and needs a
1137  *	private copy of the data to alter as well as more space for new fields.
1138  *	Returns %NULL on failure or the pointer to the buffer
1139  *	on success. The returned buffer has a reference count of 1.
1140  *
1141  *	You must pass %GFP_ATOMIC as the allocation priority if this function
1142  *	is called from an interrupt.
1143  */
1144 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1145 				int newheadroom, int newtailroom,
1146 				gfp_t gfp_mask)
1147 {
1148 	/*
1149 	 *	Allocate the copy buffer
1150 	 */
1151 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1152 					gfp_mask, skb_alloc_rx_flag(skb),
1153 					NUMA_NO_NODE);
1154 	int oldheadroom = skb_headroom(skb);
1155 	int head_copy_len, head_copy_off;
1156 	int off;
1157 
1158 	if (!n)
1159 		return NULL;
1160 
1161 	skb_reserve(n, newheadroom);
1162 
1163 	/* Set the tail pointer and length */
1164 	skb_put(n, skb->len);
1165 
1166 	head_copy_len = oldheadroom;
1167 	head_copy_off = 0;
1168 	if (newheadroom <= head_copy_len)
1169 		head_copy_len = newheadroom;
1170 	else
1171 		head_copy_off = newheadroom - head_copy_len;
1172 
1173 	/* Copy the linear header and data. */
1174 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1175 			  skb->len + head_copy_len))
1176 		BUG();
1177 
1178 	copy_skb_header(n, skb);
1179 
1180 	off                  = newheadroom - oldheadroom;
1181 	if (n->ip_summed == CHECKSUM_PARTIAL)
1182 		n->csum_start += off;
1183 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1184 	skb_headers_offset_update(n, off);
1185 #endif
1186 
1187 	return n;
1188 }
1189 EXPORT_SYMBOL(skb_copy_expand);
1190 
1191 /**
1192  *	skb_pad			-	zero pad the tail of an skb
1193  *	@skb: buffer to pad
1194  *	@pad: space to pad
1195  *
1196  *	Ensure that a buffer is followed by a padding area that is zero
1197  *	filled. Used by network drivers which may DMA or transfer data
1198  *	beyond the buffer end onto the wire.
1199  *
1200  *	May return an error in out-of-memory cases. The skb is freed on error.
1201  */
1202 
1203 int skb_pad(struct sk_buff *skb, int pad)
1204 {
1205 	int err;
1206 	int ntail;
1207 
1208 	/* If the skbuff is non-linear, tailroom is always zero. */
1209 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1210 		memset(skb->data+skb->len, 0, pad);
1211 		return 0;
1212 	}
1213 
1214 	ntail = skb->data_len + pad - (skb->end - skb->tail);
1215 	if (likely(skb_cloned(skb) || ntail > 0)) {
1216 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1217 		if (unlikely(err))
1218 			goto free_skb;
1219 	}
1220 
1221 	/* FIXME: The use of this function with non-linear skb's really needs
1222 	 * to be audited.
1223 	 */
1224 	err = skb_linearize(skb);
1225 	if (unlikely(err))
1226 		goto free_skb;
1227 
1228 	memset(skb->data + skb->len, 0, pad);
1229 	return 0;
1230 
1231 free_skb:
1232 	kfree_skb(skb);
1233 	return err;
1234 }
1235 EXPORT_SYMBOL(skb_pad);
1236 
1237 /**
1238  *	skb_put - add data to a buffer
1239  *	@skb: buffer to use
1240  *	@len: amount of data to add
1241  *
1242  *	This function extends the used data area of the buffer. If this would
1243  *	exceed the total buffer size the kernel will panic. A pointer to the
1244  *	first byte of the extra data is returned.
1245  */
1246 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1247 {
1248 	unsigned char *tmp = skb_tail_pointer(skb);
1249 	SKB_LINEAR_ASSERT(skb);
1250 	skb->tail += len;
1251 	skb->len  += len;
1252 	if (unlikely(skb->tail > skb->end))
1253 		skb_over_panic(skb, len, __builtin_return_address(0));
1254 	return tmp;
1255 }
1256 EXPORT_SYMBOL(skb_put);
1257 
1258 /**
1259  *	skb_push - add data to the start of a buffer
1260  *	@skb: buffer to use
1261  *	@len: amount of data to add
1262  *
1263  *	This function extends the used data area of the buffer at the buffer
1264  *	start. If this would exceed the total buffer headroom the kernel will
1265  *	panic. A pointer to the first byte of the extra data is returned.
1266  */
1267 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1268 {
1269 	skb->data -= len;
1270 	skb->len  += len;
1271 	if (unlikely(skb->data<skb->head))
1272 		skb_under_panic(skb, len, __builtin_return_address(0));
1273 	return skb->data;
1274 }
1275 EXPORT_SYMBOL(skb_push);
1276 
1277 /**
1278  *	skb_pull - remove data from the start of a buffer
1279  *	@skb: buffer to use
1280  *	@len: amount of data to remove
1281  *
1282  *	This function removes data from the start of a buffer, returning
1283  *	the memory to the headroom. A pointer to the next data in the buffer
1284  *	is returned. Once the data has been pulled, future pushes will overwrite
1285  *	the old data.
1286  */
1287 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1288 {
1289 	return skb_pull_inline(skb, len);
1290 }
1291 EXPORT_SYMBOL(skb_pull);
1292 
1293 /**
1294  *	skb_trim - remove end from a buffer
1295  *	@skb: buffer to alter
1296  *	@len: new length
1297  *
1298  *	Cut the length of a buffer down by removing data from the tail. If
1299  *	the buffer is already under the length specified it is not modified.
1300  *	The skb must be linear.
1301  */
1302 void skb_trim(struct sk_buff *skb, unsigned int len)
1303 {
1304 	if (skb->len > len)
1305 		__skb_trim(skb, len);
1306 }
1307 EXPORT_SYMBOL(skb_trim);
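
/* Illustrative only: how the pointer-moving helpers above cooperate when a
 * frame is built on transmit and parsed again on receive (data, data_len,
 * pad_len and eth are stand-ins):
 *
 *	skb_reserve(skb, ETH_HLEN);			(leave headroom)
 *	memcpy(skb_put(skb, data_len), data, data_len);	(append payload)
 *	eth = skb_push(skb, ETH_HLEN);			(prepend link header)
 *	...
 *	skb_pull(skb, ETH_HLEN);			(consume link header)
 *	skb_trim(skb, data_len - pad_len);		(drop trailing padding)
 */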
1308 
1309 /* Trims skb to length len. It can change skb pointers.
1310  */
1311 
1312 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1313 {
1314 	struct sk_buff **fragp;
1315 	struct sk_buff *frag;
1316 	int offset = skb_headlen(skb);
1317 	int nfrags = skb_shinfo(skb)->nr_frags;
1318 	int i;
1319 	int err;
1320 
1321 	if (skb_cloned(skb) &&
1322 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1323 		return err;
1324 
1325 	i = 0;
1326 	if (offset >= len)
1327 		goto drop_pages;
1328 
1329 	for (; i < nfrags; i++) {
1330 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1331 
1332 		if (end < len) {
1333 			offset = end;
1334 			continue;
1335 		}
1336 
1337 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1338 
1339 drop_pages:
1340 		skb_shinfo(skb)->nr_frags = i;
1341 
1342 		for (; i < nfrags; i++)
1343 			skb_frag_unref(skb, i);
1344 
1345 		if (skb_has_frag_list(skb))
1346 			skb_drop_fraglist(skb);
1347 		goto done;
1348 	}
1349 
1350 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1351 	     fragp = &frag->next) {
1352 		int end = offset + frag->len;
1353 
1354 		if (skb_shared(frag)) {
1355 			struct sk_buff *nfrag;
1356 
1357 			nfrag = skb_clone(frag, GFP_ATOMIC);
1358 			if (unlikely(!nfrag))
1359 				return -ENOMEM;
1360 
1361 			nfrag->next = frag->next;
1362 			consume_skb(frag);
1363 			frag = nfrag;
1364 			*fragp = frag;
1365 		}
1366 
1367 		if (end < len) {
1368 			offset = end;
1369 			continue;
1370 		}
1371 
1372 		if (end > len &&
1373 		    unlikely((err = pskb_trim(frag, len - offset))))
1374 			return err;
1375 
1376 		if (frag->next)
1377 			skb_drop_list(&frag->next);
1378 		break;
1379 	}
1380 
1381 done:
1382 	if (len > skb_headlen(skb)) {
1383 		skb->data_len -= skb->len - len;
1384 		skb->len       = len;
1385 	} else {
1386 		skb->len       = len;
1387 		skb->data_len  = 0;
1388 		skb_set_tail_pointer(skb, len);
1389 	}
1390 
1391 	return 0;
1392 }
1393 EXPORT_SYMBOL(___pskb_trim);
1394 
1395 /**
1396  *	__pskb_pull_tail - advance tail of skb header
1397  *	@skb: buffer to reallocate
1398  *	@delta: number of bytes to advance tail
1399  *
1400  *	The function makes sense only on a fragmented &sk_buff:
1401  *	it expands the header, moving its tail forward and copying necessary
1402  *	data from the fragmented part.
1403  *
1404  *	&sk_buff MUST have reference count of 1.
1405  *
1406  *	Returns %NULL (and &sk_buff does not change) if pull failed
1407  *	or value of new tail of skb in the case of success.
1408  *
1409  *	All the pointers pointing into skb header may change and must be
1410  *	reloaded after call to this function.
1411  */
1412 
1413 /* Moves tail of skb head forward, copying data from fragmented part,
1414  * when it is necessary.
1415  * 1. It may fail due to malloc failure.
1416  * 2. It may change skb pointers.
1417  *
1418  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1419  */
1420 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1421 {
1422 	/* If the skb does not have enough free space at the tail, get a new one
1423 	 * plus 128 bytes for future expansions. If we have enough
1424 	 * room at the tail, reallocate without expansion only if the skb is cloned.
1425 	 */
1426 	int i, k, eat = (skb->tail + delta) - skb->end;
1427 
1428 	if (eat > 0 || skb_cloned(skb)) {
1429 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1430 				     GFP_ATOMIC))
1431 			return NULL;
1432 	}
1433 
1434 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1435 		BUG();
1436 
1437 	/* Optimization: no fragments, no reason to pre-estimate the
1438 	 * size of pulled pages. Superb.
1439 	 */
1440 	if (!skb_has_frag_list(skb))
1441 		goto pull_pages;
1442 
1443 	/* Estimate size of pulled pages. */
1444 	eat = delta;
1445 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1446 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1447 
1448 		if (size >= eat)
1449 			goto pull_pages;
1450 		eat -= size;
1451 	}
1452 
1453 	/* If we need to update the frag list, we are in trouble.
1454 	 * Certainly, it is possible to add an offset to the skb data,
1455 	 * but taking into account that pulling is expected to
1456 	 * be a very rare operation, it is worth fighting against
1457 	 * further bloating of the skb head and crucifying ourselves here instead.
1458 	 * Pure masochism, indeed. 8)8)
1459 	 */
1460 	if (eat) {
1461 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1462 		struct sk_buff *clone = NULL;
1463 		struct sk_buff *insp = NULL;
1464 
1465 		do {
1466 			BUG_ON(!list);
1467 
1468 			if (list->len <= eat) {
1469 				/* Eaten as whole. */
1470 				eat -= list->len;
1471 				list = list->next;
1472 				insp = list;
1473 			} else {
1474 				/* Eaten partially. */
1475 
1476 				if (skb_shared(list)) {
1477 					/* Sucks! We need to fork list. :-( */
1478 					clone = skb_clone(list, GFP_ATOMIC);
1479 					if (!clone)
1480 						return NULL;
1481 					insp = list->next;
1482 					list = clone;
1483 				} else {
1484 					/* This may be pulled without
1485 					 * problems. */
1486 					insp = list;
1487 				}
1488 				if (!pskb_pull(list, eat)) {
1489 					kfree_skb(clone);
1490 					return NULL;
1491 				}
1492 				break;
1493 			}
1494 		} while (eat);
1495 
1496 		/* Free pulled out fragments. */
1497 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1498 			skb_shinfo(skb)->frag_list = list->next;
1499 			kfree_skb(list);
1500 		}
1501 		/* And insert new clone at head. */
1502 		if (clone) {
1503 			clone->next = list;
1504 			skb_shinfo(skb)->frag_list = clone;
1505 		}
1506 	}
1507 	/* Success! Now we may commit changes to skb data. */
1508 
1509 pull_pages:
1510 	eat = delta;
1511 	k = 0;
1512 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1513 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1514 
1515 		if (size <= eat) {
1516 			skb_frag_unref(skb, i);
1517 			eat -= size;
1518 		} else {
1519 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1520 			if (eat) {
1521 				skb_shinfo(skb)->frags[k].page_offset += eat;
1522 				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1523 				eat = 0;
1524 			}
1525 			k++;
1526 		}
1527 	}
1528 	skb_shinfo(skb)->nr_frags = k;
1529 
1530 	skb->tail     += delta;
1531 	skb->data_len -= delta;
1532 
1533 	return skb_tail_pointer(skb);
1534 }
1535 EXPORT_SYMBOL(__pskb_pull_tail);
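
/* Illustrative only: callers normally reach __pskb_pull_tail() through
 * pskb_may_pull(), which only falls back to it when the requested bytes are
 * not already in the linear area:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);	(now safe: the header is linear)
 */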
1536 
1537 /**
1538  *	skb_copy_bits - copy bits from skb to kernel buffer
1539  *	@skb: source skb
1540  *	@offset: offset in source
1541  *	@to: destination buffer
1542  *	@len: number of bytes to copy
1543  *
1544  *	Copy the specified number of bytes from the source skb to the
1545  *	destination buffer.
1546  *
1547  *	CAUTION ! :
1548  *		If its prototype is ever changed,
1549  *		check arch/{*}/net/{*}.S files,
1550  *		since it is called from BPF assembly code.
1551  */
1552 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1553 {
1554 	int start = skb_headlen(skb);
1555 	struct sk_buff *frag_iter;
1556 	int i, copy;
1557 
1558 	if (offset > (int)skb->len - len)
1559 		goto fault;
1560 
1561 	/* Copy header. */
1562 	if ((copy = start - offset) > 0) {
1563 		if (copy > len)
1564 			copy = len;
1565 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1566 		if ((len -= copy) == 0)
1567 			return 0;
1568 		offset += copy;
1569 		to     += copy;
1570 	}
1571 
1572 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1573 		int end;
1574 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1575 
1576 		WARN_ON(start > offset + len);
1577 
1578 		end = start + skb_frag_size(f);
1579 		if ((copy = end - offset) > 0) {
1580 			u8 *vaddr;
1581 
1582 			if (copy > len)
1583 				copy = len;
1584 
1585 			vaddr = kmap_atomic(skb_frag_page(f));
1586 			memcpy(to,
1587 			       vaddr + f->page_offset + offset - start,
1588 			       copy);
1589 			kunmap_atomic(vaddr);
1590 
1591 			if ((len -= copy) == 0)
1592 				return 0;
1593 			offset += copy;
1594 			to     += copy;
1595 		}
1596 		start = end;
1597 	}
1598 
1599 	skb_walk_frags(skb, frag_iter) {
1600 		int end;
1601 
1602 		WARN_ON(start > offset + len);
1603 
1604 		end = start + frag_iter->len;
1605 		if ((copy = end - offset) > 0) {
1606 			if (copy > len)
1607 				copy = len;
1608 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1609 				goto fault;
1610 			if ((len -= copy) == 0)
1611 				return 0;
1612 			offset += copy;
1613 			to     += copy;
1614 		}
1615 		start = end;
1616 	}
1617 
1618 	if (!len)
1619 		return 0;
1620 
1621 fault:
1622 	return -EFAULT;
1623 }
1624 EXPORT_SYMBOL(skb_copy_bits);
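
/* Illustrative only: skb_copy_bits() is the safe way to read bytes that may
 * not be in the linear area, e.g. peeking at a transport header at offset
 * "off" (a stand-in); skb_header_pointer() wraps this pattern:
 *
 *	struct udphdr _uh;
 *	if (skb_copy_bits(skb, off, &_uh, sizeof(_uh)) < 0)
 *		goto bad;
 */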
1625 
1626 /*
1627  * Callback from splice_to_pipe(), if we need to release some pages
1628  * at the end of the spd in case we errored out while filling the pipe.
1629  */
1630 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1631 {
1632 	put_page(spd->pages[i]);
1633 }
1634 
1635 static struct page *linear_to_page(struct page *page, unsigned int *len,
1636 				   unsigned int *offset,
1637 				   struct sock *sk)
1638 {
1639 	struct page_frag *pfrag = sk_page_frag(sk);
1640 
1641 	if (!sk_page_frag_refill(sk, pfrag))
1642 		return NULL;
1643 
1644 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1645 
1646 	memcpy(page_address(pfrag->page) + pfrag->offset,
1647 	       page_address(page) + *offset, *len);
1648 	*offset = pfrag->offset;
1649 	pfrag->offset += *len;
1650 
1651 	return pfrag->page;
1652 }
1653 
1654 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1655 			     struct page *page,
1656 			     unsigned int offset)
1657 {
1658 	return	spd->nr_pages &&
1659 		spd->pages[spd->nr_pages - 1] == page &&
1660 		(spd->partial[spd->nr_pages - 1].offset +
1661 		 spd->partial[spd->nr_pages - 1].len == offset);
1662 }
1663 
1664 /*
1665  * Fill page/offset/length into spd, if it can hold more pages.
1666  */
1667 static bool spd_fill_page(struct splice_pipe_desc *spd,
1668 			  struct pipe_inode_info *pipe, struct page *page,
1669 			  unsigned int *len, unsigned int offset,
1670 			  bool linear,
1671 			  struct sock *sk)
1672 {
1673 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1674 		return true;
1675 
1676 	if (linear) {
1677 		page = linear_to_page(page, len, &offset, sk);
1678 		if (!page)
1679 			return true;
1680 	}
1681 	if (spd_can_coalesce(spd, page, offset)) {
1682 		spd->partial[spd->nr_pages - 1].len += *len;
1683 		return false;
1684 	}
1685 	get_page(page);
1686 	spd->pages[spd->nr_pages] = page;
1687 	spd->partial[spd->nr_pages].len = *len;
1688 	spd->partial[spd->nr_pages].offset = offset;
1689 	spd->nr_pages++;
1690 
1691 	return false;
1692 }
1693 
1694 static bool __splice_segment(struct page *page, unsigned int poff,
1695 			     unsigned int plen, unsigned int *off,
1696 			     unsigned int *len,
1697 			     struct splice_pipe_desc *spd, bool linear,
1698 			     struct sock *sk,
1699 			     struct pipe_inode_info *pipe)
1700 {
1701 	if (!*len)
1702 		return true;
1703 
1704 	/* skip this segment if already processed */
1705 	if (*off >= plen) {
1706 		*off -= plen;
1707 		return false;
1708 	}
1709 
1710 	/* ignore any bits we already processed */
1711 	poff += *off;
1712 	plen -= *off;
1713 	*off = 0;
1714 
1715 	do {
1716 		unsigned int flen = min(*len, plen);
1717 
1718 		if (spd_fill_page(spd, pipe, page, &flen, poff,
1719 				  linear, sk))
1720 			return true;
1721 		poff += flen;
1722 		plen -= flen;
1723 		*len -= flen;
1724 	} while (*len && plen);
1725 
1726 	return false;
1727 }
1728 
1729 /*
1730  * Map linear and fragment data from the skb to spd. It reports true if the
1731  * pipe is full or if we already spliced the requested length.
1732  */
1733 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1734 			      unsigned int *offset, unsigned int *len,
1735 			      struct splice_pipe_desc *spd, struct sock *sk)
1736 {
1737 	int seg;
1738 
1739 	/* map the linear part :
1740 	 * If skb->head_frag is set, this 'linear' part is backed by a
1741 	 * fragment, and if the head is not shared with any clones then
1742 	 * we can avoid a copy since we own the head portion of this page.
1743 	 */
1744 	if (__splice_segment(virt_to_page(skb->data),
1745 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1746 			     skb_headlen(skb),
1747 			     offset, len, spd,
1748 			     skb_head_is_locked(skb),
1749 			     sk, pipe))
1750 		return true;
1751 
1752 	/*
1753 	 * then map the fragments
1754 	 */
1755 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1756 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1757 
1758 		if (__splice_segment(skb_frag_page(f),
1759 				     f->page_offset, skb_frag_size(f),
1760 				     offset, len, spd, false, sk, pipe))
1761 			return true;
1762 	}
1763 
1764 	return false;
1765 }
1766 
1767 /*
1768  * Map data from the skb to a pipe. Should handle both the linear part,
1769  * the fragments, and the frag list. It does NOT handle frag lists within
1770  * the frag list, if such a thing exists. We'd probably need to recurse to
1771  * handle that cleanly.
1772  */
1773 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1774 		    struct pipe_inode_info *pipe, unsigned int tlen,
1775 		    unsigned int flags)
1776 {
1777 	struct partial_page partial[MAX_SKB_FRAGS];
1778 	struct page *pages[MAX_SKB_FRAGS];
1779 	struct splice_pipe_desc spd = {
1780 		.pages = pages,
1781 		.partial = partial,
1782 		.nr_pages_max = MAX_SKB_FRAGS,
1783 		.flags = flags,
1784 		.ops = &sock_pipe_buf_ops,
1785 		.spd_release = sock_spd_release,
1786 	};
1787 	struct sk_buff *frag_iter;
1788 	struct sock *sk = skb->sk;
1789 	int ret = 0;
1790 
1791 	/*
1792 	 * __skb_splice_bits() only fails if the output has no room left,
1793 	 * so no point in going over the frag_list for the error case.
1794 	 */
1795 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1796 		goto done;
1797 	else if (!tlen)
1798 		goto done;
1799 
1800 	/*
1801 	 * now see if we have a frag_list to map
1802 	 */
1803 	skb_walk_frags(skb, frag_iter) {
1804 		if (!tlen)
1805 			break;
1806 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1807 			break;
1808 	}
1809 
1810 done:
1811 	if (spd.nr_pages) {
1812 		/*
1813 		 * Drop the socket lock, otherwise we have reverse
1814 		 * locking dependencies between sk_lock and i_mutex
1815 		 * here as compared to sendfile(). We enter here
1816 		 * with the socket lock held, and splice_to_pipe() will
1817 		 * grab the pipe inode lock. For sendfile() emulation,
1818 		 * we call into ->sendpage() with the i_mutex lock held
1819 		 * and networking will grab the socket lock.
1820 		 */
1821 		release_sock(sk);
1822 		ret = splice_to_pipe(pipe, &spd);
1823 		lock_sock(sk);
1824 	}
1825 
1826 	return ret;
1827 }
1828 
1829 /**
1830  *	skb_store_bits - store bits from kernel buffer to skb
1831  *	@skb: destination buffer
1832  *	@offset: offset in destination
1833  *	@from: source buffer
1834  *	@len: number of bytes to copy
1835  *
1836  *	Copy the specified number of bytes from the source buffer to the
1837  *	destination skb.  This function handles all the messy bits of
1838  *	traversing fragment lists and such.
1839  */
1840 
1841 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1842 {
1843 	int start = skb_headlen(skb);
1844 	struct sk_buff *frag_iter;
1845 	int i, copy;
1846 
1847 	if (offset > (int)skb->len - len)
1848 		goto fault;
1849 
1850 	if ((copy = start - offset) > 0) {
1851 		if (copy > len)
1852 			copy = len;
1853 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1854 		if ((len -= copy) == 0)
1855 			return 0;
1856 		offset += copy;
1857 		from += copy;
1858 	}
1859 
1860 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1861 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1862 		int end;
1863 
1864 		WARN_ON(start > offset + len);
1865 
1866 		end = start + skb_frag_size(frag);
1867 		if ((copy = end - offset) > 0) {
1868 			u8 *vaddr;
1869 
1870 			if (copy > len)
1871 				copy = len;
1872 
1873 			vaddr = kmap_atomic(skb_frag_page(frag));
1874 			memcpy(vaddr + frag->page_offset + offset - start,
1875 			       from, copy);
1876 			kunmap_atomic(vaddr);
1877 
1878 			if ((len -= copy) == 0)
1879 				return 0;
1880 			offset += copy;
1881 			from += copy;
1882 		}
1883 		start = end;
1884 	}
1885 
1886 	skb_walk_frags(skb, frag_iter) {
1887 		int end;
1888 
1889 		WARN_ON(start > offset + len);
1890 
1891 		end = start + frag_iter->len;
1892 		if ((copy = end - offset) > 0) {
1893 			if (copy > len)
1894 				copy = len;
1895 			if (skb_store_bits(frag_iter, offset - start,
1896 					   from, copy))
1897 				goto fault;
1898 			if ((len -= copy) == 0)
1899 				return 0;
1900 			offset += copy;
1901 			from += copy;
1902 		}
1903 		start = end;
1904 	}
1905 	if (!len)
1906 		return 0;
1907 
1908 fault:
1909 	return -EFAULT;
1910 }
1911 EXPORT_SYMBOL(skb_store_bits);
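
/*
 * Usage sketch (illustrative, not part of the original file): overwrite a
 * few payload bytes of a private, possibly non-linear skb.  The offset and
 * byte pattern below are hypothetical.
 */
static int __maybe_unused example_store_bits(struct sk_buff *skb)
{
	static const u8 pattern[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* returns -EFAULT if the requested range is outside the buffer */
	return skb_store_bits(skb, 0, pattern, sizeof(pattern));
}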
1912 
1913 /* Checksum skb data. */
1914 
1915 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1916 			  int len, __wsum csum)
1917 {
1918 	int start = skb_headlen(skb);
1919 	int i, copy = start - offset;
1920 	struct sk_buff *frag_iter;
1921 	int pos = 0;
1922 
1923 	/* Checksum header. */
1924 	if (copy > 0) {
1925 		if (copy > len)
1926 			copy = len;
1927 		csum = csum_partial(skb->data + offset, copy, csum);
1928 		if ((len -= copy) == 0)
1929 			return csum;
1930 		offset += copy;
1931 		pos	= copy;
1932 	}
1933 
1934 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1935 		int end;
1936 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1937 
1938 		WARN_ON(start > offset + len);
1939 
1940 		end = start + skb_frag_size(frag);
1941 		if ((copy = end - offset) > 0) {
1942 			__wsum csum2;
1943 			u8 *vaddr;
1944 
1945 			if (copy > len)
1946 				copy = len;
1947 			vaddr = kmap_atomic(skb_frag_page(frag));
1948 			csum2 = csum_partial(vaddr + frag->page_offset +
1949 					     offset - start, copy, 0);
1950 			kunmap_atomic(vaddr);
1951 			csum = csum_block_add(csum, csum2, pos);
1952 			if (!(len -= copy))
1953 				return csum;
1954 			offset += copy;
1955 			pos    += copy;
1956 		}
1957 		start = end;
1958 	}
1959 
1960 	skb_walk_frags(skb, frag_iter) {
1961 		int end;
1962 
1963 		WARN_ON(start > offset + len);
1964 
1965 		end = start + frag_iter->len;
1966 		if ((copy = end - offset) > 0) {
1967 			__wsum csum2;
1968 			if (copy > len)
1969 				copy = len;
1970 			csum2 = skb_checksum(frag_iter, offset - start,
1971 					     copy, 0);
1972 			csum = csum_block_add(csum, csum2, pos);
1973 			if ((len -= copy) == 0)
1974 				return csum;
1975 			offset += copy;
1976 			pos    += copy;
1977 		}
1978 		start = end;
1979 	}
1980 	BUG_ON(len);
1981 
1982 	return csum;
1983 }
1984 EXPORT_SYMBOL(skb_checksum);
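
/*
 * Usage sketch (illustrative, not part of the original file): checksum an
 * skb's payload from a hypothetical transport-header offset @thoff and fold
 * the 32-bit partial sum into a 16-bit Internet checksum.
 */
static __sum16 __maybe_unused example_checksum(const struct sk_buff *skb,
					       int thoff)
{
	__wsum csum = skb_checksum(skb, thoff, skb->len - thoff, 0);

	return csum_fold(csum);
}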
1985 
1986 /* Both of above in one bottle. */
1987 
1988 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1989 				    u8 *to, int len, __wsum csum)
1990 {
1991 	int start = skb_headlen(skb);
1992 	int i, copy = start - offset;
1993 	struct sk_buff *frag_iter;
1994 	int pos = 0;
1995 
1996 	/* Copy header. */
1997 	if (copy > 0) {
1998 		if (copy > len)
1999 			copy = len;
2000 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2001 						 copy, csum);
2002 		if ((len -= copy) == 0)
2003 			return csum;
2004 		offset += copy;
2005 		to     += copy;
2006 		pos	= copy;
2007 	}
2008 
2009 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2010 		int end;
2011 
2012 		WARN_ON(start > offset + len);
2013 
2014 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2015 		if ((copy = end - offset) > 0) {
2016 			__wsum csum2;
2017 			u8 *vaddr;
2018 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2019 
2020 			if (copy > len)
2021 				copy = len;
2022 			vaddr = kmap_atomic(skb_frag_page(frag));
2023 			csum2 = csum_partial_copy_nocheck(vaddr +
2024 							  frag->page_offset +
2025 							  offset - start, to,
2026 							  copy, 0);
2027 			kunmap_atomic(vaddr);
2028 			csum = csum_block_add(csum, csum2, pos);
2029 			if (!(len -= copy))
2030 				return csum;
2031 			offset += copy;
2032 			to     += copy;
2033 			pos    += copy;
2034 		}
2035 		start = end;
2036 	}
2037 
2038 	skb_walk_frags(skb, frag_iter) {
2039 		__wsum csum2;
2040 		int end;
2041 
2042 		WARN_ON(start > offset + len);
2043 
2044 		end = start + frag_iter->len;
2045 		if ((copy = end - offset) > 0) {
2046 			if (copy > len)
2047 				copy = len;
2048 			csum2 = skb_copy_and_csum_bits(frag_iter,
2049 						       offset - start,
2050 						       to, copy, 0);
2051 			csum = csum_block_add(csum, csum2, pos);
2052 			if ((len -= copy) == 0)
2053 				return csum;
2054 			offset += copy;
2055 			to     += copy;
2056 			pos    += copy;
2057 		}
2058 		start = end;
2059 	}
2060 	BUG_ON(len);
2061 	return csum;
2062 }
2063 EXPORT_SYMBOL(skb_copy_and_csum_bits);
2064 
2065 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2066 {
2067 	__wsum csum;
2068 	long csstart;
2069 
2070 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2071 		csstart = skb_checksum_start_offset(skb);
2072 	else
2073 		csstart = skb_headlen(skb);
2074 
2075 	BUG_ON(csstart > skb_headlen(skb));
2076 
2077 	skb_copy_from_linear_data(skb, to, csstart);
2078 
2079 	csum = 0;
2080 	if (csstart != skb->len)
2081 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2082 					      skb->len - csstart, 0);
2083 
2084 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2085 		long csstuff = csstart + skb->csum_offset;
2086 
2087 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
2088 	}
2089 }
2090 EXPORT_SYMBOL(skb_copy_and_csum_dev);
2091 
2092 /**
2093  *	skb_dequeue - remove from the head of the queue
2094  *	@list: list to dequeue from
2095  *
2096  *	Remove the head of the list. The list lock is taken so the function
2097  *	may be used safely with other locking list functions. The head item is
2098  *	returned or %NULL if the list is empty.
2099  */
2100 
2101 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2102 {
2103 	unsigned long flags;
2104 	struct sk_buff *result;
2105 
2106 	spin_lock_irqsave(&list->lock, flags);
2107 	result = __skb_dequeue(list);
2108 	spin_unlock_irqrestore(&list->lock, flags);
2109 	return result;
2110 }
2111 EXPORT_SYMBOL(skb_dequeue);
2112 
2113 /**
2114  *	skb_dequeue_tail - remove from the tail of the queue
2115  *	@list: list to dequeue from
2116  *
2117  *	Remove the tail of the list. The list lock is taken so the function
2118  *	may be used safely with other locking list functions. The tail item is
2119  *	returned or %NULL if the list is empty.
2120  */
2121 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2122 {
2123 	unsigned long flags;
2124 	struct sk_buff *result;
2125 
2126 	spin_lock_irqsave(&list->lock, flags);
2127 	result = __skb_dequeue_tail(list);
2128 	spin_unlock_irqrestore(&list->lock, flags);
2129 	return result;
2130 }
2131 EXPORT_SYMBOL(skb_dequeue_tail);
2132 
2133 /**
2134  *	skb_queue_purge - empty a list
2135  *	@list: list to empty
2136  *
2137  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2138  *	the list and one reference dropped. This function takes the list
2139  *	lock and is atomic with respect to other list locking functions.
2140  */
2141 void skb_queue_purge(struct sk_buff_head *list)
2142 {
2143 	struct sk_buff *skb;
2144 	while ((skb = skb_dequeue(list)) != NULL)
2145 		kfree_skb(skb);
2146 }
2147 EXPORT_SYMBOL(skb_queue_purge);
2148 
2149 /**
2150  *	skb_queue_head - queue a buffer at the list head
2151  *	@list: list to use
2152  *	@newsk: buffer to queue
2153  *
2154  *	Queue a buffer at the start of the list. This function takes the
2155  *	list lock and can be used safely with other locking &sk_buff
2156  *	functions.
2157  *
2158  *	A buffer cannot be placed on two lists at the same time.
2159  */
2160 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2161 {
2162 	unsigned long flags;
2163 
2164 	spin_lock_irqsave(&list->lock, flags);
2165 	__skb_queue_head(list, newsk);
2166 	spin_unlock_irqrestore(&list->lock, flags);
2167 }
2168 EXPORT_SYMBOL(skb_queue_head);
2169 
2170 /**
2171  *	skb_queue_tail - queue a buffer at the list tail
2172  *	@list: list to use
2173  *	@newsk: buffer to queue
2174  *
2175  *	Queue a buffer at the tail of the list. This function takes the
2176  *	list lock and can be used safely with other locking &sk_buff
2177  *	functions.
2178  *
2179  *	A buffer cannot be placed on two lists at the same time.
2180  */
2181 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2182 {
2183 	unsigned long flags;
2184 
2185 	spin_lock_irqsave(&list->lock, flags);
2186 	__skb_queue_tail(list, newsk);
2187 	spin_unlock_irqrestore(&list->lock, flags);
2188 }
2189 EXPORT_SYMBOL(skb_queue_tail);
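
/*
 * Usage sketch (illustrative, not part of the original file): a private
 * sk_buff queue shared between a producer and a consumer.  The queue and
 * buffer here are hypothetical; because these helpers take the list lock,
 * the init/queue/dequeue/purge sequence needs no extra locking.
 */
static void __maybe_unused example_queue_usage(struct sk_buff *skb)
{
	struct sk_buff_head q;
	struct sk_buff *next;

	skb_queue_head_init(&q);
	skb_queue_tail(&q, skb);			/* producer side */

	while ((next = skb_dequeue(&q)) != NULL)	/* consumer side */
		kfree_skb(next);

	skb_queue_purge(&q);				/* drop anything left */
}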
2190 
2191 /**
2192  *	skb_unlink	-	remove a buffer from a list
2193  *	@skb: buffer to remove
2194  *	@list: list to use
2195  *
2196  *	Remove a packet from a list. The list locks are taken and this
2197  *	function is atomic with respect to other list locked calls
2198  *
2199  *	You must know what list the SKB is on.
2200  */
2201 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2202 {
2203 	unsigned long flags;
2204 
2205 	spin_lock_irqsave(&list->lock, flags);
2206 	__skb_unlink(skb, list);
2207 	spin_unlock_irqrestore(&list->lock, flags);
2208 }
2209 EXPORT_SYMBOL(skb_unlink);
2210 
2211 /**
2212  *	skb_append	-	append a buffer
2213  *	@old: buffer to insert after
2214  *	@newsk: buffer to insert
2215  *	@list: list to use
2216  *
2217  *	Place a packet after a given packet in a list. The list locks are taken
2218  *	and this function is atomic with respect to other list locked calls.
2219  *	A buffer cannot be placed on two lists at the same time.
2220  */
2221 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2222 {
2223 	unsigned long flags;
2224 
2225 	spin_lock_irqsave(&list->lock, flags);
2226 	__skb_queue_after(list, old, newsk);
2227 	spin_unlock_irqrestore(&list->lock, flags);
2228 }
2229 EXPORT_SYMBOL(skb_append);
2230 
2231 /**
2232  *	skb_insert	-	insert a buffer
2233  *	@old: buffer to insert before
2234  *	@newsk: buffer to insert
2235  *	@list: list to use
2236  *
2237  *	Place a packet before a given packet in a list. The list locks are
2238  * 	taken and this function is atomic with respect to other list locked
2239  *	calls.
2240  *
2241  *	A buffer cannot be placed on two lists at the same time.
2242  */
2243 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2244 {
2245 	unsigned long flags;
2246 
2247 	spin_lock_irqsave(&list->lock, flags);
2248 	__skb_insert(newsk, old->prev, old, list);
2249 	spin_unlock_irqrestore(&list->lock, flags);
2250 }
2251 EXPORT_SYMBOL(skb_insert);
2252 
2253 static inline void skb_split_inside_header(struct sk_buff *skb,
2254 					   struct sk_buff* skb1,
2255 					   const u32 len, const int pos)
2256 {
2257 	int i;
2258 
2259 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2260 					 pos - len);
2261 	/* And move data appendix as is. */
2262 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2263 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2264 
2265 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2266 	skb_shinfo(skb)->nr_frags  = 0;
2267 	skb1->data_len		   = skb->data_len;
2268 	skb1->len		   += skb1->data_len;
2269 	skb->data_len		   = 0;
2270 	skb->len		   = len;
2271 	skb_set_tail_pointer(skb, len);
2272 }
2273 
2274 static inline void skb_split_no_header(struct sk_buff *skb,
2275 				       struct sk_buff* skb1,
2276 				       const u32 len, int pos)
2277 {
2278 	int i, k = 0;
2279 	const int nfrags = skb_shinfo(skb)->nr_frags;
2280 
2281 	skb_shinfo(skb)->nr_frags = 0;
2282 	skb1->len		  = skb1->data_len = skb->len - len;
2283 	skb->len		  = len;
2284 	skb->data_len		  = len - pos;
2285 
2286 	for (i = 0; i < nfrags; i++) {
2287 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2288 
2289 		if (pos + size > len) {
2290 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2291 
2292 			if (pos < len) {
2293 				/* Split frag.
2294 				 * We have two variants in this case:
2295 				 * 1. Move the whole frag to the second
2296 				 *    part, if possible. E.g. this approach
2297 				 *    is mandatory for TUX, where splitting
2298 				 *    is expensive.
2299 				 * 2. Split accurately; this is what we do here.
2300 				 */
2301 				skb_frag_ref(skb, i);
2302 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2303 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2304 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2305 				skb_shinfo(skb)->nr_frags++;
2306 			}
2307 			k++;
2308 		} else
2309 			skb_shinfo(skb)->nr_frags++;
2310 		pos += size;
2311 	}
2312 	skb_shinfo(skb1)->nr_frags = k;
2313 }
2314 
2315 /**
2316  * skb_split - Split a fragmented skb into two parts at length len.
2317  * @skb: the buffer to split
2318  * @skb1: the buffer to receive the second part
2319  * @len: new length for skb
2320  */
2321 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2322 {
2323 	int pos = skb_headlen(skb);
2324 
2325 	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2326 	if (len < pos)	/* Split line is inside header. */
2327 		skb_split_inside_header(skb, skb1, len, pos);
2328 	else		/* Second chunk has no header, nothing to copy. */
2329 		skb_split_no_header(skb, skb1, len, pos);
2330 }
2331 EXPORT_SYMBOL(skb_split);
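
/*
 * Usage sketch (illustrative, not part of the original file, loosely
 * modelled on how TCP segments an oversized buffer): keep the first @len
 * bytes in @skb and move the rest to a freshly allocated second buffer.
 * Assumes @skb is private to the caller; the allocation size is a
 * hypothetical upper bound for any linear bytes that may be moved.
 */
static struct sk_buff *__maybe_unused example_split(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!skb1)
		return NULL;

	skb_split(skb, skb1, len);	/* skb keeps the first len bytes */
	return skb1;
}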
2332 
2333 /* Shifting from/to a cloned skb is a no-go.
2334  *
2335  * Caller cannot keep skb_shinfo related pointers past calling here!
2336  */
2337 static int skb_prepare_for_shift(struct sk_buff *skb)
2338 {
2339 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2340 }
2341 
2342 /**
2343  * skb_shift - Shifts paged data partially from skb to another
2344  * @tgt: buffer into which tail data gets added
2345  * @skb: buffer from which the paged data comes from
2346  * @shiftlen: shift up to this many bytes
2347  *
2348  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2349  * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
2350  * It's up to the caller to free skb if everything was shifted.
2351  *
2352  * If @tgt runs out of frags, the whole operation is aborted.
2353  *
2354  * The skb cannot contain anything but paged data, while tgt is allowed
2355  * to have non-paged data as well.
2356  *
2357  * TODO: full sized shift could be optimized but that would need
2358  * specialized skb free'er to handle frags without up-to-date nr_frags.
2359  */
2360 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2361 {
2362 	int from, to, merge, todo;
2363 	struct skb_frag_struct *fragfrom, *fragto;
2364 
2365 	BUG_ON(shiftlen > skb->len);
2366 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2367 
2368 	todo = shiftlen;
2369 	from = 0;
2370 	to = skb_shinfo(tgt)->nr_frags;
2371 	fragfrom = &skb_shinfo(skb)->frags[from];
2372 
2373 	/* Actual merge is delayed until the point when we know we can
2374 	 * commit all, so that we don't have to undo partial changes
2375 	 */
2376 	if (!to ||
2377 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2378 			      fragfrom->page_offset)) {
2379 		merge = -1;
2380 	} else {
2381 		merge = to - 1;
2382 
2383 		todo -= skb_frag_size(fragfrom);
2384 		if (todo < 0) {
2385 			if (skb_prepare_for_shift(skb) ||
2386 			    skb_prepare_for_shift(tgt))
2387 				return 0;
2388 
2389 			/* All previous frag pointers might be stale! */
2390 			fragfrom = &skb_shinfo(skb)->frags[from];
2391 			fragto = &skb_shinfo(tgt)->frags[merge];
2392 
2393 			skb_frag_size_add(fragto, shiftlen);
2394 			skb_frag_size_sub(fragfrom, shiftlen);
2395 			fragfrom->page_offset += shiftlen;
2396 
2397 			goto onlymerged;
2398 		}
2399 
2400 		from++;
2401 	}
2402 
2403 	/* Skip full, not-fitting skb to avoid expensive operations */
2404 	if ((shiftlen == skb->len) &&
2405 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2406 		return 0;
2407 
2408 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2409 		return 0;
2410 
2411 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2412 		if (to == MAX_SKB_FRAGS)
2413 			return 0;
2414 
2415 		fragfrom = &skb_shinfo(skb)->frags[from];
2416 		fragto = &skb_shinfo(tgt)->frags[to];
2417 
2418 		if (todo >= skb_frag_size(fragfrom)) {
2419 			*fragto = *fragfrom;
2420 			todo -= skb_frag_size(fragfrom);
2421 			from++;
2422 			to++;
2423 
2424 		} else {
2425 			__skb_frag_ref(fragfrom);
2426 			fragto->page = fragfrom->page;
2427 			fragto->page_offset = fragfrom->page_offset;
2428 			skb_frag_size_set(fragto, todo);
2429 
2430 			fragfrom->page_offset += todo;
2431 			skb_frag_size_sub(fragfrom, todo);
2432 			todo = 0;
2433 
2434 			to++;
2435 			break;
2436 		}
2437 	}
2438 
2439 	/* Ready to "commit" this state change to tgt */
2440 	skb_shinfo(tgt)->nr_frags = to;
2441 
2442 	if (merge >= 0) {
2443 		fragfrom = &skb_shinfo(skb)->frags[0];
2444 		fragto = &skb_shinfo(tgt)->frags[merge];
2445 
2446 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2447 		__skb_frag_unref(fragfrom);
2448 	}
2449 
2450 	/* Reposition in the original skb */
2451 	to = 0;
2452 	while (from < skb_shinfo(skb)->nr_frags)
2453 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2454 	skb_shinfo(skb)->nr_frags = to;
2455 
2456 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2457 
2458 onlymerged:
2459 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2460 	 * the other hand might need it if it needs to be resent
2461 	 */
2462 	tgt->ip_summed = CHECKSUM_PARTIAL;
2463 	skb->ip_summed = CHECKSUM_PARTIAL;
2464 
2465 	/* Yak, is it really working this way? Some helper please? */
2466 	skb->len -= shiftlen;
2467 	skb->data_len -= shiftlen;
2468 	skb->truesize -= shiftlen;
2469 	tgt->len += shiftlen;
2470 	tgt->data_len += shiftlen;
2471 	tgt->truesize += shiftlen;
2472 
2473 	return shiftlen;
2474 }
2475 
2476 /**
2477  * skb_prepare_seq_read - Prepare a sequential read of skb data
2478  * @skb: the buffer to read
2479  * @from: lower offset of data to be read
2480  * @to: upper offset of data to be read
2481  * @st: state variable
2482  *
2483  * Initializes the specified state variable. Must be called before
2484  * invoking skb_seq_read() for the first time.
2485  */
2486 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2487 			  unsigned int to, struct skb_seq_state *st)
2488 {
2489 	st->lower_offset = from;
2490 	st->upper_offset = to;
2491 	st->root_skb = st->cur_skb = skb;
2492 	st->frag_idx = st->stepped_offset = 0;
2493 	st->frag_data = NULL;
2494 }
2495 EXPORT_SYMBOL(skb_prepare_seq_read);
2496 
2497 /**
2498  * skb_seq_read - Sequentially read skb data
2499  * @consumed: number of bytes consumed by the caller so far
2500  * @data: destination pointer for data to be returned
2501  * @st: state variable
2502  *
2503  * Reads a block of skb data at &consumed relative to the
2504  * lower offset specified to skb_prepare_seq_read(). Assigns
2505  * the head of the data block to &data and returns the length
2506  * of the block or 0 if the end of the skb data or the upper
2507  * offset has been reached.
2508  *
2509  * The caller is not required to consume all of the data
2510  * returned, i.e. &consumed is typically set to the number
2511  * of bytes already consumed and the next call to
2512  * skb_seq_read() will return the remaining part of the block.
2513  *
2514  * Note 1: The size of each block of data returned can be arbitrary;
2515  *       this limitation is the cost of zerocopy sequential
2516  *       reads of potentially non-linear data.
2517  *
2518  * Note 2: Fragment lists within fragments are not implemented
2519  *       at the moment, state->root_skb could be replaced with
2520  *       a stack for this purpose.
2521  */
2522 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2523 			  struct skb_seq_state *st)
2524 {
2525 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2526 	skb_frag_t *frag;
2527 
2528 	if (unlikely(abs_offset >= st->upper_offset))
2529 		return 0;
2530 
2531 next_skb:
2532 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2533 
2534 	if (abs_offset < block_limit && !st->frag_data) {
2535 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2536 		return block_limit - abs_offset;
2537 	}
2538 
2539 	if (st->frag_idx == 0 && !st->frag_data)
2540 		st->stepped_offset += skb_headlen(st->cur_skb);
2541 
2542 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2543 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2544 		block_limit = skb_frag_size(frag) + st->stepped_offset;
2545 
2546 		if (abs_offset < block_limit) {
2547 			if (!st->frag_data)
2548 				st->frag_data = kmap_atomic(skb_frag_page(frag));
2549 
2550 			*data = (u8 *) st->frag_data + frag->page_offset +
2551 				(abs_offset - st->stepped_offset);
2552 
2553 			return block_limit - abs_offset;
2554 		}
2555 
2556 		if (st->frag_data) {
2557 			kunmap_atomic(st->frag_data);
2558 			st->frag_data = NULL;
2559 		}
2560 
2561 		st->frag_idx++;
2562 		st->stepped_offset += skb_frag_size(frag);
2563 	}
2564 
2565 	if (st->frag_data) {
2566 		kunmap_atomic(st->frag_data);
2567 		st->frag_data = NULL;
2568 	}
2569 
2570 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2571 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2572 		st->frag_idx = 0;
2573 		goto next_skb;
2574 	} else if (st->cur_skb->next) {
2575 		st->cur_skb = st->cur_skb->next;
2576 		st->frag_idx = 0;
2577 		goto next_skb;
2578 	}
2579 
2580 	return 0;
2581 }
2582 EXPORT_SYMBOL(skb_seq_read);
2583 
2584 /**
2585  * skb_abort_seq_read - Abort a sequential read of skb data
2586  * @st: state variable
2587  *
2588  * Must be called if skb_seq_read() was not called until it
2589  * returned 0.
2590  */
2591 void skb_abort_seq_read(struct skb_seq_state *st)
2592 {
2593 	if (st->frag_data)
2594 		kunmap_atomic(st->frag_data);
2595 }
2596 EXPORT_SYMBOL(skb_abort_seq_read);
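
/*
 * Usage sketch (illustrative, not part of the original file): walk an skb's
 * bytes with the zero-copy sequential reader.  The consumer here merely
 * counts bytes; a real user would parse or hash each returned block.
 */
static unsigned int __maybe_unused example_seq_read(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
		consumed += len;	/* consume the whole returned block */

	/* skb_seq_read() returned 0, so skb_abort_seq_read() is not needed */
	return consumed;
}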
2597 
2598 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2599 
2600 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2601 					  struct ts_config *conf,
2602 					  struct ts_state *state)
2603 {
2604 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2605 }
2606 
2607 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2608 {
2609 	skb_abort_seq_read(TS_SKB_CB(state));
2610 }
2611 
2612 /**
2613  * skb_find_text - Find a text pattern in skb data
2614  * @skb: the buffer to look in
2615  * @from: search offset
2616  * @to: search limit
2617  * @config: textsearch configuration
2618  * @state: uninitialized textsearch state variable
2619  *
2620  * Finds a pattern in the skb data according to the specified
2621  * textsearch configuration. Use textsearch_next() to retrieve
2622  * subsequent occurrences of the pattern. Returns the offset
2623  * to the first occurrence or UINT_MAX if no match was found.
2624  */
2625 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2626 			   unsigned int to, struct ts_config *config,
2627 			   struct ts_state *state)
2628 {
2629 	unsigned int ret;
2630 
2631 	config->get_next_block = skb_ts_get_next_block;
2632 	config->finish = skb_ts_finish;
2633 
2634 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2635 
2636 	ret = textsearch_find(config, state);
2637 	return (ret <= to - from ? ret : UINT_MAX);
2638 }
2639 EXPORT_SYMBOL(skb_find_text);
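
/*
 * Usage sketch (illustrative, not part of the original file): search the
 * whole skb for a hypothetical pattern via the textsearch infrastructure,
 * similar in spirit to the xt_string match.
 */
static unsigned int __maybe_unused example_find_text(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* offset of the first match, or UINT_MAX */
}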
2640 
2641 /**
2642  * skb_append_datato_frags - append the user data to a skb
2643  * @sk: sock structure
2644  * @skb: skb structure to be appended with user data.
2645  * @getfrag: callback function to be used for getting the user data
2646  * @from: pointer to user message iov
2647  * @length: length of the iov message
2648  *
2649  * Description: This procedure appends the user data in the fragment part
2650  * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
2651  */
2652 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2653 			int (*getfrag)(void *from, char *to, int offset,
2654 					int len, int odd, struct sk_buff *skb),
2655 			void *from, int length)
2656 {
2657 	int frg_cnt = skb_shinfo(skb)->nr_frags;
2658 	int copy;
2659 	int offset = 0;
2660 	int ret;
2661 	struct page_frag *pfrag = &current->task_frag;
2662 
2663 	do {
2664 		/* Return error if we don't have space for new frag */
2665 		if (frg_cnt >= MAX_SKB_FRAGS)
2666 			return -EMSGSIZE;
2667 
2668 		if (!sk_page_frag_refill(sk, pfrag))
2669 			return -ENOMEM;
2670 
2671 		/* copy the user data to page */
2672 		copy = min_t(int, length, pfrag->size - pfrag->offset);
2673 
2674 		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2675 			      offset, copy, 0, skb);
2676 		if (ret < 0)
2677 			return -EFAULT;
2678 
2679 		/* copy was successful so update the size parameters */
2680 		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2681 				   copy);
2682 		frg_cnt++;
2683 		pfrag->offset += copy;
2684 		get_page(pfrag->page);
2685 
2686 		skb->truesize += copy;
2687 		atomic_add(copy, &sk->sk_wmem_alloc);
2688 		skb->len += copy;
2689 		skb->data_len += copy;
2690 		offset += copy;
2691 		length -= copy;
2692 
2693 	} while (length > 0);
2694 
2695 	return 0;
2696 }
2697 EXPORT_SYMBOL(skb_append_datato_frags);
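
/*
 * Usage sketch (illustrative, not part of the original file): a trivial
 * getfrag callback that copies from a kernel buffer, paired with
 * skb_append_datato_frags().  Real users such as UDP pass a user-iov
 * getfrag instead; the names below are hypothetical.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (const u8 *)from + offset, len);
	return 0;
}

static int __maybe_unused example_append_frags(struct sock *sk,
					       struct sk_buff *skb,
					       void *buf, int len)
{
	return skb_append_datato_frags(sk, skb, example_getfrag, buf, len);
}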
2698 
2699 /**
2700  *	skb_pull_rcsum - pull skb and update receive checksum
2701  *	@skb: buffer to update
2702  *	@len: length of data pulled
2703  *
2704  *	This function performs an skb_pull on the packet and updates
2705  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2706  *	receive path processing instead of skb_pull unless you know
2707  *	that the checksum difference is zero (e.g., a valid IP header)
2708  *	or you are setting ip_summed to CHECKSUM_NONE.
2709  */
2710 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2711 {
2712 	BUG_ON(len > skb->len);
2713 	skb->len -= len;
2714 	BUG_ON(skb->len < skb->data_len);
2715 	skb_postpull_rcsum(skb, skb->data, len);
2716 	return skb->data += len;
2717 }
2718 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
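
/*
 * Usage sketch (illustrative, not part of the original file): strip a
 * hypothetical 4-byte encapsulation header on the receive path while
 * keeping a CHECKSUM_COMPLETE value consistent, roughly as VLAN and
 * bridge code do.
 */
static void __maybe_unused example_pull_rcsum(struct sk_buff *skb)
{
	if (pskb_may_pull(skb, 4))
		skb_pull_rcsum(skb, 4);
}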
2719 
2720 /**
2721  *	skb_segment - Perform protocol segmentation on skb.
2722  *	@skb: buffer to segment
2723  *	@features: features for the output path (see dev->features)
2724  *
2725  *	This function performs segmentation on the given skb.  It returns
2726  *	a pointer to the first in a list of new skbs for the segments.
2727  *	In case of error it returns ERR_PTR(err).
2728  */
2729 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2730 {
2731 	struct sk_buff *segs = NULL;
2732 	struct sk_buff *tail = NULL;
2733 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2734 	unsigned int mss = skb_shinfo(skb)->gso_size;
2735 	unsigned int doffset = skb->data - skb_mac_header(skb);
2736 	unsigned int offset = doffset;
2737 	unsigned int tnl_hlen = skb_tnl_header_len(skb);
2738 	unsigned int headroom;
2739 	unsigned int len;
2740 	__be16 proto;
2741 	bool csum;
2742 	int sg = !!(features & NETIF_F_SG);
2743 	int nfrags = skb_shinfo(skb)->nr_frags;
2744 	int err = -ENOMEM;
2745 	int i = 0;
2746 	int pos;
2747 
2748 	proto = skb_network_protocol(skb);
2749 	if (unlikely(!proto))
2750 		return ERR_PTR(-EINVAL);
2751 
2752 	csum = !!can_checksum_protocol(features, proto);
2753 	__skb_push(skb, doffset);
2754 	headroom = skb_headroom(skb);
2755 	pos = skb_headlen(skb);
2756 
2757 	do {
2758 		struct sk_buff *nskb;
2759 		skb_frag_t *frag;
2760 		int hsize;
2761 		int size;
2762 
2763 		len = skb->len - offset;
2764 		if (len > mss)
2765 			len = mss;
2766 
2767 		hsize = skb_headlen(skb) - offset;
2768 		if (hsize < 0)
2769 			hsize = 0;
2770 		if (hsize > len || !sg)
2771 			hsize = len;
2772 
2773 		if (!hsize && i >= nfrags) {
2774 			BUG_ON(fskb->len != len);
2775 
2776 			pos += len;
2777 			nskb = skb_clone(fskb, GFP_ATOMIC);
2778 			fskb = fskb->next;
2779 
2780 			if (unlikely(!nskb))
2781 				goto err;
2782 
2783 			hsize = skb_end_offset(nskb);
2784 			if (skb_cow_head(nskb, doffset + headroom)) {
2785 				kfree_skb(nskb);
2786 				goto err;
2787 			}
2788 
2789 			nskb->truesize += skb_end_offset(nskb) - hsize;
2790 			skb_release_head_state(nskb);
2791 			__skb_push(nskb, doffset);
2792 		} else {
2793 			nskb = __alloc_skb(hsize + doffset + headroom,
2794 					   GFP_ATOMIC, skb_alloc_rx_flag(skb),
2795 					   NUMA_NO_NODE);
2796 
2797 			if (unlikely(!nskb))
2798 				goto err;
2799 
2800 			skb_reserve(nskb, headroom);
2801 			__skb_put(nskb, doffset);
2802 		}
2803 
2804 		if (segs)
2805 			tail->next = nskb;
2806 		else
2807 			segs = nskb;
2808 		tail = nskb;
2809 
2810 		__copy_skb_header(nskb, skb);
2811 		nskb->mac_len = skb->mac_len;
2812 
2813 		/* nskb and skb might have different headroom */
2814 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
2815 			nskb->csum_start += skb_headroom(nskb) - headroom;
2816 
2817 		skb_reset_mac_header(nskb);
2818 		skb_set_network_header(nskb, skb->mac_len);
2819 		nskb->transport_header = (nskb->network_header +
2820 					  skb_network_header_len(skb));
2821 
2822 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
2823 						 nskb->data - tnl_hlen,
2824 						 doffset + tnl_hlen);
2825 
2826 		if (fskb != skb_shinfo(skb)->frag_list)
2827 			continue;
2828 
2829 		if (!sg) {
2830 			nskb->ip_summed = CHECKSUM_NONE;
2831 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2832 							    skb_put(nskb, len),
2833 							    len, 0);
2834 			continue;
2835 		}
2836 
2837 		frag = skb_shinfo(nskb)->frags;
2838 
2839 		skb_copy_from_linear_data_offset(skb, offset,
2840 						 skb_put(nskb, hsize), hsize);
2841 
2842 		skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2843 
2844 		while (pos < offset + len && i < nfrags) {
2845 			*frag = skb_shinfo(skb)->frags[i];
2846 			__skb_frag_ref(frag);
2847 			size = skb_frag_size(frag);
2848 
2849 			if (pos < offset) {
2850 				frag->page_offset += offset - pos;
2851 				skb_frag_size_sub(frag, offset - pos);
2852 			}
2853 
2854 			skb_shinfo(nskb)->nr_frags++;
2855 
2856 			if (pos + size <= offset + len) {
2857 				i++;
2858 				pos += size;
2859 			} else {
2860 				skb_frag_size_sub(frag, pos + size - (offset + len));
2861 				goto skip_fraglist;
2862 			}
2863 
2864 			frag++;
2865 		}
2866 
2867 		if (pos < offset + len) {
2868 			struct sk_buff *fskb2 = fskb;
2869 
2870 			BUG_ON(pos + fskb->len != offset + len);
2871 
2872 			pos += fskb->len;
2873 			fskb = fskb->next;
2874 
2875 			if (fskb2->next) {
2876 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2877 				if (!fskb2)
2878 					goto err;
2879 			} else
2880 				skb_get(fskb2);
2881 
2882 			SKB_FRAG_ASSERT(nskb);
2883 			skb_shinfo(nskb)->frag_list = fskb2;
2884 		}
2885 
2886 skip_fraglist:
2887 		nskb->data_len = len - hsize;
2888 		nskb->len += nskb->data_len;
2889 		nskb->truesize += nskb->data_len;
2890 
2891 		if (!csum) {
2892 			nskb->csum = skb_checksum(nskb, doffset,
2893 						  nskb->len - doffset, 0);
2894 			nskb->ip_summed = CHECKSUM_NONE;
2895 		}
2896 	} while ((offset += len) < skb->len);
2897 
2898 	return segs;
2899 
2900 err:
2901 	while ((skb = segs)) {
2902 		segs = skb->next;
2903 		kfree_skb(skb);
2904 	}
2905 	return ERR_PTR(err);
2906 }
2907 EXPORT_SYMBOL_GPL(skb_segment);
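
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * protocol's GSO callback typically hands an oversized buffer to
 * skb_segment().  The callback name is hypothetical.
 */
static struct sk_buff *__maybe_unused example_gso_segment(struct sk_buff *skb,
							  netdev_features_t features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	/* ERR_PTR() on failure, otherwise a list of segments linked via ->next */
	return segs;
}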
2908 
2909 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2910 {
2911 	struct sk_buff *p = *head;
2912 	struct sk_buff *nskb;
2913 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2914 	struct skb_shared_info *pinfo = skb_shinfo(p);
2915 	unsigned int headroom;
2916 	unsigned int len = skb_gro_len(skb);
2917 	unsigned int offset = skb_gro_offset(skb);
2918 	unsigned int headlen = skb_headlen(skb);
2919 	unsigned int delta_truesize;
2920 
2921 	if (p->len + len >= 65536)
2922 		return -E2BIG;
2923 
2924 	if (pinfo->frag_list)
2925 		goto merge;
2926 	else if (headlen <= offset) {
2927 		skb_frag_t *frag;
2928 		skb_frag_t *frag2;
2929 		int i = skbinfo->nr_frags;
2930 		int nr_frags = pinfo->nr_frags + i;
2931 
2932 		offset -= headlen;
2933 
2934 		if (nr_frags > MAX_SKB_FRAGS)
2935 			return -E2BIG;
2936 
2937 		pinfo->nr_frags = nr_frags;
2938 		skbinfo->nr_frags = 0;
2939 
2940 		frag = pinfo->frags + nr_frags;
2941 		frag2 = skbinfo->frags + i;
2942 		do {
2943 			*--frag = *--frag2;
2944 		} while (--i);
2945 
2946 		frag->page_offset += offset;
2947 		skb_frag_size_sub(frag, offset);
2948 
2949 		/* all fragments' truesize: remove (head size + sk_buff) */
2950 		delta_truesize = skb->truesize -
2951 				 SKB_TRUESIZE(skb_end_offset(skb));
2952 
2953 		skb->truesize -= skb->data_len;
2954 		skb->len -= skb->data_len;
2955 		skb->data_len = 0;
2956 
2957 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
2958 		goto done;
2959 	} else if (skb->head_frag) {
2960 		int nr_frags = pinfo->nr_frags;
2961 		skb_frag_t *frag = pinfo->frags + nr_frags;
2962 		struct page *page = virt_to_head_page(skb->head);
2963 		unsigned int first_size = headlen - offset;
2964 		unsigned int first_offset;
2965 
2966 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
2967 			return -E2BIG;
2968 
2969 		first_offset = skb->data -
2970 			       (unsigned char *)page_address(page) +
2971 			       offset;
2972 
2973 		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
2974 
2975 		frag->page.p	  = page;
2976 		frag->page_offset = first_offset;
2977 		skb_frag_size_set(frag, first_size);
2978 
2979 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
2980 		/* We don't need to clear skbinfo->nr_frags here */
2981 
2982 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
2983 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
2984 		goto done;
2985 	} else if (skb_gro_len(p) != pinfo->gso_size)
2986 		return -E2BIG;
2987 
2988 	headroom = skb_headroom(p);
2989 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2990 	if (unlikely(!nskb))
2991 		return -ENOMEM;
2992 
2993 	__copy_skb_header(nskb, p);
2994 	nskb->mac_len = p->mac_len;
2995 
2996 	skb_reserve(nskb, headroom);
2997 	__skb_put(nskb, skb_gro_offset(p));
2998 
2999 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
3000 	skb_set_network_header(nskb, skb_network_offset(p));
3001 	skb_set_transport_header(nskb, skb_transport_offset(p));
3002 
3003 	__skb_pull(p, skb_gro_offset(p));
3004 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
3005 	       p->data - skb_mac_header(p));
3006 
3007 	skb_shinfo(nskb)->frag_list = p;
3008 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
3009 	pinfo->gso_size = 0;
3010 	skb_header_release(p);
3011 	NAPI_GRO_CB(nskb)->last = p;
3012 
3013 	nskb->data_len += p->len;
3014 	nskb->truesize += p->truesize;
3015 	nskb->len += p->len;
3016 
3017 	*head = nskb;
3018 	nskb->next = p->next;
3019 	p->next = NULL;
3020 
3021 	p = nskb;
3022 
3023 merge:
3024 	delta_truesize = skb->truesize;
3025 	if (offset > headlen) {
3026 		unsigned int eat = offset - headlen;
3027 
3028 		skbinfo->frags[0].page_offset += eat;
3029 		skb_frag_size_sub(&skbinfo->frags[0], eat);
3030 		skb->data_len -= eat;
3031 		skb->len -= eat;
3032 		offset = headlen;
3033 	}
3034 
3035 	__skb_pull(skb, offset);
3036 
3037 	NAPI_GRO_CB(p)->last->next = skb;
3038 	NAPI_GRO_CB(p)->last = skb;
3039 	skb_header_release(skb);
3040 
3041 done:
3042 	NAPI_GRO_CB(p)->count++;
3043 	p->data_len += len;
3044 	p->truesize += delta_truesize;
3045 	p->len += len;
3046 
3047 	NAPI_GRO_CB(skb)->same_flow = 1;
3048 	return 0;
3049 }
3050 EXPORT_SYMBOL_GPL(skb_gro_receive);
3051 
3052 void __init skb_init(void)
3053 {
3054 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3055 					      sizeof(struct sk_buff),
3056 					      0,
3057 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3058 					      NULL);
3059 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3060 						(2*sizeof(struct sk_buff)) +
3061 						sizeof(atomic_t),
3062 						0,
3063 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3064 						NULL);
3065 }
3066 
3067 /**
3068  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3069  *	@skb: Socket buffer containing the buffers to be mapped
3070  *	@sg: The scatter-gather list to map into
3071  *	@offset: The offset into the buffer's contents to start mapping
3072  *	@len: Length of buffer space to be mapped
3073  *
3074  *	Fill the specified scatter-gather list with mappings/pointers into a
3075  *	region of the buffer space attached to a socket buffer.
3076  */
3077 static int
3078 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3079 {
3080 	int start = skb_headlen(skb);
3081 	int i, copy = start - offset;
3082 	struct sk_buff *frag_iter;
3083 	int elt = 0;
3084 
3085 	if (copy > 0) {
3086 		if (copy > len)
3087 			copy = len;
3088 		sg_set_buf(sg, skb->data + offset, copy);
3089 		elt++;
3090 		if ((len -= copy) == 0)
3091 			return elt;
3092 		offset += copy;
3093 	}
3094 
3095 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3096 		int end;
3097 
3098 		WARN_ON(start > offset + len);
3099 
3100 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3101 		if ((copy = end - offset) > 0) {
3102 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3103 
3104 			if (copy > len)
3105 				copy = len;
3106 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3107 					frag->page_offset+offset-start);
3108 			elt++;
3109 			if (!(len -= copy))
3110 				return elt;
3111 			offset += copy;
3112 		}
3113 		start = end;
3114 	}
3115 
3116 	skb_walk_frags(skb, frag_iter) {
3117 		int end;
3118 
3119 		WARN_ON(start > offset + len);
3120 
3121 		end = start + frag_iter->len;
3122 		if ((copy = end - offset) > 0) {
3123 			if (copy > len)
3124 				copy = len;
3125 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3126 					      copy);
3127 			if ((len -= copy) == 0)
3128 				return elt;
3129 			offset += copy;
3130 		}
3131 		start = end;
3132 	}
3133 	BUG_ON(len);
3134 	return elt;
3135 }
3136 
3137 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3138 {
3139 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
3140 
3141 	sg_mark_end(&sg[nsg - 1]);
3142 
3143 	return nsg;
3144 }
3145 EXPORT_SYMBOL_GPL(skb_to_sgvec);
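
/*
 * Usage sketch (illustrative, not part of the original file): map an skb's
 * payload into a caller-provided scatterlist, e.g. before handing it to the
 * crypto layer.  @sg and @nents are hypothetical; @nents must cover one
 * entry for the linear head plus one per page frag (more with a frag_list).
 */
static int __maybe_unused example_to_sgvec(struct sk_buff *skb,
					   struct scatterlist *sg, int nents)
{
	sg_init_table(sg, nents);
	return skb_to_sgvec(skb, sg, 0, skb->len);	/* entries actually used */
}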
3146 
3147 /**
3148  *	skb_cow_data - Check that a socket buffer's data buffers are writable
3149  *	@skb: The socket buffer to check.
3150  *	@tailbits: Amount of trailing space to be added
3151  *	@trailer: Returned pointer to the skb where the @tailbits space begins
3152  *
3153  *	Make sure that the data buffers attached to a socket buffer are
3154  *	writable. If they are not, private copies are made of the data buffers
3155  *	and the socket buffer is set to use these instead.
3156  *
3157  *	If @tailbits is given, make sure that there is space to write @tailbits
3158  *	bytes of data beyond current end of socket buffer.  @trailer will be
3159  *	set to point to the skb in which this space begins.
3160  *
3161  *	The number of scatterlist elements required to completely map the
3162  *	COW'd and extended socket buffer will be returned.
3163  */
3164 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3165 {
3166 	int copyflag;
3167 	int elt;
3168 	struct sk_buff *skb1, **skb_p;
3169 
3170 	/* If skb is cloned or its head is paged, reallocate
3171 	 * head pulling out all the pages (pages are considered not writable
3172 	 * at the moment even if they are anonymous).
3173 	 */
3174 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3175 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3176 		return -ENOMEM;
3177 
3178 	/* Easy case. Most of packets will go this way. */
3179 	if (!skb_has_frag_list(skb)) {
3180 		/* A little trouble: not enough space for the trailer.
3181 		 * This should not happen when the stack is tuned to generate
3182 		 * good frames. On a miss we reallocate and reserve even more
3183 		 * space; 128 bytes is fair. */
3184 
3185 		if (skb_tailroom(skb) < tailbits &&
3186 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3187 			return -ENOMEM;
3188 
3189 		/* Voila! */
3190 		*trailer = skb;
3191 		return 1;
3192 	}
3193 
3194 	/* Misery. We are in trouble; time to mince the fragments... */
3195 
3196 	elt = 1;
3197 	skb_p = &skb_shinfo(skb)->frag_list;
3198 	copyflag = 0;
3199 
3200 	while ((skb1 = *skb_p) != NULL) {
3201 		int ntail = 0;
3202 
3203 		/* The fragment is partially pulled by someone;
3204 		 * this can happen on input. Copy it and everything
3205 		 * after it. */
3206 
3207 		if (skb_shared(skb1))
3208 			copyflag = 1;
3209 
3210 		/* If the skb is the last, worry about trailer. */
3211 
3212 		if (skb1->next == NULL && tailbits) {
3213 			if (skb_shinfo(skb1)->nr_frags ||
3214 			    skb_has_frag_list(skb1) ||
3215 			    skb_tailroom(skb1) < tailbits)
3216 				ntail = tailbits + 128;
3217 		}
3218 
3219 		if (copyflag ||
3220 		    skb_cloned(skb1) ||
3221 		    ntail ||
3222 		    skb_shinfo(skb1)->nr_frags ||
3223 		    skb_has_frag_list(skb1)) {
3224 			struct sk_buff *skb2;
3225 
3226 			/* Fuck, we are miserable poor guys... */
3227 			if (ntail == 0)
3228 				skb2 = skb_copy(skb1, GFP_ATOMIC);
3229 			else
3230 				skb2 = skb_copy_expand(skb1,
3231 						       skb_headroom(skb1),
3232 						       ntail,
3233 						       GFP_ATOMIC);
3234 			if (unlikely(skb2 == NULL))
3235 				return -ENOMEM;
3236 
3237 			if (skb1->sk)
3238 				skb_set_owner_w(skb2, skb1->sk);
3239 
3240 			/* Looking around. Are we still alive?
3241 			 * OK, link new skb, drop old one */
3242 
3243 			skb2->next = skb1->next;
3244 			*skb_p = skb2;
3245 			kfree_skb(skb1);
3246 			skb1 = skb2;
3247 		}
3248 		elt++;
3249 		*trailer = skb1;
3250 		skb_p = &skb1->next;
3251 	}
3252 
3253 	return elt;
3254 }
3255 EXPORT_SYMBOL_GPL(skb_cow_data);
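
/*
 * Usage sketch (illustrative, not part of the original file): the
 * IPsec-style pattern of making the whole buffer writable (plus room for a
 * trailer) and then mapping it for crypto.  The 16-byte trailer length and
 * the caller-provided @sg/@nsg_max are hypothetical.
 */
static int __maybe_unused example_cow_data(struct sk_buff *skb,
					   struct scatterlist *sg, int nsg_max)
{
	struct sk_buff *trailer;
	int nsg = skb_cow_data(skb, 16, &trailer);

	if (nsg < 0)
		return nsg;
	if (nsg > nsg_max)
		return -EMSGSIZE;

	sg_init_table(sg, nsg);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}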
3256 
3257 static void sock_rmem_free(struct sk_buff *skb)
3258 {
3259 	struct sock *sk = skb->sk;
3260 
3261 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3262 }
3263 
3264 /*
3265  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
3266  */
3267 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3268 {
3269 	int len = skb->len;
3270 
3271 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3272 	    (unsigned int)sk->sk_rcvbuf)
3273 		return -ENOMEM;
3274 
3275 	skb_orphan(skb);
3276 	skb->sk = sk;
3277 	skb->destructor = sock_rmem_free;
3278 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3279 
3280 	/* before exiting rcu section, make sure dst is refcounted */
3281 	skb_dst_force(skb);
3282 
3283 	skb_queue_tail(&sk->sk_error_queue, skb);
3284 	if (!sock_flag(sk, SOCK_DEAD))
3285 		sk->sk_data_ready(sk, len);
3286 	return 0;
3287 }
3288 EXPORT_SYMBOL(sock_queue_err_skb);
3289 
3290 void skb_tstamp_tx(struct sk_buff *orig_skb,
3291 		struct skb_shared_hwtstamps *hwtstamps)
3292 {
3293 	struct sock *sk = orig_skb->sk;
3294 	struct sock_exterr_skb *serr;
3295 	struct sk_buff *skb;
3296 	int err;
3297 
3298 	if (!sk)
3299 		return;
3300 
3301 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3302 	if (!skb)
3303 		return;
3304 
3305 	if (hwtstamps) {
3306 		*skb_hwtstamps(skb) =
3307 			*hwtstamps;
3308 	} else {
3309 		/*
3310 		 * no hardware time stamps available,
3311 		 * so keep the shared tx_flags and only
3312 		 * store software time stamp
3313 		 */
3314 		skb->tstamp = ktime_get_real();
3315 	}
3316 
3317 	serr = SKB_EXT_ERR(skb);
3318 	memset(serr, 0, sizeof(*serr));
3319 	serr->ee.ee_errno = ENOMSG;
3320 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3321 
3322 	err = sock_queue_err_skb(sk, skb);
3323 
3324 	if (err)
3325 		kfree_skb(skb);
3326 }
3327 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
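
/*
 * Usage sketch (illustrative, not part of the original file): a driver's
 * TX-completion path reporting a hardware timestamp it has read back from
 * the NIC.  The already-converted ktime_t value @hw is hypothetical.
 */
static void __maybe_unused example_tx_tstamp(struct sk_buff *skb, ktime_t hw)
{
	struct skb_shared_hwtstamps hwts;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = hw;
	skb_tstamp_tx(skb, &hwts);
}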
3328 
3329 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3330 {
3331 	struct sock *sk = skb->sk;
3332 	struct sock_exterr_skb *serr;
3333 	int err;
3334 
3335 	skb->wifi_acked_valid = 1;
3336 	skb->wifi_acked = acked;
3337 
3338 	serr = SKB_EXT_ERR(skb);
3339 	memset(serr, 0, sizeof(*serr));
3340 	serr->ee.ee_errno = ENOMSG;
3341 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3342 
3343 	err = sock_queue_err_skb(sk, skb);
3344 	if (err)
3345 		kfree_skb(skb);
3346 }
3347 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3348 
3349 
3350 /**
3351  * skb_partial_csum_set - set up and verify partial csum values for packet
3352  * @skb: the skb to set
3353  * @start: the number of bytes after skb->data to start checksumming.
3354  * @off: the offset from start to place the checksum.
3355  *
3356  * For untrusted partially-checksummed packets, we need to make sure the values
3357  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3358  *
3359  * This function checks and sets those values and skb->ip_summed: if this
3360  * returns false you should drop the packet.
3361  */
3362 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3363 {
3364 	if (unlikely(start > skb_headlen(skb)) ||
3365 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3366 		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3367 				     start, off, skb_headlen(skb));
3368 		return false;
3369 	}
3370 	skb->ip_summed = CHECKSUM_PARTIAL;
3371 	skb->csum_start = skb_headroom(skb) + start;
3372 	skb->csum_offset = off;
3373 	skb_set_transport_header(skb, start);
3374 	return true;
3375 }
3376 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
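
/*
 * Usage sketch (illustrative, not part of the original file): validate
 * checksum metadata taken from an untrusted source (e.g. a virtio-net
 * style header) before trusting it.  @csum_start and @csum_offset stand
 * in for hypothetical guest-supplied fields.
 */
static bool __maybe_unused example_partial_csum(struct sk_buff *skb,
						u16 csum_start, u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
		kfree_skb(skb);		/* offsets fall outside the head: drop */
		return false;
	}
	return true;
}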
3377 
3378 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3379 {
3380 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
3381 			     skb->dev->name);
3382 }
3383 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3384 
3385 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
3386 {
3387 	if (head_stolen) {
3388 		skb_release_head_state(skb);
3389 		kmem_cache_free(skbuff_head_cache, skb);
3390 	} else {
3391 		__kfree_skb(skb);
3392 	}
3393 }
3394 EXPORT_SYMBOL(kfree_skb_partial);
3395 
3396 /**
3397  * skb_try_coalesce - try to merge skb to prior one
3398  * @to: prior buffer
3399  * @from: buffer to add
3400  * @fragstolen: pointer to boolean
3401  * @delta_truesize: how much more was allocated than was requested
3402  */
3403 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3404 		      bool *fragstolen, int *delta_truesize)
3405 {
3406 	int i, delta, len = from->len;
3407 
3408 	*fragstolen = false;
3409 
3410 	if (skb_cloned(to))
3411 		return false;
3412 
3413 	if (len <= skb_tailroom(to)) {
3414 		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
3415 		*delta_truesize = 0;
3416 		return true;
3417 	}
3418 
3419 	if (skb_has_frag_list(to) || skb_has_frag_list(from))
3420 		return false;
3421 
3422 	if (skb_headlen(from) != 0) {
3423 		struct page *page;
3424 		unsigned int offset;
3425 
3426 		if (skb_shinfo(to)->nr_frags +
3427 		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
3428 			return false;
3429 
3430 		if (skb_head_is_locked(from))
3431 			return false;
3432 
3433 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3434 
3435 		page = virt_to_head_page(from->head);
3436 		offset = from->data - (unsigned char *)page_address(page);
3437 
3438 		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
3439 				   page, offset, skb_headlen(from));
3440 		*fragstolen = true;
3441 	} else {
3442 		if (skb_shinfo(to)->nr_frags +
3443 		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
3444 			return false;
3445 
3446 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
3447 	}
3448 
3449 	WARN_ON_ONCE(delta < len);
3450 
3451 	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
3452 	       skb_shinfo(from)->frags,
3453 	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
3454 	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
3455 
3456 	if (!skb_cloned(from))
3457 		skb_shinfo(from)->nr_frags = 0;
3458 
3459 	/* if the skb is not cloned this does nothing
3460 	 * since we set nr_frags to 0.
3461 	 */
3462 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
3463 		skb_frag_ref(from, i);
3464 
3465 	to->truesize += delta;
3466 	to->len += len;
3467 	to->data_len += len;
3468 
3469 	*delta_truesize = delta;
3470 	return true;
3471 }
3472 EXPORT_SYMBOL(skb_try_coalesce);
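
/*
 * Usage sketch (illustrative, not part of the original file): the
 * receive-queue pattern used by TCP-style coalescing: try to fold @from
 * into @to, then release @from with kfree_skb_partial() so a stolen head
 * is not freed twice.
 */
static bool __maybe_unused example_try_coalesce(struct sk_buff *to,
						struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	/* the caller would account @delta against its memory budget here */
	kfree_skb_partial(from, fragstolen);
	return true;
}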
3473