/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
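
/*
 * Illustrative sketch (not part of the original file): the NOTE above means
 * that the unlocked __skb_ variants rely on the caller for serialization.
 * Under the queue lock,
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);
 *
 * is equivalent to a single call to skb_queue_tail(list, skb), which takes
 * the lock internally.
 */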

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	Returns the buffer on success or %NULL on failure.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
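
/*
 * Illustrative usage sketch (not in the original file): a caller that needs
 * headroom for headers and "len" bytes of payload normally goes through the
 * alloc_skb() wrapper rather than calling __alloc_skb() directly:
 *
 *	struct sk_buff *skb = alloc_skb(headroom + len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, headroom);
 *	memcpy(skb_put(skb, len), payload, len);
 *
 * "headroom", "len" and "payload" are placeholder names for this sketch.
 */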

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 *
 * Allocate a new &sk_buff. The caller provides the space holding the head
 * and the skb_shared_info. @data must have been allocated by kmalloc().
 * Returns the new skb buffer on success;
 * on failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame. The driver should add room at the head
 *  (NET_SKB_PAD) and MUST add room at the tail
 *  (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
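
/*
 * Illustrative sketch (not in the original file) of the driver pattern the
 * comment above describes: the RX buffer is sized for both the frame and
 * the trailing struct skb_shared_info before being handed to build_skb():
 *
 *	size = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
 *	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	data = kmalloc(size, GFP_ATOMIC);
 *	... NIC DMAs the frame to data + NET_SKB_PAD ...
 *	skb = build_skb(data);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 *
 * "frame_len" is a placeholder for the received frame length.
 */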

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);
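
/*
 * Illustrative sketch (not in the original file): the classic interrupt-time
 * receive path built on dev_alloc_skb()/skb_put():
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * "pkt_len" and "rx_buf" are placeholders for the device's RX descriptor.
 */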

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb's buffer is from userspace, we need to notify
		 * the caller that the lower device's DMA has finished.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the memory of an skbuff without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and traces it as such.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
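
/*
 * Illustrative note (not in the original file): drivers typically call
 * consume_skb() on successful TX completion and kfree_skb() on drops, so
 * that the kfree_skb tracepoint only fires for real packet loss:
 *
 *	if (tx_ok)
 *		consume_skb(skb);
 *	else
 *		kfree_skb(skb);
 */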

/**
 * 	skb_recycle - clean up an skb for reuse
 * 	@skb: buffer
 *
 * 	Recycles the skb to be reused as a receive buffer. This
 * 	function does any necessary reference count dropping, and
 * 	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	new->dev		= old->dev;
	new->transport_header	= old->transport_header;
	new->network_header	= old->network_header;
	new->mac_header		= old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash		= old->rxhash;
	new->ooo_okay		= old->ooo_okay;
	new->l4_rxhash		= old->l4_rxhash;
	new->no_fcs		= old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum		= old->csum;
	new->local_df		= old->local_df;
	new->pkt_type		= old->pkt_type;
	new->ip_summed		= old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority		= old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property	= old->ipvs_property;
#endif
	new->protocol		= old->protocol;
	new->mark		= old->mark;
	new->skb_iif		= old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace		= old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index		= old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd		= old->tc_verd;
#endif
#endif
	new->vlan_tci		= old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frag buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel memory and drop the reference
 *	to the userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		__skb_fill_page_desc(skb, i-1, head, 0,
				     skb_shinfo(skb)->frags[i - 1].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}


/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
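
/*
 * Illustrative sketch (not in the original file): because a clone shares the
 * data area, a caller that intends to write to the packet must still make
 * the head private afterwards, e.g.:
 *
 *	clone = skb_clone(skb, GFP_ATOMIC);
 *	if (!clone)
 *		return -ENOMEM;
 *	if (skb_cow_head(clone, 0)) {
 *		kfree_skb(clone);
 *		return -ENOMEM;
 *	}
 */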

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header	      += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
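
/*
 * Illustrative sketch (not in the original file): skb_copy() is the right
 * tool when payload bytes must be edited; for header-only edits the cheaper
 * pskb_copy() is preferred, as noted above:
 *
 *	nskb = skb_copy(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	nskb->data[0] ^= 0xff;	safe: the copy's data is not shared
 */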

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in its header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of @skb. The &sk_buff itself is not changed and MUST have
 *	a reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed; in the latter case the &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy this zero copy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
adjust_others:
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
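
/*
 * Illustrative sketch (not in the original file): growing headroom before
 * pushing an encapsulation header, reloading pointers afterwards as the
 * comment above requires:
 *
 *	if (skb_headroom(skb) < hdr_len &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len), 0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = skb_push(skb, hdr_len);	skb->data et al. are valid again
 *
 * "hdr_len" is a placeholder for the encapsulation header size.
 */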

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off                  = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header   += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, the tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);
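
/*
 * Illustrative sketch (not in the original file): an Ethernet driver padding
 * runt frames before transmit via the skb_padto() wrapper around skb_pad().
 * Because the skb is freed on error, the caller must not touch it after a
 * failure:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	skb was already freed
 */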

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
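
/*
 * Illustrative sketch (not in the original file): headers are built from the
 * innermost out, each skb_push() extending the packet toward skb->head:
 *
 *	skb_reserve(skb, hard_header_len);	leave room for all headers
 *	memcpy(skb_put(skb, data_len), data, data_len);
 *	udph = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
 *	iph  = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));
 *
 * "hard_header_len", "data" and "data_len" are placeholder names.
 */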

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff:
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have a reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at its tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
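
/*
 * Illustrative sketch (not in the original file): skb_copy_bits() works for
 * linear and non-linear skbs alike, so peeking at a header that may sit in a
 * fragment looks like:
 *
 *	struct tcphdr th;
 *
 *	if (skb_copy_bits(skb, thoff, &th, sizeof(th)) < 0)
 *		goto bad;	packet shorter than expected
 *
 * "thoff" is a placeholder for the transport header offset.
 */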

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages  */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
			  int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);
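
/*
 * Illustrative sketch (not in the original file): folding the 32-bit partial
 * sum computed above into a final 16-bit Internet checksum:
 *
 *	__wsum csum = skb_checksum(skb, offset, len, 0);
 *	__sum16 check = csum_fold(csum);
 */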
1920 
1921 /* Both of above in one bottle. */
1922 
1923 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1924 				    u8 *to, int len, __wsum csum)
1925 {
1926 	int start = skb_headlen(skb);
1927 	int i, copy = start - offset;
1928 	struct sk_buff *frag_iter;
1929 	int pos = 0;
1930 
1931 	/* Copy header. */
1932 	if (copy > 0) {
1933 		if (copy > len)
1934 			copy = len;
1935 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1936 						 copy, csum);
1937 		if ((len -= copy) == 0)
1938 			return csum;
1939 		offset += copy;
1940 		to     += copy;
1941 		pos	= copy;
1942 	}
1943 
1944 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1945 		int end;
1946 
1947 		WARN_ON(start > offset + len);
1948 
1949 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1950 		if ((copy = end - offset) > 0) {
1951 			__wsum csum2;
1952 			u8 *vaddr;
1953 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1954 
1955 			if (copy > len)
1956 				copy = len;
1957 			vaddr = kmap_skb_frag(frag);
1958 			csum2 = csum_partial_copy_nocheck(vaddr +
1959 							  frag->page_offset +
1960 							  offset - start, to,
1961 							  copy, 0);
1962 			kunmap_skb_frag(vaddr);
1963 			csum = csum_block_add(csum, csum2, pos);
1964 			if (!(len -= copy))
1965 				return csum;
1966 			offset += copy;
1967 			to     += copy;
1968 			pos    += copy;
1969 		}
1970 		start = end;
1971 	}
1972 
1973 	skb_walk_frags(skb, frag_iter) {
1974 		__wsum csum2;
1975 		int end;
1976 
1977 		WARN_ON(start > offset + len);
1978 
1979 		end = start + frag_iter->len;
1980 		if ((copy = end - offset) > 0) {
1981 			if (copy > len)
1982 				copy = len;
1983 			csum2 = skb_copy_and_csum_bits(frag_iter,
1984 						       offset - start,
1985 						       to, copy, 0);
1986 			csum = csum_block_add(csum, csum2, pos);
1987 			if ((len -= copy) == 0)
1988 				return csum;
1989 			offset += copy;
1990 			to     += copy;
1991 			pos    += copy;
1992 		}
1993 		start = end;
1994 	}
1995 	BUG_ON(len);
1996 	return csum;
1997 }
1998 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1999 
2000 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2001 {
2002 	__wsum csum;
2003 	long csstart;
2004 
2005 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2006 		csstart = skb_checksum_start_offset(skb);
2007 	else
2008 		csstart = skb_headlen(skb);
2009 
2010 	BUG_ON(csstart > skb_headlen(skb));
2011 
2012 	skb_copy_from_linear_data(skb, to, csstart);
2013 
2014 	csum = 0;
2015 	if (csstart != skb->len)
2016 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2017 					      skb->len - csstart, 0);
2018 
2019 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2020 		long csstuff = csstart + skb->csum_offset;
2021 
2022 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
2023 	}
2024 }
2025 EXPORT_SYMBOL(skb_copy_and_csum_dev);
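
/*
 * Usage sketch (illustrative): a driver that copies each tx skb into a
 * device-visible bounce buffer can let skb_copy_and_csum_dev() perform
 * the copy and complete a CHECKSUM_PARTIAL checksum in one pass.  The
 * buffer handling shown is hypothetical.
 *
 *	static void example_xmit_copy(struct sk_buff *skb, u8 *tx_buf)
 *	{
 *		skb_copy_and_csum_dev(skb, tx_buf);
 *		... hand tx_buf / skb->len to the hardware ...
 *	}
 */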
2026 
2027 /**
2028  *	skb_dequeue - remove from the head of the queue
2029  *	@list: list to dequeue from
2030  *
2031  *	Remove the head of the list. The list lock is taken so the function
2032  *	may be used safely with other locking list functions. The head item is
2033  *	returned or %NULL if the list is empty.
2034  */
2035 
2036 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2037 {
2038 	unsigned long flags;
2039 	struct sk_buff *result;
2040 
2041 	spin_lock_irqsave(&list->lock, flags);
2042 	result = __skb_dequeue(list);
2043 	spin_unlock_irqrestore(&list->lock, flags);
2044 	return result;
2045 }
2046 EXPORT_SYMBOL(skb_dequeue);
2047 
2048 /**
2049  *	skb_dequeue_tail - remove from the tail of the queue
2050  *	@list: list to dequeue from
2051  *
2052  *	Remove the tail of the list. The list lock is taken so the function
2053  *	may be used safely with other locking list functions. The tail item is
2054  *	returned or %NULL if the list is empty.
2055  */
2056 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2057 {
2058 	unsigned long flags;
2059 	struct sk_buff *result;
2060 
2061 	spin_lock_irqsave(&list->lock, flags);
2062 	result = __skb_dequeue_tail(list);
2063 	spin_unlock_irqrestore(&list->lock, flags);
2064 	return result;
2065 }
2066 EXPORT_SYMBOL(skb_dequeue_tail);
2067 
2068 /**
2069  *	skb_queue_purge - empty a list
2070  *	@list: list to empty
2071  *
2072  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2073  *	the list and one reference dropped. This function takes the list
2074  *	lock and is atomic with respect to other list locking functions.
2075  */
2076 void skb_queue_purge(struct sk_buff_head *list)
2077 {
2078 	struct sk_buff *skb;
2079 	while ((skb = skb_dequeue(list)) != NULL)
2080 		kfree_skb(skb);
2081 }
2082 EXPORT_SYMBOL(skb_queue_purge);
2083 
2084 /**
2085  *	skb_queue_head - queue a buffer at the list head
2086  *	@list: list to use
2087  *	@newsk: buffer to queue
2088  *
2089  *	Queue a buffer at the start of the list. This function takes the
2090  *	list lock and can be used safely with other locking &sk_buff
2091  *	functions.
2092  *
2093  *	A buffer cannot be placed on two lists at the same time.
2094  */
2095 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2096 {
2097 	unsigned long flags;
2098 
2099 	spin_lock_irqsave(&list->lock, flags);
2100 	__skb_queue_head(list, newsk);
2101 	spin_unlock_irqrestore(&list->lock, flags);
2102 }
2103 EXPORT_SYMBOL(skb_queue_head);
2104 
2105 /**
2106  *	skb_queue_tail - queue a buffer at the list tail
2107  *	@list: list to use
2108  *	@newsk: buffer to queue
2109  *
2110  *	Queue a buffer at the tail of the list. This function takes the
2111  *	list lock and can be used safely with other locking &sk_buff
2112  *	functions.
2113  *
2114  *	A buffer cannot be placed on two lists at the same time.
2115  */
2116 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2117 {
2118 	unsigned long flags;
2119 
2120 	spin_lock_irqsave(&list->lock, flags);
2121 	__skb_queue_tail(list, newsk);
2122 	spin_unlock_irqrestore(&list->lock, flags);
2123 }
2124 EXPORT_SYMBOL(skb_queue_tail);
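
/*
 * Usage sketch (illustrative): the locked queue helpers pair naturally
 * into a producer/consumer scheme; skb_queue_tail() may run in one
 * context and skb_dequeue() in another without extra locking.  The
 * queue name is hypothetical.
 *
 *	static struct sk_buff_head example_q;
 *
 *	skb_queue_head_init(&example_q);
 *	skb_queue_tail(&example_q, skb);		... producer ...
 *
 *	while ((skb = skb_dequeue(&example_q)) != NULL)
 *		... consume skb ...
 */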
2125 
2126 /**
2127  *	skb_unlink	-	remove a buffer from a list
2128  *	@skb: buffer to remove
2129  *	@list: list to use
2130  *
2131  *	Remove a packet from a list. The list locks are taken and this
2132  *	function is atomic with respect to other list locked calls.
2133  *
2134  *	You must know what list the SKB is on.
2135  */
2136 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2137 {
2138 	unsigned long flags;
2139 
2140 	spin_lock_irqsave(&list->lock, flags);
2141 	__skb_unlink(skb, list);
2142 	spin_unlock_irqrestore(&list->lock, flags);
2143 }
2144 EXPORT_SYMBOL(skb_unlink);
2145 
2146 /**
2147  *	skb_append	-	append a buffer
2148  *	@old: buffer to insert after
2149  *	@newsk: buffer to insert
2150  *	@list: list to use
2151  *
2152  *	Place a packet after a given packet in a list. The list locks are taken
2153  *	and this function is atomic with respect to other list locked calls.
2154  *	A buffer cannot be placed on two lists at the same time.
2155  */
2156 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2157 {
2158 	unsigned long flags;
2159 
2160 	spin_lock_irqsave(&list->lock, flags);
2161 	__skb_queue_after(list, old, newsk);
2162 	spin_unlock_irqrestore(&list->lock, flags);
2163 }
2164 EXPORT_SYMBOL(skb_append);
2165 
2166 /**
2167  *	skb_insert	-	insert a buffer
2168  *	@old: buffer to insert before
2169  *	@newsk: buffer to insert
2170  *	@list: list to use
2171  *
2172  *	Place a packet before a given packet in a list. The list locks are
2173  * 	taken and this function is atomic with respect to other list locked
2174  *	calls.
2175  *
2176  *	A buffer cannot be placed on two lists at the same time.
2177  */
2178 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2179 {
2180 	unsigned long flags;
2181 
2182 	spin_lock_irqsave(&list->lock, flags);
2183 	__skb_insert(newsk, old->prev, old, list);
2184 	spin_unlock_irqrestore(&list->lock, flags);
2185 }
2186 EXPORT_SYMBOL(skb_insert);
2187 
2188 static inline void skb_split_inside_header(struct sk_buff *skb,
2189 					   struct sk_buff* skb1,
2190 					   const u32 len, const int pos)
2191 {
2192 	int i;
2193 
2194 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2195 					 pos - len);
2196 	/* And move the paged-data appendix as is. */
2197 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2198 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2199 
2200 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2201 	skb_shinfo(skb)->nr_frags  = 0;
2202 	skb1->data_len		   = skb->data_len;
2203 	skb1->len		   += skb1->data_len;
2204 	skb->data_len		   = 0;
2205 	skb->len		   = len;
2206 	skb_set_tail_pointer(skb, len);
2207 }
2208 
2209 static inline void skb_split_no_header(struct sk_buff *skb,
2210 				       struct sk_buff* skb1,
2211 				       const u32 len, int pos)
2212 {
2213 	int i, k = 0;
2214 	const int nfrags = skb_shinfo(skb)->nr_frags;
2215 
2216 	skb_shinfo(skb)->nr_frags = 0;
2217 	skb1->len		  = skb1->data_len = skb->len - len;
2218 	skb->len		  = len;
2219 	skb->data_len		  = len - pos;
2220 
2221 	for (i = 0; i < nfrags; i++) {
2222 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2223 
2224 		if (pos + size > len) {
2225 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2226 
2227 			if (pos < len) {
2228 				/* Split frag.
2229 				 * We have two options in this case:
2230 				 * 1. Move the whole frag to the second
2231 				 *    part, if possible. E.g. this
2232 				 *    approach is mandatory for TUX,
2233 				 *    where splitting is expensive.
2234 				 * 2. Split accurately; this is what we do.
2235 				 */
2236 				skb_frag_ref(skb, i);
2237 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2238 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2239 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2240 				skb_shinfo(skb)->nr_frags++;
2241 			}
2242 			k++;
2243 		} else
2244 			skb_shinfo(skb)->nr_frags++;
2245 		pos += size;
2246 	}
2247 	skb_shinfo(skb1)->nr_frags = k;
2248 }
2249 
2250 /**
2251  * skb_split - Split a fragmented skb into two parts at length len.
2252  * @skb: the buffer to split
2253  * @skb1: the buffer to receive the second part
2254  * @len: new length for skb
2255  */
2256 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2257 {
2258 	int pos = skb_headlen(skb);
2259 
2260 	if (len < pos)	/* Split line is inside header. */
2261 		skb_split_inside_header(skb, skb1, len, pos);
2262 	else		/* Second chunk has no header, nothing to copy. */
2263 		skb_split_no_header(skb, skb1, len, pos);
2264 }
2265 EXPORT_SYMBOL(skb_split);
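
/*
 * Usage sketch (illustrative): TCP-style use of skb_split() when only
 * part of a queued skb fits the current MSS; @skb keeps the first @len
 * bytes and @skb1 receives the remainder.  The allocation shown is a
 * simplification (skb1 only needs room for any linear remainder).
 *
 *	struct sk_buff *nskb = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (nskb)
 *		skb_split(skb, nskb, mss);
 */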
2266 
2267 /* Shifting from/to a cloned skb is a no-go.
2268  *
2269  * Caller cannot keep skb_shinfo related pointers past calling here!
2270  */
2271 static int skb_prepare_for_shift(struct sk_buff *skb)
2272 {
2273 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2274 }
2275 
2276 /**
2277  * skb_shift - Shifts paged data partially from skb to another
2278  * @tgt: buffer into which tail data gets added
2279  * @skb: buffer from which the paged data comes from
2280  * @shiftlen: shift up to this many bytes
2281  *
2282  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2283  * the length of the skb, from skb to tgt. Returns number bytes shifted.
2284  * It's up to caller to free skb if everything was shifted.
2285  *
2286  * If @tgt runs out of frags, the whole operation is aborted.
2287  *
2288  * The skb may contain nothing but paged data, while tgt is allowed
2289  * to hold non-paged data as well.
2290  *
2291  * TODO: a full-sized shift could be optimized, but that would need a
2292  * specialized skb freer to handle frags without up-to-date nr_frags.
2293  */
2294 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2295 {
2296 	int from, to, merge, todo;
2297 	struct skb_frag_struct *fragfrom, *fragto;
2298 
2299 	BUG_ON(shiftlen > skb->len);
2300 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2301 
2302 	todo = shiftlen;
2303 	from = 0;
2304 	to = skb_shinfo(tgt)->nr_frags;
2305 	fragfrom = &skb_shinfo(skb)->frags[from];
2306 
2307 	/* Actual merge is delayed until the point when we know we can
2308 	 * commit all, so that we don't have to undo partial changes
2309 	 */
2310 	if (!to ||
2311 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2312 			      fragfrom->page_offset)) {
2313 		merge = -1;
2314 	} else {
2315 		merge = to - 1;
2316 
2317 		todo -= skb_frag_size(fragfrom);
2318 		if (todo < 0) {
2319 			if (skb_prepare_for_shift(skb) ||
2320 			    skb_prepare_for_shift(tgt))
2321 				return 0;
2322 
2323 			/* All previous frag pointers might be stale! */
2324 			fragfrom = &skb_shinfo(skb)->frags[from];
2325 			fragto = &skb_shinfo(tgt)->frags[merge];
2326 
2327 			skb_frag_size_add(fragto, shiftlen);
2328 			skb_frag_size_sub(fragfrom, shiftlen);
2329 			fragfrom->page_offset += shiftlen;
2330 
2331 			goto onlymerged;
2332 		}
2333 
2334 		from++;
2335 	}
2336 
2337 	/* Skip a full, non-fitting skb to avoid expensive operations */
2338 	if ((shiftlen == skb->len) &&
2339 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2340 		return 0;
2341 
2342 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2343 		return 0;
2344 
2345 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2346 		if (to == MAX_SKB_FRAGS)
2347 			return 0;
2348 
2349 		fragfrom = &skb_shinfo(skb)->frags[from];
2350 		fragto = &skb_shinfo(tgt)->frags[to];
2351 
2352 		if (todo >= skb_frag_size(fragfrom)) {
2353 			*fragto = *fragfrom;
2354 			todo -= skb_frag_size(fragfrom);
2355 			from++;
2356 			to++;
2357 
2358 		} else {
2359 			__skb_frag_ref(fragfrom);
2360 			fragto->page = fragfrom->page;
2361 			fragto->page_offset = fragfrom->page_offset;
2362 			skb_frag_size_set(fragto, todo);
2363 
2364 			fragfrom->page_offset += todo;
2365 			skb_frag_size_sub(fragfrom, todo);
2366 			todo = 0;
2367 
2368 			to++;
2369 			break;
2370 		}
2371 	}
2372 
2373 	/* Ready to "commit" this state change to tgt */
2374 	skb_shinfo(tgt)->nr_frags = to;
2375 
2376 	if (merge >= 0) {
2377 		fragfrom = &skb_shinfo(skb)->frags[0];
2378 		fragto = &skb_shinfo(tgt)->frags[merge];
2379 
2380 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2381 		__skb_frag_unref(fragfrom);
2382 	}
2383 
2384 	/* Reposition in the original skb */
2385 	to = 0;
2386 	while (from < skb_shinfo(skb)->nr_frags)
2387 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2388 	skb_shinfo(skb)->nr_frags = to;
2389 
2390 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2391 
2392 onlymerged:
2393 	/* Most likely the tgt won't ever need its checksum anymore; skb, on
2394 	 * the other hand, might need it if it has to be resent.
2395 	 */
2396 	tgt->ip_summed = CHECKSUM_PARTIAL;
2397 	skb->ip_summed = CHECKSUM_PARTIAL;
2398 
2399 	/* Adjust the byte counters by hand; a helper for this would be nicer. */
2400 	skb->len -= shiftlen;
2401 	skb->data_len -= shiftlen;
2402 	skb->truesize -= shiftlen;
2403 	tgt->len += shiftlen;
2404 	tgt->data_len += shiftlen;
2405 	tgt->truesize += shiftlen;
2406 
2407 	return shiftlen;
2408 }
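
/*
 * Usage sketch (illustrative): TCP's SACK processing uses skb_shift()
 * to collapse queued skbs by moving paged payload into a neighbour.
 * Note the BUG_ON above: @skb must carry no linear data.  The caller
 * frees @skb itself once everything has been shifted.
 *
 *	shifted = skb_shift(prev, skb, skb->len);
 *	if (shifted == skb->len)
 *		... unlink and free skb ...
 */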
2409 
2410 /**
2411  * skb_prepare_seq_read - Prepare a sequential read of skb data
2412  * @skb: the buffer to read
2413  * @from: lower offset of data to be read
2414  * @to: upper offset of data to be read
2415  * @st: state variable
2416  *
2417  * Initializes the specified state variable. Must be called before
2418  * invoking skb_seq_read() for the first time.
2419  */
2420 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2421 			  unsigned int to, struct skb_seq_state *st)
2422 {
2423 	st->lower_offset = from;
2424 	st->upper_offset = to;
2425 	st->root_skb = st->cur_skb = skb;
2426 	st->frag_idx = st->stepped_offset = 0;
2427 	st->frag_data = NULL;
2428 }
2429 EXPORT_SYMBOL(skb_prepare_seq_read);
2430 
2431 /**
2432  * skb_seq_read - Sequentially read skb data
2433  * @consumed: number of bytes consumed by the caller so far
2434  * @data: destination pointer for data to be returned
2435  * @st: state variable
2436  *
2437  * Reads a block of skb data at &consumed relative to the
2438  * lower offset specified to skb_prepare_seq_read(). Assigns
2439  * the head of the data block to &data and returns the length
2440  * of the block or 0 if the end of the skb data or the upper
2441  * offset has been reached.
2442  *
2443  * The caller is not required to consume all of the data
2444  * returned, i.e. &consumed is typically set to the number
2445  * of bytes already consumed and the next call to
2446  * skb_seq_read() will return the remaining part of the block.
2447  *
2448  * Note 1: The size of each block of data returned can be arbitrary;
2449  *       this limitation is the cost for zerocopy sequential
2450  *       reads of potentially non-linear data.
2451  *
2452  * Note 2: Fragment lists within fragments are not implemented
2453  *       at the moment, state->root_skb could be replaced with
2454  *       a stack for this purpose.
2455  */
2456 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2457 			  struct skb_seq_state *st)
2458 {
2459 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2460 	skb_frag_t *frag;
2461 
2462 	if (unlikely(abs_offset >= st->upper_offset))
2463 		return 0;
2464 
2465 next_skb:
2466 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2467 
2468 	if (abs_offset < block_limit && !st->frag_data) {
2469 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2470 		return block_limit - abs_offset;
2471 	}
2472 
2473 	if (st->frag_idx == 0 && !st->frag_data)
2474 		st->stepped_offset += skb_headlen(st->cur_skb);
2475 
2476 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2477 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2478 		block_limit = skb_frag_size(frag) + st->stepped_offset;
2479 
2480 		if (abs_offset < block_limit) {
2481 			if (!st->frag_data)
2482 				st->frag_data = kmap_skb_frag(frag);
2483 
2484 			*data = (u8 *) st->frag_data + frag->page_offset +
2485 				(abs_offset - st->stepped_offset);
2486 
2487 			return block_limit - abs_offset;
2488 		}
2489 
2490 		if (st->frag_data) {
2491 			kunmap_skb_frag(st->frag_data);
2492 			st->frag_data = NULL;
2493 		}
2494 
2495 		st->frag_idx++;
2496 		st->stepped_offset += skb_frag_size(frag);
2497 	}
2498 
2499 	if (st->frag_data) {
2500 		kunmap_skb_frag(st->frag_data);
2501 		st->frag_data = NULL;
2502 	}
2503 
2504 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2505 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2506 		st->frag_idx = 0;
2507 		goto next_skb;
2508 	} else if (st->cur_skb->next) {
2509 		st->cur_skb = st->cur_skb->next;
2510 		st->frag_idx = 0;
2511 		goto next_skb;
2512 	}
2513 
2514 	return 0;
2515 }
2516 EXPORT_SYMBOL(skb_seq_read);
2517 
2518 /**
2519  * skb_abort_seq_read - Abort a sequential read of skb data
2520  * @st: state variable
2521  *
2522  * Must be called if the sequential read is abandoned before
2523  * skb_seq_read() has returned 0.
2524  */
2525 void skb_abort_seq_read(struct skb_seq_state *st)
2526 {
2527 	if (st->frag_data)
2528 		kunmap_skb_frag(st->frag_data);
2529 }
2530 EXPORT_SYMBOL(skb_abort_seq_read);
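
/*
 * Usage sketch (illustrative): the three seq-read functions above are
 * meant to be used together; the loop below scans [from, to) of an skb
 * without ever linearizing it.  The helper name is hypothetical.
 *
 *	static void example_scan(struct sk_buff *skb,
 *				 unsigned int from, unsigned int to)
 *	{
 *		struct skb_seq_state st;
 *		unsigned int consumed = 0, len;
 *		const u8 *data;
 *
 *		skb_prepare_seq_read(skb, from, to, &st);
 *		while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *			... inspect len bytes at data ...
 *			consumed += len;
 *		}
 *		skb_abort_seq_read(&st);
 *	}
 */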
2531 
2532 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2533 
2534 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2535 					  struct ts_config *conf,
2536 					  struct ts_state *state)
2537 {
2538 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2539 }
2540 
2541 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2542 {
2543 	skb_abort_seq_read(TS_SKB_CB(state));
2544 }
2545 
2546 /**
2547  * skb_find_text - Find a text pattern in skb data
2548  * @skb: the buffer to look in
2549  * @from: search offset
2550  * @to: search limit
2551  * @config: textsearch configuration
2552  * @state: uninitialized textsearch state variable
2553  *
2554  * Finds a pattern in the skb data according to the specified
2555  * textsearch configuration. Use textsearch_next() to retrieve
2556  * subsequent occurrences of the pattern. Returns the offset
2557  * to the first occurrence or UINT_MAX if no match was found.
2558  */
2559 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2560 			   unsigned int to, struct ts_config *config,
2561 			   struct ts_state *state)
2562 {
2563 	unsigned int ret;
2564 
2565 	config->get_next_block = skb_ts_get_next_block;
2566 	config->finish = skb_ts_finish;
2567 
2568 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2569 
2570 	ret = textsearch_find(config, state);
2571 	return (ret <= to - from ? ret : UINT_MAX);
2572 }
2573 EXPORT_SYMBOL(skb_find_text);
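
/*
 * Usage sketch (illustrative): a textsearch configuration is prepared
 * once and can then be applied to many skbs, which is roughly how the
 * netfilter string match drives this API.  Error handling is
 * abbreviated.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		... pos == UINT_MAX means no match ...
 *		textsearch_destroy(conf);
 *	}
 */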
2574 
2575 /**
2576  * skb_append_datato_frags - append user data to an skb
2577  * @sk: sock structure
2578  * @skb: skb structure to which the user data is appended
2579  * @getfrag: callback function used to fetch the user data
2580  * @from: pointer to the user message iov
2581  * @length: length of the iov message
2582  *
2583  * Description: This procedure appends user data to the fragment part
2584  * of the skb. If any page allocation fails, it returns -ENOMEM.
2585  */
2586 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2587 			int (*getfrag)(void *from, char *to, int offset,
2588 					int len, int odd, struct sk_buff *skb),
2589 			void *from, int length)
2590 {
2591 	int frg_cnt = 0;
2592 	skb_frag_t *frag = NULL;
2593 	struct page *page = NULL;
2594 	int copy, left;
2595 	int offset = 0;
2596 	int ret;
2597 
2598 	do {
2599 		/* Return error if we don't have space for new frag */
2600 		frg_cnt = skb_shinfo(skb)->nr_frags;
2601 		if (frg_cnt >= MAX_SKB_FRAGS)
2602 			return -EFAULT;
2603 
2604 		/* allocate a new page for next frag */
2605 		page = alloc_pages(sk->sk_allocation, 0);
2606 
2607 		/* If alloc_pages() fails, just return failure; the caller
2608 		 * will free the previously allocated pages via kfree_skb().
2609 		 */
2610 		if (page == NULL)
2611 			return -ENOMEM;
2612 
2613 		/* initialize the next frag */
2614 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2615 		skb->truesize += PAGE_SIZE;
2616 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2617 
2618 		/* get the new initialized frag */
2619 		frg_cnt = skb_shinfo(skb)->nr_frags;
2620 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2621 
2622 		/* copy the user data to page */
2623 		left = PAGE_SIZE - frag->page_offset;
2624 		copy = (length > left)? left : length;
2625 
2626 		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
2627 			    offset, copy, 0, skb);
2628 		if (ret < 0)
2629 			return -EFAULT;
2630 
2631 		/* copy was successful so update the size parameters */
2632 		skb_frag_size_add(frag, copy);
2633 		skb->len += copy;
2634 		skb->data_len += copy;
2635 		offset += copy;
2636 		length -= copy;
2637 
2638 	} while (length > 0);
2639 
2640 	return 0;
2641 }
2642 EXPORT_SYMBOL(skb_append_datato_frags);
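
/*
 * Usage sketch (illustrative): @getfrag only has to copy @len bytes of
 * caller data at @offset into @to.  A minimal kernel-buffer variant
 * could look like this; the name is hypothetical (the UDP/UFO path
 * passes ip_generic_getfrag() here instead).
 *
 *	static int example_getfrag(void *from, char *to, int offset,
 *				   int len, int odd, struct sk_buff *skb)
 *	{
 *		memcpy(to, (char *)from + offset, len);
 *		return 0;
 *	}
 */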
2643 
2644 /**
2645  *	skb_pull_rcsum - pull skb and update receive checksum
2646  *	@skb: buffer to update
2647  *	@len: length of data pulled
2648  *
2649  *	This function performs an skb_pull on the packet and updates
2650  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2651  *	receive path processing instead of skb_pull unless you know
2652  *	that the checksum difference is zero (e.g., a valid IP header)
2653  *	or you are setting ip_summed to CHECKSUM_NONE.
2654  */
2655 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2656 {
2657 	BUG_ON(len > skb->len);
2658 	skb->len -= len;
2659 	BUG_ON(skb->len < skb->data_len);
2660 	skb_postpull_rcsum(skb, skb->data, len);
2661 	return skb->data += len;
2662 }
2663 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
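
/*
 * Usage sketch (illustrative): a tunnel-style receive handler stripping
 * its own header before passing the skb up would use skb_pull_rcsum()
 * so that a CHECKSUM_COMPLETE value stays correct.  EXAMPLE_HLEN is
 * hypothetical.
 *
 *	if (!pskb_may_pull(skb, EXAMPLE_HLEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, EXAMPLE_HLEN);
 */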
2664 
2665 /**
2666  *	skb_segment - Perform protocol segmentation on skb.
2667  *	@skb: buffer to segment
2668  *	@features: features for the output path (see dev->features)
2669  *
2670  *	This function performs segmentation on the given skb.  It returns
2671  *	a pointer to the first in a list of new skbs for the segments.
2672  *	In case of error it returns ERR_PTR(err).
2673  */
2674 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2675 {
2676 	struct sk_buff *segs = NULL;
2677 	struct sk_buff *tail = NULL;
2678 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2679 	unsigned int mss = skb_shinfo(skb)->gso_size;
2680 	unsigned int doffset = skb->data - skb_mac_header(skb);
2681 	unsigned int offset = doffset;
2682 	unsigned int headroom;
2683 	unsigned int len;
2684 	int sg = !!(features & NETIF_F_SG);
2685 	int nfrags = skb_shinfo(skb)->nr_frags;
2686 	int err = -ENOMEM;
2687 	int i = 0;
2688 	int pos;
2689 
2690 	__skb_push(skb, doffset);
2691 	headroom = skb_headroom(skb);
2692 	pos = skb_headlen(skb);
2693 
2694 	do {
2695 		struct sk_buff *nskb;
2696 		skb_frag_t *frag;
2697 		int hsize;
2698 		int size;
2699 
2700 		len = skb->len - offset;
2701 		if (len > mss)
2702 			len = mss;
2703 
2704 		hsize = skb_headlen(skb) - offset;
2705 		if (hsize < 0)
2706 			hsize = 0;
2707 		if (hsize > len || !sg)
2708 			hsize = len;
2709 
2710 		if (!hsize && i >= nfrags) {
2711 			BUG_ON(fskb->len != len);
2712 
2713 			pos += len;
2714 			nskb = skb_clone(fskb, GFP_ATOMIC);
2715 			fskb = fskb->next;
2716 
2717 			if (unlikely(!nskb))
2718 				goto err;
2719 
2720 			hsize = skb_end_pointer(nskb) - nskb->head;
2721 			if (skb_cow_head(nskb, doffset + headroom)) {
2722 				kfree_skb(nskb);
2723 				goto err;
2724 			}
2725 
2726 			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2727 					  hsize;
2728 			skb_release_head_state(nskb);
2729 			__skb_push(nskb, doffset);
2730 		} else {
2731 			nskb = alloc_skb(hsize + doffset + headroom,
2732 					 GFP_ATOMIC);
2733 
2734 			if (unlikely(!nskb))
2735 				goto err;
2736 
2737 			skb_reserve(nskb, headroom);
2738 			__skb_put(nskb, doffset);
2739 		}
2740 
2741 		if (segs)
2742 			tail->next = nskb;
2743 		else
2744 			segs = nskb;
2745 		tail = nskb;
2746 
2747 		__copy_skb_header(nskb, skb);
2748 		nskb->mac_len = skb->mac_len;
2749 
2750 		/* nskb and skb might have different headroom */
2751 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
2752 			nskb->csum_start += skb_headroom(nskb) - headroom;
2753 
2754 		skb_reset_mac_header(nskb);
2755 		skb_set_network_header(nskb, skb->mac_len);
2756 		nskb->transport_header = (nskb->network_header +
2757 					  skb_network_header_len(skb));
2758 		skb_copy_from_linear_data(skb, nskb->data, doffset);
2759 
2760 		if (fskb != skb_shinfo(skb)->frag_list)
2761 			continue;
2762 
2763 		if (!sg) {
2764 			nskb->ip_summed = CHECKSUM_NONE;
2765 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2766 							    skb_put(nskb, len),
2767 							    len, 0);
2768 			continue;
2769 		}
2770 
2771 		frag = skb_shinfo(nskb)->frags;
2772 
2773 		skb_copy_from_linear_data_offset(skb, offset,
2774 						 skb_put(nskb, hsize), hsize);
2775 
2776 		while (pos < offset + len && i < nfrags) {
2777 			*frag = skb_shinfo(skb)->frags[i];
2778 			__skb_frag_ref(frag);
2779 			size = skb_frag_size(frag);
2780 
2781 			if (pos < offset) {
2782 				frag->page_offset += offset - pos;
2783 				skb_frag_size_sub(frag, offset - pos);
2784 			}
2785 
2786 			skb_shinfo(nskb)->nr_frags++;
2787 
2788 			if (pos + size <= offset + len) {
2789 				i++;
2790 				pos += size;
2791 			} else {
2792 				skb_frag_size_sub(frag, pos + size - (offset + len));
2793 				goto skip_fraglist;
2794 			}
2795 
2796 			frag++;
2797 		}
2798 
2799 		if (pos < offset + len) {
2800 			struct sk_buff *fskb2 = fskb;
2801 
2802 			BUG_ON(pos + fskb->len != offset + len);
2803 
2804 			pos += fskb->len;
2805 			fskb = fskb->next;
2806 
2807 			if (fskb2->next) {
2808 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2809 				if (!fskb2)
2810 					goto err;
2811 			} else
2812 				skb_get(fskb2);
2813 
2814 			SKB_FRAG_ASSERT(nskb);
2815 			skb_shinfo(nskb)->frag_list = fskb2;
2816 		}
2817 
2818 skip_fraglist:
2819 		nskb->data_len = len - hsize;
2820 		nskb->len += nskb->data_len;
2821 		nskb->truesize += nskb->data_len;
2822 	} while ((offset += len) < skb->len);
2823 
2824 	return segs;
2825 
2826 err:
2827 	while ((skb = segs)) {
2828 		segs = skb->next;
2829 		kfree_skb(skb);
2830 	}
2831 	return ERR_PTR(err);
2832 }
2833 EXPORT_SYMBOL_GPL(skb_segment);
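
/*
 * Usage sketch (illustrative): callers receive a list of segments linked
 * through ->next and consume it roughly the way the core dev layer
 * consumes GSO segments.  Error handling is abbreviated.
 *
 *	struct sk_buff *segs, *nskb;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		nskb = segs;
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		... transmit nskb ...
 *	}
 */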
2834 
2835 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2836 {
2837 	struct sk_buff *p = *head;
2838 	struct sk_buff *nskb;
2839 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2840 	struct skb_shared_info *pinfo = skb_shinfo(p);
2841 	unsigned int headroom;
2842 	unsigned int len = skb_gro_len(skb);
2843 	unsigned int offset = skb_gro_offset(skb);
2844 	unsigned int headlen = skb_headlen(skb);
2845 
2846 	if (p->len + len >= 65536)
2847 		return -E2BIG;
2848 
2849 	if (pinfo->frag_list)
2850 		goto merge;
2851 	else if (headlen <= offset) {
2852 		skb_frag_t *frag;
2853 		skb_frag_t *frag2;
2854 		int i = skbinfo->nr_frags;
2855 		int nr_frags = pinfo->nr_frags + i;
2856 
2857 		offset -= headlen;
2858 
2859 		if (nr_frags > MAX_SKB_FRAGS)
2860 			return -E2BIG;
2861 
2862 		pinfo->nr_frags = nr_frags;
2863 		skbinfo->nr_frags = 0;
2864 
2865 		frag = pinfo->frags + nr_frags;
2866 		frag2 = skbinfo->frags + i;
2867 		do {
2868 			*--frag = *--frag2;
2869 		} while (--i);
2870 
2871 		frag->page_offset += offset;
2872 		skb_frag_size_sub(frag, offset);
2873 
2874 		skb->truesize -= skb->data_len;
2875 		skb->len -= skb->data_len;
2876 		skb->data_len = 0;
2877 
2878 		NAPI_GRO_CB(skb)->free = 1;
2879 		goto done;
2880 	} else if (skb_gro_len(p) != pinfo->gso_size)
2881 		return -E2BIG;
2882 
2883 	headroom = skb_headroom(p);
2884 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2885 	if (unlikely(!nskb))
2886 		return -ENOMEM;
2887 
2888 	__copy_skb_header(nskb, p);
2889 	nskb->mac_len = p->mac_len;
2890 
2891 	skb_reserve(nskb, headroom);
2892 	__skb_put(nskb, skb_gro_offset(p));
2893 
2894 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2895 	skb_set_network_header(nskb, skb_network_offset(p));
2896 	skb_set_transport_header(nskb, skb_transport_offset(p));
2897 
2898 	__skb_pull(p, skb_gro_offset(p));
2899 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2900 	       p->data - skb_mac_header(p));
2901 
2902 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2903 	skb_shinfo(nskb)->frag_list = p;
2904 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2905 	pinfo->gso_size = 0;
2906 	skb_header_release(p);
2907 	nskb->prev = p;
2908 
2909 	nskb->data_len += p->len;
2910 	nskb->truesize += p->truesize;
2911 	nskb->len += p->len;
2912 
2913 	*head = nskb;
2914 	nskb->next = p->next;
2915 	p->next = NULL;
2916 
2917 	p = nskb;
2918 
2919 merge:
2920 	p->truesize += skb->truesize - len;
2921 	if (offset > headlen) {
2922 		unsigned int eat = offset - headlen;
2923 
2924 		skbinfo->frags[0].page_offset += eat;
2925 		skb_frag_size_sub(&skbinfo->frags[0], eat);
2926 		skb->data_len -= eat;
2927 		skb->len -= eat;
2928 		offset = headlen;
2929 	}
2930 
2931 	__skb_pull(skb, offset);
2932 
2933 	p->prev->next = skb;
2934 	p->prev = skb;
2935 	skb_header_release(skb);
2936 
2937 done:
2938 	NAPI_GRO_CB(p)->count++;
2939 	p->data_len += len;
2940 	p->truesize += len;
2941 	p->len += len;
2942 
2943 	NAPI_GRO_CB(skb)->same_flow = 1;
2944 	return 0;
2945 }
2946 EXPORT_SYMBOL_GPL(skb_gro_receive);
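
/*
 * Usage context (illustrative): protocol gro_receive handlers (e.g.
 * TCP's) call this once they have matched @skb against a held packet
 * on the gro list; a non-zero return means the two could not be merged
 * and the flow should be flushed.
 *
 *	if (skb_gro_receive(head, skb))
 *		... cannot merge, flush this flow ...
 */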
2947 
2948 void __init skb_init(void)
2949 {
2950 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2951 					      sizeof(struct sk_buff),
2952 					      0,
2953 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2954 					      NULL);
2955 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2956 						(2*sizeof(struct sk_buff)) +
2957 						sizeof(atomic_t),
2958 						0,
2959 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2960 						NULL);
2961 }
2962 
2963 /**
2964  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2965  *	@skb: Socket buffer containing the buffers to be mapped
2966  *	@sg: The scatter-gather list to map into
2967  *	@offset: The offset into the buffer's contents to start mapping
2968  *	@len: Length of buffer space to be mapped
2969  *
2970  *	Fill the specified scatter-gather list with mappings/pointers into a
2971  *	region of the buffer space attached to a socket buffer.
2972  */
2973 static int
2974 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2975 {
2976 	int start = skb_headlen(skb);
2977 	int i, copy = start - offset;
2978 	struct sk_buff *frag_iter;
2979 	int elt = 0;
2980 
2981 	if (copy > 0) {
2982 		if (copy > len)
2983 			copy = len;
2984 		sg_set_buf(sg, skb->data + offset, copy);
2985 		elt++;
2986 		if ((len -= copy) == 0)
2987 			return elt;
2988 		offset += copy;
2989 	}
2990 
2991 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2992 		int end;
2993 
2994 		WARN_ON(start > offset + len);
2995 
2996 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2997 		if ((copy = end - offset) > 0) {
2998 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2999 
3000 			if (copy > len)
3001 				copy = len;
3002 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3003 					frag->page_offset+offset-start);
3004 			elt++;
3005 			if (!(len -= copy))
3006 				return elt;
3007 			offset += copy;
3008 		}
3009 		start = end;
3010 	}
3011 
3012 	skb_walk_frags(skb, frag_iter) {
3013 		int end;
3014 
3015 		WARN_ON(start > offset + len);
3016 
3017 		end = start + frag_iter->len;
3018 		if ((copy = end - offset) > 0) {
3019 			if (copy > len)
3020 				copy = len;
3021 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3022 					      copy);
3023 			if ((len -= copy) == 0)
3024 				return elt;
3025 			offset += copy;
3026 		}
3027 		start = end;
3028 	}
3029 	BUG_ON(len);
3030 	return elt;
3031 }
3032 
3033 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3034 {
3035 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
3036 
3037 	sg_mark_end(&sg[nsg - 1]);
3038 
3039 	return nsg;
3040 }
3041 EXPORT_SYMBOL_GPL(skb_to_sgvec);
3042 
3043 /**
3044  *	skb_cow_data - Check that a socket buffer's data buffers are writable
3045  *	@skb: The socket buffer to check.
3046  *	@tailbits: Amount of trailing space to be added
3047  *	@trailer: Returned pointer to the skb where the @tailbits space begins
3048  *
3049  *	Make sure that the data buffers attached to a socket buffer are
3050  *	writable. If they are not, private copies are made of the data buffers
3051  *	and the socket buffer is set to use these instead.
3052  *
3053  *	If @tailbits is given, make sure that there is space to write @tailbits
3054  *	bytes of data beyond current end of socket buffer.  @trailer will be
3055  *	set to point to the skb in which this space begins.
3056  *
3057  *	The number of scatterlist elements required to completely map the
3058  *	COW'd and extended socket buffer will be returned.
3059  */
3060 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3061 {
3062 	int copyflag;
3063 	int elt;
3064 	struct sk_buff *skb1, **skb_p;
3065 
3066 	/* If the skb is cloned or its head is paged, reallocate the
3067 	 * head, pulling out all the pages (pages are considered not
3068 	 * writable at the moment, even if they are anonymous).
3069 	 */
3070 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3071 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3072 		return -ENOMEM;
3073 
3074 	/* Easy case: most packets will go this way. */
3075 	if (!skb_has_frag_list(skb)) {
3076 		/* A little trouble: not enough space for the trailer.
3077 		 * This should not happen when the stack is tuned to
3078 		 * generate good frames. On a miss we reallocate and
3079 		 * reserve even more space; 128 bytes is fair. */
3080 
3081 		if (skb_tailroom(skb) < tailbits &&
3082 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3083 			return -ENOMEM;
3084 
3085 		/* Voila! */
3086 		*trailer = skb;
3087 		return 1;
3088 	}
3089 
3090 	/* Hard case: we are in trouble and have to mince the fragments... */
3091 
3092 	elt = 1;
3093 	skb_p = &skb_shinfo(skb)->frag_list;
3094 	copyflag = 0;
3095 
3096 	while ((skb1 = *skb_p) != NULL) {
3097 		int ntail = 0;
3098 
3099 		/* The fragment was partially pulled by someone;
3100 		 * this can happen on input. Copy it and everything
3101 		 * after it. */
3102 
3103 		if (skb_shared(skb1))
3104 			copyflag = 1;
3105 
3106 		/* If the skb is the last, worry about trailer. */
3107 
3108 		if (skb1->next == NULL && tailbits) {
3109 			if (skb_shinfo(skb1)->nr_frags ||
3110 			    skb_has_frag_list(skb1) ||
3111 			    skb_tailroom(skb1) < tailbits)
3112 				ntail = tailbits + 128;
3113 		}
3114 
3115 		if (copyflag ||
3116 		    skb_cloned(skb1) ||
3117 		    ntail ||
3118 		    skb_shinfo(skb1)->nr_frags ||
3119 		    skb_has_frag_list(skb1)) {
3120 			struct sk_buff *skb2;
3121 
3122 			/* Worst case: a private copy cannot be avoided. */
3123 			if (ntail == 0)
3124 				skb2 = skb_copy(skb1, GFP_ATOMIC);
3125 			else
3126 				skb2 = skb_copy_expand(skb1,
3127 						       skb_headroom(skb1),
3128 						       ntail,
3129 						       GFP_ATOMIC);
3130 			if (unlikely(skb2 == NULL))
3131 				return -ENOMEM;
3132 
3133 			if (skb1->sk)
3134 				skb_set_owner_w(skb2, skb1->sk);
3135 
3136 			/* The copy succeeded: link the new skb in
3137 			 * and drop the old one. */
3138 
3139 			skb2->next = skb1->next;
3140 			*skb_p = skb2;
3141 			kfree_skb(skb1);
3142 			skb1 = skb2;
3143 		}
3144 		elt++;
3145 		*trailer = skb1;
3146 		skb_p = &skb1->next;
3147 	}
3148 
3149 	return elt;
3150 }
3151 EXPORT_SYMBOL_GPL(skb_cow_data);
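
/*
 * Usage sketch (illustrative): the IPsec-style pattern combining
 * skb_cow_data() with skb_to_sgvec(); the return value of the former
 * bounds the scatterlist size needed by the latter.  Allocation of the
 * sg array is abbreviated.
 *
 *	struct sk_buff *trailer;
 *	struct scatterlist *sg;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	... allocate sg with nfrags entries ...
 *	sg_init_table(sg, nfrags);
 *	skb_to_sgvec(skb, sg, 0, skb->len);
 */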
3152 
3153 static void sock_rmem_free(struct sk_buff *skb)
3154 {
3155 	struct sock *sk = skb->sk;
3156 
3157 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3158 }
3159 
3160 /*
3161  * Note: We dont mem charge error packets (no sk_forward_alloc changes)
3162  */
3163 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3164 {
3165 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3166 	    (unsigned)sk->sk_rcvbuf)
3167 		return -ENOMEM;
3168 
3169 	skb_orphan(skb);
3170 	skb->sk = sk;
3171 	skb->destructor = sock_rmem_free;
3172 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3173 
3174 	/* before exiting rcu section, make sure dst is refcounted */
3175 	skb_dst_force(skb);
3176 
3177 	skb_queue_tail(&sk->sk_error_queue, skb);
3178 	if (!sock_flag(sk, SOCK_DEAD))
3179 		sk->sk_data_ready(sk, skb->len);
3180 	return 0;
3181 }
3182 EXPORT_SYMBOL(sock_queue_err_skb);
3183 
3184 void skb_tstamp_tx(struct sk_buff *orig_skb,
3185 		struct skb_shared_hwtstamps *hwtstamps)
3186 {
3187 	struct sock *sk = orig_skb->sk;
3188 	struct sock_exterr_skb *serr;
3189 	struct sk_buff *skb;
3190 	int err;
3191 
3192 	if (!sk)
3193 		return;
3194 
3195 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3196 	if (!skb)
3197 		return;
3198 
3199 	if (hwtstamps) {
3200 		*skb_hwtstamps(skb) =
3201 			*hwtstamps;
3202 	} else {
3203 		/*
3204 		 * no hardware time stamps available,
3205 		 * so keep the shared tx_flags and only
3206 		 * store software time stamp
3207 		 */
3208 		skb->tstamp = ktime_get_real();
3209 	}
3210 
3211 	serr = SKB_EXT_ERR(skb);
3212 	memset(serr, 0, sizeof(*serr));
3213 	serr->ee.ee_errno = ENOMSG;
3214 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3215 
3216 	err = sock_queue_err_skb(sk, skb);
3217 
3218 	if (err)
3219 		kfree_skb(skb);
3220 }
3221 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
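
/*
 * Usage sketch (illustrative): a driver that timestamps tx frames in
 * hardware reports the value back once the frame completes; "ns" is a
 * hypothetical raw nanosecond counter read from the device.
 *
 *	struct skb_shared_hwtstamps hwts;
 *
 *	memset(&hwts, 0, sizeof(hwts));
 *	hwts.hwtstamp = ns_to_ktime(ns);
 *	skb_tstamp_tx(skb, &hwts);
 *
 * Passing NULL instead of &hwts reports a software timestamp taken at
 * call time, as the function body above shows.
 */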
3222 
3223 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3224 {
3225 	struct sock *sk = skb->sk;
3226 	struct sock_exterr_skb *serr;
3227 	int err;
3228 
3229 	skb->wifi_acked_valid = 1;
3230 	skb->wifi_acked = acked;
3231 
3232 	serr = SKB_EXT_ERR(skb);
3233 	memset(serr, 0, sizeof(*serr));
3234 	serr->ee.ee_errno = ENOMSG;
3235 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3236 
3237 	err = sock_queue_err_skb(sk, skb);
3238 	if (err)
3239 		kfree_skb(skb);
3240 }
3241 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3242 
3243 
3244 /**
3245  * skb_partial_csum_set - set up and verify partial csum values for packet
3246  * @skb: the skb to set
3247  * @start: the number of bytes after skb->data to start checksumming.
3248  * @off: the offset from start to place the checksum.
3249  *
3250  * For untrusted partially-checksummed packets, we need to make sure the values
3251  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3252  *
3253  * This function checks and sets those values and skb->ip_summed: if this
3254  * returns false you should drop the packet.
3255  */
3256 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3257 {
3258 	if (unlikely(start > skb_headlen(skb)) ||
3259 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3260 		if (net_ratelimit())
3261 			printk(KERN_WARNING
3262 			       "bad partial csum: csum=%u/%u len=%u\n",
3263 			       start, off, skb_headlen(skb));
3264 		return false;
3265 	}
3266 	skb->ip_summed = CHECKSUM_PARTIAL;
3267 	skb->csum_start = skb_headroom(skb) + start;
3268 	skb->csum_offset = off;
3269 	return true;
3270 }
3271 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
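
/*
 * Usage sketch (illustrative): virtio-net-style validation of checksum
 * metadata received from an untrusted source; the packet is dropped
 * when the claimed offsets do not fit inside the header.
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
 *		goto frame_err;
 */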
3272 
3273 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3274 {
3275 	if (net_ratelimit())
3276 		pr_warning("%s: received packets cannot be forwarded"
3277 			   " while LRO is enabled\n", skb->dev->name);
3278 }
3279 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3280