xref: /linux/net/core/skbuff.c (revision 0dd9ac63ce26ec87b080ca9c3e6efed33c23ace6)
1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/kmemcheck.h>
43 #include <linux/mm.h>
44 #include <linux/interrupt.h>
45 #include <linux/in.h>
46 #include <linux/inet.h>
47 #include <linux/slab.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/splice.h>
55 #include <linux/cache.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/init.h>
58 #include <linux/scatterlist.h>
59 #include <linux/errqueue.h>
60 
61 #include <net/protocol.h>
62 #include <net/dst.h>
63 #include <net/sock.h>
64 #include <net/checksum.h>
65 #include <net/xfrm.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 #include <trace/events/skb.h>
70 
71 #include "kmap_skb.h"
72 
73 static struct kmem_cache *skbuff_head_cache __read_mostly;
74 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
75 
76 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
77 				  struct pipe_buffer *buf)
78 {
79 	put_page(buf->page);
80 }
81 
82 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
83 				struct pipe_buffer *buf)
84 {
85 	get_page(buf->page);
86 }
87 
88 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
89 			       struct pipe_buffer *buf)
90 {
91 	return 1;
92 }
93 
94 
95 /* Pipe buffer operations for a socket. */
96 static const struct pipe_buf_operations sock_pipe_buf_ops = {
97 	.can_merge = 0,
98 	.map = generic_pipe_buf_map,
99 	.unmap = generic_pipe_buf_unmap,
100 	.confirm = generic_pipe_buf_confirm,
101 	.release = sock_pipe_buf_release,
102 	.steal = sock_pipe_buf_steal,
103 	.get = sock_pipe_buf_get,
104 };
105 
106 /*
107  *	Keep out-of-line to prevent kernel bloat.
108  *	__builtin_return_address is not used because it is not always
109  *	reliable.
110  */
111 
112 /**
113  *	skb_over_panic	- 	private function
114  *	@skb: buffer
115  *	@sz: size
116  *	@here: address
117  *
118  *	Out of line support code for skb_put(). Not user callable.
119  */
120 static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
121 {
122 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
123 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
124 	       here, skb->len, sz, skb->head, skb->data,
125 	       (unsigned long)skb->tail, (unsigned long)skb->end,
126 	       skb->dev ? skb->dev->name : "<NULL>");
127 	BUG();
128 }
129 
130 /**
131  *	skb_under_panic	- 	private function
132  *	@skb: buffer
133  *	@sz: size
134  *	@here: address
135  *
136  *	Out of line support code for skb_push(). Not user callable.
137  */
138 
139 static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
140 {
141 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
142 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
143 	       here, skb->len, sz, skb->head, skb->data,
144 	       (unsigned long)skb->tail, (unsigned long)skb->end,
145 	       skb->dev ? skb->dev->name : "<NULL>");
146 	BUG();
147 }
148 
149 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
150  *	'private' fields and also do memory statistics to find all the
151  *	[BEEP] leaks.
152  *
153  */
154 
155 /**
156  *	__alloc_skb	-	allocate a network buffer
157  *	@size: size to allocate
158  *	@gfp_mask: allocation mask
159  *	@fclone: allocate from fclone cache instead of head cache
160  *		and allocate a cloned (child) skb
161  *	@node: numa node to allocate memory on
162  *
163  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
164  *	tailroom of @size bytes. The object has a reference count of one.
165  *	The return is the buffer. On a failure the return is %NULL.
166  *
167  *	Buffers may only be allocated from interrupts using a @gfp_mask of
168  *	%GFP_ATOMIC.
169  */
170 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
171 			    int fclone, int node)
172 {
173 	struct kmem_cache *cache;
174 	struct skb_shared_info *shinfo;
175 	struct sk_buff *skb;
176 	u8 *data;
177 
178 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
179 
180 	/* Get the HEAD */
181 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
182 	if (!skb)
183 		goto out;
184 	prefetchw(skb);
185 
186 	size = SKB_DATA_ALIGN(size);
187 	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
188 			gfp_mask, node);
189 	if (!data)
190 		goto nodata;
191 	prefetchw(data + size);
192 
193 	/*
194 	 * Only clear those fields we need to clear, not those that we will
195 	 * actually initialise below. Hence, don't put any more fields after
196 	 * the tail pointer in struct sk_buff!
197 	 */
198 	memset(skb, 0, offsetof(struct sk_buff, tail));
199 	skb->truesize = size + sizeof(struct sk_buff);
200 	atomic_set(&skb->users, 1);
201 	skb->head = data;
202 	skb->data = data;
203 	skb_reset_tail_pointer(skb);
204 	skb->end = skb->tail + size;
205 	kmemcheck_annotate_bitfield(skb, flags1);
206 	kmemcheck_annotate_bitfield(skb, flags2);
207 #ifdef NET_SKBUFF_DATA_USES_OFFSET
208 	skb->mac_header = ~0U;
209 #endif
210 
211 	/* make sure we initialize shinfo sequentially */
212 	shinfo = skb_shinfo(skb);
213 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
214 	atomic_set(&shinfo->dataref, 1);
215 
216 	if (fclone) {
217 		struct sk_buff *child = skb + 1;
218 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
219 
220 		kmemcheck_annotate_bitfield(child, flags1);
221 		kmemcheck_annotate_bitfield(child, flags2);
222 		skb->fclone = SKB_FCLONE_ORIG;
223 		atomic_set(fclone_ref, 1);
224 
225 		child->fclone = SKB_FCLONE_UNAVAILABLE;
226 	}
227 out:
228 	return skb;
229 nodata:
230 	kmem_cache_free(cache, skb);
231 	skb = NULL;
232 	goto out;
233 }
234 EXPORT_SYMBOL(__alloc_skb);
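
/*
 * Illustrative sketch, not part of this file: most callers use the
 * alloc_skb() wrapper rather than __alloc_skb() directly. "len",
 * "headroom" and "payload" below are hypothetical values owned by
 * the caller.
 *
 *	struct sk_buff *skb = alloc_skb(headroom + len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, headroom);
 *	memcpy(skb_put(skb, len), payload, len);
 */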
235 
236 /**
237  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
238  *	@dev: network device to receive on
239  *	@length: length to allocate
240  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
241  *
242  *	Allocate a new &sk_buff and assign it a usage count of one. The
243  *	buffer has unspecified headroom built in. Users should allocate
244  *	the headroom they think they need without accounting for the
245  *	built in space. The built in space is used for optimisations.
246  *
247  *	%NULL is returned if there is no free memory.
248  */
249 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
250 		unsigned int length, gfp_t gfp_mask)
251 {
252 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
253 	struct sk_buff *skb;
254 
255 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
256 	if (likely(skb)) {
257 		skb_reserve(skb, NET_SKB_PAD);
258 		skb->dev = dev;
259 	}
260 	return skb;
261 }
262 EXPORT_SYMBOL(__netdev_alloc_skb);
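
/*
 * Illustrative sketch, not part of this file: a driver receive path
 * normally calls the netdev_alloc_skb() wrapper, which supplies
 * GFP_ATOMIC and ends up here. "pkt_len" is a hypothetical frame length.
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (unlikely(!skb))
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);		(align the IP header)
 */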
263 
264 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
265 {
266 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
267 	struct page *page;
268 
269 	page = alloc_pages_node(node, gfp_mask, 0);
270 	return page;
271 }
272 EXPORT_SYMBOL(__netdev_alloc_page);
273 
274 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
275 		int size)
276 {
277 	skb_fill_page_desc(skb, i, page, off, size);
278 	skb->len += size;
279 	skb->data_len += size;
280 	skb->truesize += size;
281 }
282 EXPORT_SYMBOL(skb_add_rx_frag);
283 
284 /**
285  *	dev_alloc_skb - allocate an skbuff for receiving
286  *	@length: length to allocate
287  *
288  *	Allocate a new &sk_buff and assign it a usage count of one. The
289  *	buffer has unspecified headroom built in. Users should allocate
290  *	the headroom they think they need without accounting for the
291  *	built in space. The built in space is used for optimisations.
292  *
293  *	%NULL is returned if there is no free memory. Although this function
294  *	allocates memory it can be called from an interrupt.
295  */
296 struct sk_buff *dev_alloc_skb(unsigned int length)
297 {
298 	/*
299 	 * There is more code here than it seems:
300 	 * __dev_alloc_skb is an inline
301 	 */
302 	return __dev_alloc_skb(length, GFP_ATOMIC);
303 }
304 EXPORT_SYMBOL(dev_alloc_skb);
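
/*
 * Illustrative sketch, not part of this file: a hypothetical legacy driver
 * might fill a freshly allocated buffer and hand it to the stack like this
 * ("rx_buf" and "pkt_len" are assumptions of the example):
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */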
305 
306 static void skb_drop_list(struct sk_buff **listp)
307 {
308 	struct sk_buff *list = *listp;
309 
310 	*listp = NULL;
311 
312 	do {
313 		struct sk_buff *this = list;
314 		list = list->next;
315 		kfree_skb(this);
316 	} while (list);
317 }
318 
319 static inline void skb_drop_fraglist(struct sk_buff *skb)
320 {
321 	skb_drop_list(&skb_shinfo(skb)->frag_list);
322 }
323 
324 static void skb_clone_fraglist(struct sk_buff *skb)
325 {
326 	struct sk_buff *list;
327 
328 	skb_walk_frags(skb, list)
329 		skb_get(list);
330 }
331 
332 static void skb_release_data(struct sk_buff *skb)
333 {
334 	if (!skb->cloned ||
335 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
336 			       &skb_shinfo(skb)->dataref)) {
337 		if (skb_shinfo(skb)->nr_frags) {
338 			int i;
339 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
340 				put_page(skb_shinfo(skb)->frags[i].page);
341 		}
342 
343 		if (skb_has_frags(skb))
344 			skb_drop_fraglist(skb);
345 
346 		kfree(skb->head);
347 	}
348 }
349 
350 /*
351  *	Free the skbuff memory without cleaning the state.
352  */
353 static void kfree_skbmem(struct sk_buff *skb)
354 {
355 	struct sk_buff *other;
356 	atomic_t *fclone_ref;
357 
358 	switch (skb->fclone) {
359 	case SKB_FCLONE_UNAVAILABLE:
360 		kmem_cache_free(skbuff_head_cache, skb);
361 		break;
362 
363 	case SKB_FCLONE_ORIG:
364 		fclone_ref = (atomic_t *) (skb + 2);
365 		if (atomic_dec_and_test(fclone_ref))
366 			kmem_cache_free(skbuff_fclone_cache, skb);
367 		break;
368 
369 	case SKB_FCLONE_CLONE:
370 		fclone_ref = (atomic_t *) (skb + 1);
371 		other = skb - 1;
372 
373 		/* The clone portion is available for
374 		 * fast-cloning again.
375 		 */
376 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
377 
378 		if (atomic_dec_and_test(fclone_ref))
379 			kmem_cache_free(skbuff_fclone_cache, other);
380 		break;
381 	}
382 }
383 
384 static void skb_release_head_state(struct sk_buff *skb)
385 {
386 	skb_dst_drop(skb);
387 #ifdef CONFIG_XFRM
388 	secpath_put(skb->sp);
389 #endif
390 	if (skb->destructor) {
391 		WARN_ON(in_irq());
392 		skb->destructor(skb);
393 	}
394 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
395 	nf_conntrack_put(skb->nfct);
396 	nf_conntrack_put_reasm(skb->nfct_reasm);
397 #endif
398 #ifdef CONFIG_BRIDGE_NETFILTER
399 	nf_bridge_put(skb->nf_bridge);
400 #endif
401 /* XXX: Is this still necessary? - JHS */
402 #ifdef CONFIG_NET_SCHED
403 	skb->tc_index = 0;
404 #ifdef CONFIG_NET_CLS_ACT
405 	skb->tc_verd = 0;
406 #endif
407 #endif
408 }
409 
410 /* Free everything but the sk_buff shell. */
411 static void skb_release_all(struct sk_buff *skb)
412 {
413 	skb_release_head_state(skb);
414 	skb_release_data(skb);
415 }
416 
417 /**
418  *	__kfree_skb - private function
419  *	@skb: buffer
420  *
421  *	Free an sk_buff. Release anything attached to the buffer.
422  *	Clean the state. This is an internal helper function. Users should
423  *	always call kfree_skb instead.
424  */
425 
426 void __kfree_skb(struct sk_buff *skb)
427 {
428 	skb_release_all(skb);
429 	kfree_skbmem(skb);
430 }
431 EXPORT_SYMBOL(__kfree_skb);
432 
433 /**
434  *	kfree_skb - free an sk_buff
435  *	@skb: buffer to free
436  *
437  *	Drop a reference to the buffer and free it if the usage count has
438  *	hit zero.
439  */
440 void kfree_skb(struct sk_buff *skb)
441 {
442 	if (unlikely(!skb))
443 		return;
444 	if (likely(atomic_read(&skb->users) == 1))
445 		smp_rmb();
446 	else if (likely(!atomic_dec_and_test(&skb->users)))
447 		return;
448 	trace_kfree_skb(skb, __builtin_return_address(0));
449 	__kfree_skb(skb);
450 }
451 EXPORT_SYMBOL(kfree_skb);
452 
453 /**
454  *	consume_skb - free an skbuff
455  *	@skb: buffer to free
456  *
457  *	Drop a ref to the buffer and free it if the usage count has hit zero.
458  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
459  *	is being dropped after a failure and notes that.
460  */
461 void consume_skb(struct sk_buff *skb)
462 {
463 	if (unlikely(!skb))
464 		return;
465 	if (likely(atomic_read(&skb->users) == 1))
466 		smp_rmb();
467 	else if (likely(!atomic_dec_and_test(&skb->users)))
468 		return;
469 	__kfree_skb(skb);
470 }
471 EXPORT_SYMBOL(consume_skb);
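
/*
 * Illustrative sketch, not part of this file: both helpers free the buffer
 * the same way, but choosing the right one keeps the kfree_skb tracepoint
 * meaningful, since it should fire only for genuine drops.
 *
 *	if (transmitted_ok)
 *		consume_skb(skb);	(normal end of life)
 *	else
 *		kfree_skb(skb);		(dropped, visible to drop tracing)
 */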
472 
473 /**
474  *	skb_recycle_check - check if skb can be reused for receive
475  *	@skb: buffer
476  *	@skb_size: minimum receive buffer size
477  *
478  *	Checks that the skb passed in is not shared or cloned, and
479  *	that it is linear and its head portion is at least as large as
480  *	skb_size so that it can be recycled as a receive buffer.
481  *	If these conditions are met, this function does any necessary
482  *	reference count dropping and cleans up the skbuff as if it
483  *	just came from __alloc_skb().
484  */
485 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
486 {
487 	struct skb_shared_info *shinfo;
488 
489 	if (irqs_disabled())
490 		return false;
491 
492 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
493 		return false;
494 
495 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
496 	if (skb_end_pointer(skb) - skb->head < skb_size)
497 		return false;
498 
499 	if (skb_shared(skb) || skb_cloned(skb))
500 		return false;
501 
502 	skb_release_head_state(skb);
503 
504 	shinfo = skb_shinfo(skb);
505 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
506 	atomic_set(&shinfo->dataref, 1);
507 
508 	memset(skb, 0, offsetof(struct sk_buff, tail));
509 	skb->data = skb->head + NET_SKB_PAD;
510 	skb_reset_tail_pointer(skb);
511 
512 	return true;
513 }
514 EXPORT_SYMBOL(skb_recycle_check);
515 
516 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
517 {
518 	new->tstamp		= old->tstamp;
519 	new->dev		= old->dev;
520 	new->transport_header	= old->transport_header;
521 	new->network_header	= old->network_header;
522 	new->mac_header		= old->mac_header;
523 	skb_dst_copy(new, old);
524 	new->rxhash		= old->rxhash;
525 #ifdef CONFIG_XFRM
526 	new->sp			= secpath_get(old->sp);
527 #endif
528 	memcpy(new->cb, old->cb, sizeof(old->cb));
529 	new->csum		= old->csum;
530 	new->local_df		= old->local_df;
531 	new->pkt_type		= old->pkt_type;
532 	new->ip_summed		= old->ip_summed;
533 	skb_copy_queue_mapping(new, old);
534 	new->priority		= old->priority;
535 	new->deliver_no_wcard	= old->deliver_no_wcard;
536 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
537 	new->ipvs_property	= old->ipvs_property;
538 #endif
539 	new->protocol		= old->protocol;
540 	new->mark		= old->mark;
541 	new->skb_iif		= old->skb_iif;
542 	__nf_copy(new, old);
543 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
544     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
545 	new->nf_trace		= old->nf_trace;
546 #endif
547 #ifdef CONFIG_NET_SCHED
548 	new->tc_index		= old->tc_index;
549 #ifdef CONFIG_NET_CLS_ACT
550 	new->tc_verd		= old->tc_verd;
551 #endif
552 #endif
553 	new->vlan_tci		= old->vlan_tci;
554 
555 	skb_copy_secmark(new, old);
556 }
557 
558 /*
559  * You should not add any new code to this function.  Add it to
560  * __copy_skb_header above instead.
561  */
562 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
563 {
564 #define C(x) n->x = skb->x
565 
566 	n->next = n->prev = NULL;
567 	n->sk = NULL;
568 	__copy_skb_header(n, skb);
569 
570 	C(len);
571 	C(data_len);
572 	C(mac_len);
573 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
574 	n->cloned = 1;
575 	n->nohdr = 0;
576 	n->destructor = NULL;
577 	C(tail);
578 	C(end);
579 	C(head);
580 	C(data);
581 	C(truesize);
582 	atomic_set(&n->users, 1);
583 
584 	atomic_inc(&(skb_shinfo(skb)->dataref));
585 	skb->cloned = 1;
586 
587 	return n;
588 #undef C
589 }
590 
591 /**
592  *	skb_morph	-	morph one skb into another
593  *	@dst: the skb to receive the contents
594  *	@src: the skb to supply the contents
595  *
596  *	This is identical to skb_clone except that the target skb is
597  *	supplied by the user.
598  *
599  *	The target skb is returned upon exit.
600  */
601 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
602 {
603 	skb_release_all(dst);
604 	return __skb_clone(dst, src);
605 }
606 EXPORT_SYMBOL_GPL(skb_morph);
607 
608 /**
609  *	skb_clone	-	duplicate an sk_buff
610  *	@skb: buffer to clone
611  *	@gfp_mask: allocation priority
612  *
613  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
614  *	copies share the same packet data but not structure. The new
615  *	buffer has a reference count of 1. If the allocation fails the
616  *	function returns %NULL otherwise the new buffer is returned.
617  *
618  *	If this function is called from an interrupt gfp_mask() must be
619  *	%GFP_ATOMIC.
620  */
621 
622 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
623 {
624 	struct sk_buff *n;
625 
626 	n = skb + 1;
627 	if (skb->fclone == SKB_FCLONE_ORIG &&
628 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
629 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
630 		n->fclone = SKB_FCLONE_CLONE;
631 		atomic_inc(fclone_ref);
632 	} else {
633 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
634 		if (!n)
635 			return NULL;
636 
637 		kmemcheck_annotate_bitfield(n, flags1);
638 		kmemcheck_annotate_bitfield(n, flags2);
639 		n->fclone = SKB_FCLONE_UNAVAILABLE;
640 	}
641 
642 	return __skb_clone(n, skb);
643 }
644 EXPORT_SYMBOL(skb_clone);
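
/*
 * Illustrative sketch, not part of this file: a clone shares the packet
 * data with the original, so it suits read-only consumers such as taps or
 * sniffers; writers need skb_copy()/pskb_copy() instead.
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		deliver_to_tap(nskb);	(hypothetical read-only consumer)
 */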
645 
646 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
647 {
648 #ifndef NET_SKBUFF_DATA_USES_OFFSET
649 	/*
650 	 *	Shift between the two data areas in bytes
651 	 */
652 	unsigned long offset = new->data - old->data;
653 #endif
654 
655 	__copy_skb_header(new, old);
656 
657 #ifndef NET_SKBUFF_DATA_USES_OFFSET
658 	/* {transport,network,mac}_header are relative to skb->head */
659 	new->transport_header += offset;
660 	new->network_header   += offset;
661 	if (skb_mac_header_was_set(new))
662 		new->mac_header	      += offset;
663 #endif
664 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
665 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
666 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
667 }
668 
669 /**
670  *	skb_copy	-	create private copy of an sk_buff
671  *	@skb: buffer to copy
672  *	@gfp_mask: allocation priority
673  *
674  *	Make a copy of both an &sk_buff and its data. This is used when the
675  *	caller wishes to modify the data and needs a private copy of the
676  *	data to alter. Returns %NULL on failure or the pointer to the buffer
677  *	on success. The returned buffer has a reference count of 1.
678  *
679  *	As a by-product this function converts a non-linear &sk_buff to a
680  *	linear one, so that the &sk_buff becomes completely private and the
681  *	caller is allowed to modify all the data of the returned buffer. This
682  *	means that this function is not recommended for use in circumstances
683  *	where only the header is going to be modified. Use pskb_copy() instead.
684  */
685 
686 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
687 {
688 	int headerlen = skb->data - skb->head;
689 	/*
690 	 *	Allocate the copy buffer
691 	 */
692 	struct sk_buff *n;
693 #ifdef NET_SKBUFF_DATA_USES_OFFSET
694 	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
695 #else
696 	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
697 #endif
698 	if (!n)
699 		return NULL;
700 
701 	/* Set the data pointer */
702 	skb_reserve(n, headerlen);
703 	/* Set the tail pointer and length */
704 	skb_put(n, skb->len);
705 
706 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
707 		BUG();
708 
709 	copy_skb_header(n, skb);
710 	return n;
711 }
712 EXPORT_SYMBOL(skb_copy);
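
/*
 * Illustrative sketch, not part of this file: when the payload itself has
 * to be rewritten, a full private copy is taken first ("mangle_payload"
 * stands in for the caller's modification).
 *
 *	struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	mangle_payload(nskb);
 */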
713 
714 /**
715  *	pskb_copy	-	create copy of an sk_buff with private head.
716  *	@skb: buffer to copy
717  *	@gfp_mask: allocation priority
718  *
719  *	Make a copy of both an &sk_buff and part of its data, located
720  *	in the header. Fragmented data remains shared. This is used when
721  *	the caller wishes to modify only the header of the &sk_buff and needs
722  *	a private copy of the header to alter. Returns %NULL on failure
723  *	or the pointer to the buffer on success.
724  *	The returned buffer has a reference count of 1.
725  */
726 
727 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
728 {
729 	/*
730 	 *	Allocate the copy buffer
731 	 */
732 	struct sk_buff *n;
733 #ifdef NET_SKBUFF_DATA_USES_OFFSET
734 	n = alloc_skb(skb->end, gfp_mask);
735 #else
736 	n = alloc_skb(skb->end - skb->head, gfp_mask);
737 #endif
738 	if (!n)
739 		goto out;
740 
741 	/* Set the data pointer */
742 	skb_reserve(n, skb->data - skb->head);
743 	/* Set the tail pointer and length */
744 	skb_put(n, skb_headlen(skb));
745 	/* Copy the bytes */
746 	skb_copy_from_linear_data(skb, n->data, n->len);
747 
748 	n->truesize += skb->data_len;
749 	n->data_len  = skb->data_len;
750 	n->len	     = skb->len;
751 
752 	if (skb_shinfo(skb)->nr_frags) {
753 		int i;
754 
755 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
756 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
757 			get_page(skb_shinfo(n)->frags[i].page);
758 		}
759 		skb_shinfo(n)->nr_frags = i;
760 	}
761 
762 	if (skb_has_frags(skb)) {
763 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
764 		skb_clone_fraglist(n);
765 	}
766 
767 	copy_skb_header(n, skb);
768 out:
769 	return n;
770 }
771 EXPORT_SYMBOL(pskb_copy);
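
/*
 * Illustrative sketch, not part of this file: when only the headers are
 * rewritten (e.g. address rewriting), pskb_copy() avoids copying the paged
 * fragments ("new_saddr" is an assumption of the example):
 *
 *	struct sk_buff *nskb = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	ip_hdr(nskb)->saddr = new_saddr;
 */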
772 
773 /**
774  *	pskb_expand_head - reallocate header of &sk_buff
775  *	@skb: buffer to reallocate
776  *	@nhead: room to add at head
777  *	@ntail: room to add at tail
778  *	@gfp_mask: allocation priority
779  *
780  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
781  *	the header of the skb. The &sk_buff itself is not changed. The &sk_buff
782  *	MUST have a reference count of 1. Returns zero on success or a negative
783  *	error code if expansion failed, in which case the &sk_buff is not changed.
784  *
785  *	All the pointers pointing into skb header may change and must be
786  *	reloaded after call to this function.
787  */
788 
789 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
790 		     gfp_t gfp_mask)
791 {
792 	int i;
793 	u8 *data;
794 #ifdef NET_SKBUFF_DATA_USES_OFFSET
795 	int size = nhead + skb->end + ntail;
796 #else
797 	int size = nhead + (skb->end - skb->head) + ntail;
798 #endif
799 	long off;
800 
801 	BUG_ON(nhead < 0);
802 
803 	if (skb_shared(skb))
804 		BUG();
805 
806 	size = SKB_DATA_ALIGN(size);
807 
808 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
809 	if (!data)
810 		goto nodata;
811 
812 	/* Copy only real data... and, alas, the header. This should be
813 	 * optimized for the case when the header is empty. */
814 #ifdef NET_SKBUFF_DATA_USES_OFFSET
815 	memcpy(data + nhead, skb->head, skb->tail);
816 #else
817 	memcpy(data + nhead, skb->head, skb->tail - skb->head);
818 #endif
819 	memcpy(data + size, skb_end_pointer(skb),
820 	       sizeof(struct skb_shared_info));
821 
822 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
823 		get_page(skb_shinfo(skb)->frags[i].page);
824 
825 	if (skb_has_frags(skb))
826 		skb_clone_fraglist(skb);
827 
828 	skb_release_data(skb);
829 
830 	off = (data + nhead) - skb->head;
831 
832 	skb->head     = data;
833 	skb->data    += off;
834 #ifdef NET_SKBUFF_DATA_USES_OFFSET
835 	skb->end      = size;
836 	off           = nhead;
837 #else
838 	skb->end      = skb->head + size;
839 #endif
840 	/* {transport,network,mac}_header and tail are relative to skb->head */
841 	skb->tail	      += off;
842 	skb->transport_header += off;
843 	skb->network_header   += off;
844 	if (skb_mac_header_was_set(skb))
845 		skb->mac_header += off;
846 	skb->csum_start       += nhead;
847 	skb->cloned   = 0;
848 	skb->hdr_len  = 0;
849 	skb->nohdr    = 0;
850 	atomic_set(&skb_shinfo(skb)->dataref, 1);
851 	return 0;
852 
853 nodata:
854 	return -ENOMEM;
855 }
856 EXPORT_SYMBOL(pskb_expand_head);
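
/*
 * Illustrative sketch, not part of this file: callers typically grow the
 * headroom before pushing a new header, and must re-derive any cached
 * pointers into the buffer afterwards ("struct my_hdr" and "hlen" are
 * assumptions of the example):
 *
 *	if (skb_headroom(skb) < hlen &&
 *	    pskb_expand_head(skb, hlen - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = (struct my_hdr *)skb_push(skb, hlen);
 */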
857 
858 /* Make private copy of skb with writable head and some headroom */
859 
860 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
861 {
862 	struct sk_buff *skb2;
863 	int delta = headroom - skb_headroom(skb);
864 
865 	if (delta <= 0)
866 		skb2 = pskb_copy(skb, GFP_ATOMIC);
867 	else {
868 		skb2 = skb_clone(skb, GFP_ATOMIC);
869 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
870 					     GFP_ATOMIC)) {
871 			kfree_skb(skb2);
872 			skb2 = NULL;
873 		}
874 	}
875 	return skb2;
876 }
877 EXPORT_SYMBOL(skb_realloc_headroom);
878 
879 /**
880  *	skb_copy_expand	-	copy and expand sk_buff
881  *	@skb: buffer to copy
882  *	@newheadroom: new free bytes at head
883  *	@newtailroom: new free bytes at tail
884  *	@gfp_mask: allocation priority
885  *
886  *	Make a copy of both an &sk_buff and its data and while doing so
887  *	allocate additional space.
888  *
889  *	This is used when the caller wishes to modify the data and needs a
890  *	private copy of the data to alter as well as more space for new fields.
891  *	Returns %NULL on failure or the pointer to the buffer
892  *	on success. The returned buffer has a reference count of 1.
893  *
894  *	You must pass %GFP_ATOMIC as the allocation priority if this function
895  *	is called from an interrupt.
896  */
897 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
898 				int newheadroom, int newtailroom,
899 				gfp_t gfp_mask)
900 {
901 	/*
902 	 *	Allocate the copy buffer
903 	 */
904 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
905 				      gfp_mask);
906 	int oldheadroom = skb_headroom(skb);
907 	int head_copy_len, head_copy_off;
908 	int off;
909 
910 	if (!n)
911 		return NULL;
912 
913 	skb_reserve(n, newheadroom);
914 
915 	/* Set the tail pointer and length */
916 	skb_put(n, skb->len);
917 
918 	head_copy_len = oldheadroom;
919 	head_copy_off = 0;
920 	if (newheadroom <= head_copy_len)
921 		head_copy_len = newheadroom;
922 	else
923 		head_copy_off = newheadroom - head_copy_len;
924 
925 	/* Copy the linear header and data. */
926 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
927 			  skb->len + head_copy_len))
928 		BUG();
929 
930 	copy_skb_header(n, skb);
931 
932 	off                  = newheadroom - oldheadroom;
933 	n->csum_start       += off;
934 #ifdef NET_SKBUFF_DATA_USES_OFFSET
935 	n->transport_header += off;
936 	n->network_header   += off;
937 	if (skb_mac_header_was_set(skb))
938 		n->mac_header += off;
939 #endif
940 
941 	return n;
942 }
943 EXPORT_SYMBOL(skb_copy_expand);
944 
945 /**
946  *	skb_pad			-	zero pad the tail of an skb
947  *	@skb: buffer to pad
948  *	@pad: space to pad
949  *
950  *	Ensure that a buffer is followed by a padding area that is zero
951  *	filled. Used by network drivers which may DMA or transfer data
952  *	beyond the buffer end onto the wire.
953  *
954  *	May return an error in out-of-memory cases. The skb is freed on error.
955  */
956 
957 int skb_pad(struct sk_buff *skb, int pad)
958 {
959 	int err;
960 	int ntail;
961 
962 	/* If the skbuff is non-linear, tailroom is always zero. */
963 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
964 		memset(skb->data + skb->len, 0, pad);
965 		return 0;
966 	}
967 
968 	ntail = skb->data_len + pad - (skb->end - skb->tail);
969 	if (likely(skb_cloned(skb) || ntail > 0)) {
970 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
971 		if (unlikely(err))
972 			goto free_skb;
973 	}
974 
975 	/* FIXME: The use of this function with non-linear skb's really needs
976 	 * to be audited.
977 	 */
978 	err = skb_linearize(skb);
979 	if (unlikely(err))
980 		goto free_skb;
981 
982 	memset(skb->data + skb->len, 0, pad);
983 	return 0;
984 
985 free_skb:
986 	kfree_skb(skb);
987 	return err;
988 }
989 EXPORT_SYMBOL(skb_pad);
990 
991 /**
992  *	skb_put - add data to a buffer
993  *	@skb: buffer to use
994  *	@len: amount of data to add
995  *
996  *	This function extends the used data area of the buffer. If this would
997  *	exceed the total buffer size the kernel will panic. A pointer to the
998  *	first byte of the extra data is returned.
999  */
1000 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1001 {
1002 	unsigned char *tmp = skb_tail_pointer(skb);
1003 	SKB_LINEAR_ASSERT(skb);
1004 	skb->tail += len;
1005 	skb->len  += len;
1006 	if (unlikely(skb->tail > skb->end))
1007 		skb_over_panic(skb, len, __builtin_return_address(0));
1008 	return tmp;
1009 }
1010 EXPORT_SYMBOL(skb_put);
1011 
1012 /**
1013  *	skb_push - add data to the start of a buffer
1014  *	@skb: buffer to use
1015  *	@len: amount of data to add
1016  *
1017  *	This function extends the used data area of the buffer at the buffer
1018  *	start. If this would exceed the total buffer headroom the kernel will
1019  *	panic. A pointer to the first byte of the extra data is returned.
1020  */
1021 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1022 {
1023 	skb->data -= len;
1024 	skb->len  += len;
1025 	if (unlikely(skb->data < skb->head))
1026 		skb_under_panic(skb, len, __builtin_return_address(0));
1027 	return skb->data;
1028 }
1029 EXPORT_SYMBOL(skb_push);
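
/*
 * Illustrative sketch, not part of this file: a transmit path prepends a
 * protocol header into previously reserved headroom ("struct my_hdr" and
 * its fields are assumptions of the example):
 *
 *	struct my_hdr *h = (struct my_hdr *)skb_push(skb, sizeof(*h));
 *
 *	h->len = htons(skb->len);
 */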
1030 
1031 /**
1032  *	skb_pull - remove data from the start of a buffer
1033  *	@skb: buffer to use
1034  *	@len: amount of data to remove
1035  *
1036  *	This function removes data from the start of a buffer, returning
1037  *	the memory to the headroom. A pointer to the next data in the buffer
1038  *	is returned. Once the data has been pulled future pushes will overwrite
1039  *	the old data.
1040  */
1041 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1042 {
1043 	return skb_pull_inline(skb, len);
1044 }
1045 EXPORT_SYMBOL(skb_pull);
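
/*
 * Illustrative sketch, not part of this file: a receive path typically
 * makes sure the header it is about to consume is in the linear area and
 * then pulls it, leaving skb->data pointing at the payload ("struct
 * my_hdr" is an assumption of the example):
 *
 *	struct my_hdr *h;
 *
 *	if (!pskb_may_pull(skb, sizeof(*h)))
 *		goto drop;
 *	h = (struct my_hdr *)skb->data;
 *	skb_pull(skb, sizeof(*h));
 */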
1046 
1047 /**
1048  *	skb_trim - remove end from a buffer
1049  *	@skb: buffer to alter
1050  *	@len: new length
1051  *
1052  *	Cut the length of a buffer down by removing data from the tail. If
1053  *	the buffer is already under the length specified it is not modified.
1054  *	The skb must be linear.
1055  */
1056 void skb_trim(struct sk_buff *skb, unsigned int len)
1057 {
1058 	if (skb->len > len)
1059 		__skb_trim(skb, len);
1060 }
1061 EXPORT_SYMBOL(skb_trim);
1062 
1063 /* Trims skb to length len. It can change skb pointers.
1064  */
1065 
1066 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1067 {
1068 	struct sk_buff **fragp;
1069 	struct sk_buff *frag;
1070 	int offset = skb_headlen(skb);
1071 	int nfrags = skb_shinfo(skb)->nr_frags;
1072 	int i;
1073 	int err;
1074 
1075 	if (skb_cloned(skb) &&
1076 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1077 		return err;
1078 
1079 	i = 0;
1080 	if (offset >= len)
1081 		goto drop_pages;
1082 
1083 	for (; i < nfrags; i++) {
1084 		int end = offset + skb_shinfo(skb)->frags[i].size;
1085 
1086 		if (end < len) {
1087 			offset = end;
1088 			continue;
1089 		}
1090 
1091 		skb_shinfo(skb)->frags[i++].size = len - offset;
1092 
1093 drop_pages:
1094 		skb_shinfo(skb)->nr_frags = i;
1095 
1096 		for (; i < nfrags; i++)
1097 			put_page(skb_shinfo(skb)->frags[i].page);
1098 
1099 		if (skb_has_frags(skb))
1100 			skb_drop_fraglist(skb);
1101 		goto done;
1102 	}
1103 
1104 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1105 	     fragp = &frag->next) {
1106 		int end = offset + frag->len;
1107 
1108 		if (skb_shared(frag)) {
1109 			struct sk_buff *nfrag;
1110 
1111 			nfrag = skb_clone(frag, GFP_ATOMIC);
1112 			if (unlikely(!nfrag))
1113 				return -ENOMEM;
1114 
1115 			nfrag->next = frag->next;
1116 			kfree_skb(frag);
1117 			frag = nfrag;
1118 			*fragp = frag;
1119 		}
1120 
1121 		if (end < len) {
1122 			offset = end;
1123 			continue;
1124 		}
1125 
1126 		if (end > len &&
1127 		    unlikely((err = pskb_trim(frag, len - offset))))
1128 			return err;
1129 
1130 		if (frag->next)
1131 			skb_drop_list(&frag->next);
1132 		break;
1133 	}
1134 
1135 done:
1136 	if (len > skb_headlen(skb)) {
1137 		skb->data_len -= skb->len - len;
1138 		skb->len       = len;
1139 	} else {
1140 		skb->len       = len;
1141 		skb->data_len  = 0;
1142 		skb_set_tail_pointer(skb, len);
1143 	}
1144 
1145 	return 0;
1146 }
1147 EXPORT_SYMBOL(___pskb_trim);
1148 
1149 /**
1150  *	__pskb_pull_tail - advance tail of skb header
1151  *	@skb: buffer to reallocate
1152  *	@delta: number of bytes to advance tail
1153  *
1154  *	This function makes sense only on a fragmented &sk_buff; it expands
1155  *	the header, moving its tail forward and copying the necessary data
1156  *	from the fragmented part.
1157  *
1158  *	&sk_buff MUST have reference count of 1.
1159  *
1160  *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
1161  *	or the value of the new tail of the skb on success.
1162  *
1163  *	All the pointers pointing into skb header may change and must be
1164  *	reloaded after call to this function.
1165  */
1166 
1167 /* Moves the tail of the skb head forward, copying data from the fragmented
1168  * part when necessary.
1169  * 1. It may fail due to malloc failure.
1170  * 2. It may change skb pointers.
1171  *
1172  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1173  */
1174 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1175 {
1176 	/* If the skb does not have enough free space at the tail, get a new
1177 	 * one plus 128 bytes for future expansions. If there is enough room
1178 	 * at the tail, reallocate without expansion only if the skb is cloned.
1179 	 */
1180 	int i, k, eat = (skb->tail + delta) - skb->end;
1181 
1182 	if (eat > 0 || skb_cloned(skb)) {
1183 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1184 				     GFP_ATOMIC))
1185 			return NULL;
1186 	}
1187 
1188 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1189 		BUG();
1190 
1191 	/* Optimization: no fragments, no reason to pre-estimate the
1192 	 * size of the pulled pages. Superb.
1193 	 */
1194 	if (!skb_has_frags(skb))
1195 		goto pull_pages;
1196 
1197 	/* Estimate size of pulled pages. */
1198 	eat = delta;
1199 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1200 		if (skb_shinfo(skb)->frags[i].size >= eat)
1201 			goto pull_pages;
1202 		eat -= skb_shinfo(skb)->frags[i].size;
1203 	}
1204 
1205 	/* If we need to update the frag list, we are in trouble.
1206 	 * Certainly, it is possible to add an offset to the skb data,
1207 	 * but taking into account that pulling is expected to
1208 	 * be a very rare operation, it is worth fighting against
1209 	 * further bloating of the skb head and crucifying ourselves here instead.
1210 	 * Pure masochism, indeed. 8)8)
1211 	 */
1212 	if (eat) {
1213 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1214 		struct sk_buff *clone = NULL;
1215 		struct sk_buff *insp = NULL;
1216 
1217 		do {
1218 			BUG_ON(!list);
1219 
1220 			if (list->len <= eat) {
1221 				/* Eaten as a whole. */
1222 				eat -= list->len;
1223 				list = list->next;
1224 				insp = list;
1225 			} else {
1226 				/* Eaten partially. */
1227 
1228 				if (skb_shared(list)) {
1229 					/* Sucks! We need to fork list. :-( */
1230 					clone = skb_clone(list, GFP_ATOMIC);
1231 					if (!clone)
1232 						return NULL;
1233 					insp = list->next;
1234 					list = clone;
1235 				} else {
1236 					/* This may be pulled without
1237 					 * problems. */
1238 					insp = list;
1239 				}
1240 				if (!pskb_pull(list, eat)) {
1241 					kfree_skb(clone);
1242 					return NULL;
1243 				}
1244 				break;
1245 			}
1246 		} while (eat);
1247 
1248 		/* Free pulled out fragments. */
1249 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1250 			skb_shinfo(skb)->frag_list = list->next;
1251 			kfree_skb(list);
1252 		}
1253 		/* And insert new clone at head. */
1254 		if (clone) {
1255 			clone->next = list;
1256 			skb_shinfo(skb)->frag_list = clone;
1257 		}
1258 	}
1259 	/* Success! Now we may commit changes to skb data. */
1260 
1261 pull_pages:
1262 	eat = delta;
1263 	k = 0;
1264 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1265 		if (skb_shinfo(skb)->frags[i].size <= eat) {
1266 			put_page(skb_shinfo(skb)->frags[i].page);
1267 			eat -= skb_shinfo(skb)->frags[i].size;
1268 		} else {
1269 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1270 			if (eat) {
1271 				skb_shinfo(skb)->frags[k].page_offset += eat;
1272 				skb_shinfo(skb)->frags[k].size -= eat;
1273 				eat = 0;
1274 			}
1275 			k++;
1276 		}
1277 	}
1278 	skb_shinfo(skb)->nr_frags = k;
1279 
1280 	skb->tail     += delta;
1281 	skb->data_len -= delta;
1282 
1283 	return skb_tail_pointer(skb);
1284 }
1285 EXPORT_SYMBOL(__pskb_pull_tail);
1286 
1287 /* Copy some data bits from skb to kernel buffer. */
1288 
1289 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1290 {
1291 	int start = skb_headlen(skb);
1292 	struct sk_buff *frag_iter;
1293 	int i, copy;
1294 
1295 	if (offset > (int)skb->len - len)
1296 		goto fault;
1297 
1298 	/* Copy header. */
1299 	if ((copy = start - offset) > 0) {
1300 		if (copy > len)
1301 			copy = len;
1302 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1303 		if ((len -= copy) == 0)
1304 			return 0;
1305 		offset += copy;
1306 		to     += copy;
1307 	}
1308 
1309 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1310 		int end;
1311 
1312 		WARN_ON(start > offset + len);
1313 
1314 		end = start + skb_shinfo(skb)->frags[i].size;
1315 		if ((copy = end - offset) > 0) {
1316 			u8 *vaddr;
1317 
1318 			if (copy > len)
1319 				copy = len;
1320 
1321 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1322 			memcpy(to,
1323 			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
1324 			       offset - start, copy);
1325 			kunmap_skb_frag(vaddr);
1326 
1327 			if ((len -= copy) == 0)
1328 				return 0;
1329 			offset += copy;
1330 			to     += copy;
1331 		}
1332 		start = end;
1333 	}
1334 
1335 	skb_walk_frags(skb, frag_iter) {
1336 		int end;
1337 
1338 		WARN_ON(start > offset + len);
1339 
1340 		end = start + frag_iter->len;
1341 		if ((copy = end - offset) > 0) {
1342 			if (copy > len)
1343 				copy = len;
1344 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1345 				goto fault;
1346 			if ((len -= copy) == 0)
1347 				return 0;
1348 			offset += copy;
1349 			to     += copy;
1350 		}
1351 		start = end;
1352 	}
1353 	if (!len)
1354 		return 0;
1355 
1356 fault:
1357 	return -EFAULT;
1358 }
1359 EXPORT_SYMBOL(skb_copy_bits);
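
/*
 * Illustrative sketch, not part of this file: copying out of an skb that
 * may be non-linear goes through skb_copy_bits() rather than a memcpy()
 * from skb->data ("buf" is a caller-provided buffer in this example):
 *
 *	u8 buf[64];
 *
 *	if (skb_copy_bits(skb, 0, buf, min_t(int, skb->len, sizeof(buf))))
 *		goto fault;
 */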
1360 
1361 /*
1362  * Callback from splice_to_pipe(), if we need to release some pages
1363  * at the end of the spd in case we errored out while filling the pipe.
1364  */
1365 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1366 {
1367 	put_page(spd->pages[i]);
1368 }
1369 
1370 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1371 					  unsigned int *offset,
1372 					  struct sk_buff *skb, struct sock *sk)
1373 {
1374 	struct page *p = sk->sk_sndmsg_page;
1375 	unsigned int off;
1376 
1377 	if (!p) {
1378 new_page:
1379 		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1380 		if (!p)
1381 			return NULL;
1382 
1383 		off = sk->sk_sndmsg_off = 0;
1384 		/* hold one ref to this page until it's full */
1385 	} else {
1386 		unsigned int mlen;
1387 
1388 		off = sk->sk_sndmsg_off;
1389 		mlen = PAGE_SIZE - off;
1390 		if (mlen < 64 && mlen < *len) {
1391 			put_page(p);
1392 			goto new_page;
1393 		}
1394 
1395 		*len = min_t(unsigned int, *len, mlen);
1396 	}
1397 
1398 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1399 	sk->sk_sndmsg_off += *len;
1400 	*offset = off;
1401 	get_page(p);
1402 
1403 	return p;
1404 }
1405 
1406 /*
1407  * Fill page/offset/length into spd, if it can hold more pages.
1408  */
1409 static inline int spd_fill_page(struct splice_pipe_desc *spd,
1410 				struct pipe_inode_info *pipe, struct page *page,
1411 				unsigned int *len, unsigned int offset,
1412 				struct sk_buff *skb, int linear,
1413 				struct sock *sk)
1414 {
1415 	if (unlikely(spd->nr_pages == pipe->buffers))
1416 		return 1;
1417 
1418 	if (linear) {
1419 		page = linear_to_page(page, len, &offset, skb, sk);
1420 		if (!page)
1421 			return 1;
1422 	} else
1423 		get_page(page);
1424 
1425 	spd->pages[spd->nr_pages] = page;
1426 	spd->partial[spd->nr_pages].len = *len;
1427 	spd->partial[spd->nr_pages].offset = offset;
1428 	spd->nr_pages++;
1429 
1430 	return 0;
1431 }
1432 
1433 static inline void __segment_seek(struct page **page, unsigned int *poff,
1434 				  unsigned int *plen, unsigned int off)
1435 {
1436 	unsigned long n;
1437 
1438 	*poff += off;
1439 	n = *poff / PAGE_SIZE;
1440 	if (n)
1441 		*page = nth_page(*page, n);
1442 
1443 	*poff = *poff % PAGE_SIZE;
1444 	*plen -= off;
1445 }
1446 
1447 static inline int __splice_segment(struct page *page, unsigned int poff,
1448 				   unsigned int plen, unsigned int *off,
1449 				   unsigned int *len, struct sk_buff *skb,
1450 				   struct splice_pipe_desc *spd, int linear,
1451 				   struct sock *sk,
1452 				   struct pipe_inode_info *pipe)
1453 {
1454 	if (!*len)
1455 		return 1;
1456 
1457 	/* skip this segment if already processed */
1458 	if (*off >= plen) {
1459 		*off -= plen;
1460 		return 0;
1461 	}
1462 
1463 	/* ignore any bits we already processed */
1464 	if (*off) {
1465 		__segment_seek(&page, &poff, &plen, *off);
1466 		*off = 0;
1467 	}
1468 
1469 	do {
1470 		unsigned int flen = min(*len, plen);
1471 
1472 		/* the linear region may spread across several pages  */
1473 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1474 
1475 		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1476 			return 1;
1477 
1478 		__segment_seek(&page, &poff, &plen, flen);
1479 		*len -= flen;
1480 
1481 	} while (*len && plen);
1482 
1483 	return 0;
1484 }
1485 
1486 /*
1487  * Map linear and fragment data from the skb to spd. It reports failure if the
1488  * pipe is full or if we already spliced the requested length.
1489  */
1490 static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1491 			     unsigned int *offset, unsigned int *len,
1492 			     struct splice_pipe_desc *spd, struct sock *sk)
1493 {
1494 	int seg;
1495 
1496 	/*
1497 	 * map the linear part
1498 	 */
1499 	if (__splice_segment(virt_to_page(skb->data),
1500 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1501 			     skb_headlen(skb),
1502 			     offset, len, skb, spd, 1, sk, pipe))
1503 		return 1;
1504 
1505 	/*
1506 	 * then map the fragments
1507 	 */
1508 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1509 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1510 
1511 		if (__splice_segment(f->page, f->page_offset, f->size,
1512 				     offset, len, skb, spd, 0, sk, pipe))
1513 			return 1;
1514 	}
1515 
1516 	return 0;
1517 }
1518 
1519 /*
1520  * Map data from the skb to a pipe. This should handle the linear part,
1521  * the fragments, and the frag list. It does NOT handle frag lists within
1522  * the frag list, if such a thing exists. We'd probably need to recurse to
1523  * handle that cleanly.
1524  */
1525 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1526 		    struct pipe_inode_info *pipe, unsigned int tlen,
1527 		    unsigned int flags)
1528 {
1529 	struct partial_page partial[PIPE_DEF_BUFFERS];
1530 	struct page *pages[PIPE_DEF_BUFFERS];
1531 	struct splice_pipe_desc spd = {
1532 		.pages = pages,
1533 		.partial = partial,
1534 		.flags = flags,
1535 		.ops = &sock_pipe_buf_ops,
1536 		.spd_release = sock_spd_release,
1537 	};
1538 	struct sk_buff *frag_iter;
1539 	struct sock *sk = skb->sk;
1540 	int ret = 0;
1541 
1542 	if (splice_grow_spd(pipe, &spd))
1543 		return -ENOMEM;
1544 
1545 	/*
1546 	 * __skb_splice_bits() only fails if the output has no room left,
1547 	 * so no point in going over the frag_list for the error case.
1548 	 */
1549 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1550 		goto done;
1551 	else if (!tlen)
1552 		goto done;
1553 
1554 	/*
1555 	 * now see if we have a frag_list to map
1556 	 */
1557 	skb_walk_frags(skb, frag_iter) {
1558 		if (!tlen)
1559 			break;
1560 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1561 			break;
1562 	}
1563 
1564 done:
1565 	if (spd.nr_pages) {
1566 		/*
1567 		 * Drop the socket lock, otherwise we have reverse
1568 		 * locking dependencies between sk_lock and i_mutex
1569 		 * here as compared to sendfile(). We enter here
1570 		 * with the socket lock held, and splice_to_pipe() will
1571 		 * grab the pipe inode lock. For sendfile() emulation,
1572 		 * we call into ->sendpage() with the i_mutex lock held
1573 		 * and networking will grab the socket lock.
1574 		 */
1575 		release_sock(sk);
1576 		ret = splice_to_pipe(pipe, &spd);
1577 		lock_sock(sk);
1578 	}
1579 
1580 	splice_shrink_spd(pipe, &spd);
1581 	return ret;
1582 }
1583 
1584 /**
1585  *	skb_store_bits - store bits from kernel buffer to skb
1586  *	@skb: destination buffer
1587  *	@offset: offset in destination
1588  *	@from: source buffer
1589  *	@len: number of bytes to copy
1590  *
1591  *	Copy the specified number of bytes from the source buffer to the
1592  *	destination skb.  This function handles all the messy bits of
1593  *	traversing fragment lists and such.
1594  */
1595 
1596 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1597 {
1598 	int start = skb_headlen(skb);
1599 	struct sk_buff *frag_iter;
1600 	int i, copy;
1601 
1602 	if (offset > (int)skb->len - len)
1603 		goto fault;
1604 
1605 	if ((copy = start - offset) > 0) {
1606 		if (copy > len)
1607 			copy = len;
1608 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1609 		if ((len -= copy) == 0)
1610 			return 0;
1611 		offset += copy;
1612 		from += copy;
1613 	}
1614 
1615 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1616 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1617 		int end;
1618 
1619 		WARN_ON(start > offset + len);
1620 
1621 		end = start + frag->size;
1622 		if ((copy = end - offset) > 0) {
1623 			u8 *vaddr;
1624 
1625 			if (copy > len)
1626 				copy = len;
1627 
1628 			vaddr = kmap_skb_frag(frag);
1629 			memcpy(vaddr + frag->page_offset + offset - start,
1630 			       from, copy);
1631 			kunmap_skb_frag(vaddr);
1632 
1633 			if ((len -= copy) == 0)
1634 				return 0;
1635 			offset += copy;
1636 			from += copy;
1637 		}
1638 		start = end;
1639 	}
1640 
1641 	skb_walk_frags(skb, frag_iter) {
1642 		int end;
1643 
1644 		WARN_ON(start > offset + len);
1645 
1646 		end = start + frag_iter->len;
1647 		if ((copy = end - offset) > 0) {
1648 			if (copy > len)
1649 				copy = len;
1650 			if (skb_store_bits(frag_iter, offset - start,
1651 					   from, copy))
1652 				goto fault;
1653 			if ((len -= copy) == 0)
1654 				return 0;
1655 			offset += copy;
1656 			from += copy;
1657 		}
1658 		start = end;
1659 	}
1660 	if (!len)
1661 		return 0;
1662 
1663 fault:
1664 	return -EFAULT;
1665 }
1666 EXPORT_SYMBOL(skb_store_bits);
1667 
1668 /* Checksum skb data. */
1669 
1670 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1671 			  int len, __wsum csum)
1672 {
1673 	int start = skb_headlen(skb);
1674 	int i, copy = start - offset;
1675 	struct sk_buff *frag_iter;
1676 	int pos = 0;
1677 
1678 	/* Checksum header. */
1679 	if (copy > 0) {
1680 		if (copy > len)
1681 			copy = len;
1682 		csum = csum_partial(skb->data + offset, copy, csum);
1683 		if ((len -= copy) == 0)
1684 			return csum;
1685 		offset += copy;
1686 		pos	= copy;
1687 	}
1688 
1689 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1690 		int end;
1691 
1692 		WARN_ON(start > offset + len);
1693 
1694 		end = start + skb_shinfo(skb)->frags[i].size;
1695 		if ((copy = end - offset) > 0) {
1696 			__wsum csum2;
1697 			u8 *vaddr;
1698 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1699 
1700 			if (copy > len)
1701 				copy = len;
1702 			vaddr = kmap_skb_frag(frag);
1703 			csum2 = csum_partial(vaddr + frag->page_offset +
1704 					     offset - start, copy, 0);
1705 			kunmap_skb_frag(vaddr);
1706 			csum = csum_block_add(csum, csum2, pos);
1707 			if (!(len -= copy))
1708 				return csum;
1709 			offset += copy;
1710 			pos    += copy;
1711 		}
1712 		start = end;
1713 	}
1714 
1715 	skb_walk_frags(skb, frag_iter) {
1716 		int end;
1717 
1718 		WARN_ON(start > offset + len);
1719 
1720 		end = start + frag_iter->len;
1721 		if ((copy = end - offset) > 0) {
1722 			__wsum csum2;
1723 			if (copy > len)
1724 				copy = len;
1725 			csum2 = skb_checksum(frag_iter, offset - start,
1726 					     copy, 0);
1727 			csum = csum_block_add(csum, csum2, pos);
1728 			if ((len -= copy) == 0)
1729 				return csum;
1730 			offset += copy;
1731 			pos    += copy;
1732 		}
1733 		start = end;
1734 	}
1735 	BUG_ON(len);
1736 
1737 	return csum;
1738 }
1739 EXPORT_SYMBOL(skb_checksum);
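
/*
 * Illustrative sketch, not part of this file: a caller verifying a packet
 * checksum folds the 32-bit partial sum returned here; a non-zero folded
 * result indicates a corrupt packet.
 *
 *	__wsum csum = skb_checksum(skb, 0, skb->len, 0);
 *
 *	if (csum_fold(csum))
 *		goto csum_error;
 */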
1740 
1741 /* Both of above in one bottle. */
1742 
1743 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1744 				    u8 *to, int len, __wsum csum)
1745 {
1746 	int start = skb_headlen(skb);
1747 	int i, copy = start - offset;
1748 	struct sk_buff *frag_iter;
1749 	int pos = 0;
1750 
1751 	/* Copy header. */
1752 	if (copy > 0) {
1753 		if (copy > len)
1754 			copy = len;
1755 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1756 						 copy, csum);
1757 		if ((len -= copy) == 0)
1758 			return csum;
1759 		offset += copy;
1760 		to     += copy;
1761 		pos	= copy;
1762 	}
1763 
1764 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1765 		int end;
1766 
1767 		WARN_ON(start > offset + len);
1768 
1769 		end = start + skb_shinfo(skb)->frags[i].size;
1770 		if ((copy = end - offset) > 0) {
1771 			__wsum csum2;
1772 			u8 *vaddr;
1773 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1774 
1775 			if (copy > len)
1776 				copy = len;
1777 			vaddr = kmap_skb_frag(frag);
1778 			csum2 = csum_partial_copy_nocheck(vaddr +
1779 							  frag->page_offset +
1780 							  offset - start, to,
1781 							  copy, 0);
1782 			kunmap_skb_frag(vaddr);
1783 			csum = csum_block_add(csum, csum2, pos);
1784 			if (!(len -= copy))
1785 				return csum;
1786 			offset += copy;
1787 			to     += copy;
1788 			pos    += copy;
1789 		}
1790 		start = end;
1791 	}
1792 
1793 	skb_walk_frags(skb, frag_iter) {
1794 		__wsum csum2;
1795 		int end;
1796 
1797 		WARN_ON(start > offset + len);
1798 
1799 		end = start + frag_iter->len;
1800 		if ((copy = end - offset) > 0) {
1801 			if (copy > len)
1802 				copy = len;
1803 			csum2 = skb_copy_and_csum_bits(frag_iter,
1804 						       offset - start,
1805 						       to, copy, 0);
1806 			csum = csum_block_add(csum, csum2, pos);
1807 			if ((len -= copy) == 0)
1808 				return csum;
1809 			offset += copy;
1810 			to     += copy;
1811 			pos    += copy;
1812 		}
1813 		start = end;
1814 	}
1815 	BUG_ON(len);
1816 	return csum;
1817 }
1818 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1819 
1820 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1821 {
1822 	__wsum csum;
1823 	long csstart;
1824 
1825 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1826 		csstart = skb->csum_start - skb_headroom(skb);
1827 	else
1828 		csstart = skb_headlen(skb);
1829 
1830 	BUG_ON(csstart > skb_headlen(skb));
1831 
1832 	skb_copy_from_linear_data(skb, to, csstart);
1833 
1834 	csum = 0;
1835 	if (csstart != skb->len)
1836 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1837 					      skb->len - csstart, 0);
1838 
1839 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1840 		long csstuff = csstart + skb->csum_offset;
1841 
1842 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
1843 	}
1844 }
1845 EXPORT_SYMBOL(skb_copy_and_csum_dev);
1846 
1847 /**
1848  *	skb_dequeue - remove from the head of the queue
1849  *	@list: list to dequeue from
1850  *
1851  *	Remove the head of the list. The list lock is taken so the function
1852  *	may be used safely with other locking list functions. The head item is
1853  *	returned or %NULL if the list is empty.
1854  */
1855 
1856 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1857 {
1858 	unsigned long flags;
1859 	struct sk_buff *result;
1860 
1861 	spin_lock_irqsave(&list->lock, flags);
1862 	result = __skb_dequeue(list);
1863 	spin_unlock_irqrestore(&list->lock, flags);
1864 	return result;
1865 }
1866 EXPORT_SYMBOL(skb_dequeue);
1867 
1868 /**
1869  *	skb_dequeue_tail - remove from the tail of the queue
1870  *	@list: list to dequeue from
1871  *
1872  *	Remove the tail of the list. The list lock is taken so the function
1873  *	may be used safely with other locking list functions. The tail item is
1874  *	returned or %NULL if the list is empty.
1875  */
1876 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1877 {
1878 	unsigned long flags;
1879 	struct sk_buff *result;
1880 
1881 	spin_lock_irqsave(&list->lock, flags);
1882 	result = __skb_dequeue_tail(list);
1883 	spin_unlock_irqrestore(&list->lock, flags);
1884 	return result;
1885 }
1886 EXPORT_SYMBOL(skb_dequeue_tail);
1887 
1888 /**
1889  *	skb_queue_purge - empty a list
1890  *	@list: list to empty
1891  *
1892  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1893  *	the list and one reference dropped. This function takes the list
1894  *	lock and is atomic with respect to other list locking functions.
1895  */
1896 void skb_queue_purge(struct sk_buff_head *list)
1897 {
1898 	struct sk_buff *skb;
1899 	while ((skb = skb_dequeue(list)) != NULL)
1900 		kfree_skb(skb);
1901 }
1902 EXPORT_SYMBOL(skb_queue_purge);
1903 
1904 /**
1905  *	skb_queue_head - queue a buffer at the list head
1906  *	@list: list to use
1907  *	@newsk: buffer to queue
1908  *
1909  *	Queue a buffer at the start of the list. This function takes the
1910  *	list lock and can be used safely with other locking &sk_buff
1911  *	functions.
1912  *
1913  *	A buffer cannot be placed on two lists at the same time.
1914  */
1915 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1916 {
1917 	unsigned long flags;
1918 
1919 	spin_lock_irqsave(&list->lock, flags);
1920 	__skb_queue_head(list, newsk);
1921 	spin_unlock_irqrestore(&list->lock, flags);
1922 }
1923 EXPORT_SYMBOL(skb_queue_head);
1924 
1925 /**
1926  *	skb_queue_tail - queue a buffer at the list tail
1927  *	@list: list to use
1928  *	@newsk: buffer to queue
1929  *
1930  *	Queue a buffer at the tail of the list. This function takes the
1931  *	list lock and can be used safely with other locking &sk_buff
1932  *	functions.
1933  *
1934  *	A buffer cannot be placed on two lists at the same time.
1935  */
1936 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1937 {
1938 	unsigned long flags;
1939 
1940 	spin_lock_irqsave(&list->lock, flags);
1941 	__skb_queue_tail(list, newsk);
1942 	spin_unlock_irqrestore(&list->lock, flags);
1943 }
1944 EXPORT_SYMBOL(skb_queue_tail);
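
/*
 * Illustrative sketch, not part of this file: a typical producer/consumer
 * pair built on an &sk_buff_head ("rxq" is a queue owned by the example
 * and process_skb() is a placeholder for the consumer's work):
 *
 *	skb_queue_tail(&rxq, skb);			(producer side)
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	(consumer side)
 *		process_skb(skb);
 */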
1945 
1946 /**
1947  *	skb_unlink	-	remove a buffer from a list
1948  *	@skb: buffer to remove
1949  *	@list: list to use
1950  *
1951  *	Remove a packet from a list. The list locks are taken and this
1952  *	function is atomic with respect to other list locked calls.
1953  *
1954  *	You must know what list the SKB is on.
1955  */
1956 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1957 {
1958 	unsigned long flags;
1959 
1960 	spin_lock_irqsave(&list->lock, flags);
1961 	__skb_unlink(skb, list);
1962 	spin_unlock_irqrestore(&list->lock, flags);
1963 }
1964 EXPORT_SYMBOL(skb_unlink);
1965 
1966 /**
1967  *	skb_append	-	append a buffer
1968  *	@old: buffer to insert after
1969  *	@newsk: buffer to insert
1970  *	@list: list to use
1971  *
1972  *	Place a packet after a given packet in a list. The list locks are taken
1973  *	and this function is atomic with respect to other list locked calls.
1974  *	A buffer cannot be placed on two lists at the same time.
1975  */
1976 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1977 {
1978 	unsigned long flags;
1979 
1980 	spin_lock_irqsave(&list->lock, flags);
1981 	__skb_queue_after(list, old, newsk);
1982 	spin_unlock_irqrestore(&list->lock, flags);
1983 }
1984 EXPORT_SYMBOL(skb_append);
1985 
1986 /**
1987  *	skb_insert	-	insert a buffer
1988  *	@old: buffer to insert before
1989  *	@newsk: buffer to insert
1990  *	@list: list to use
1991  *
1992  *	Place a packet before a given packet in a list. The list locks are
1993  * 	taken and this function is atomic with respect to other list locked
1994  *	calls.
1995  *
1996  *	A buffer cannot be placed on two lists at the same time.
1997  */
1998 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1999 {
2000 	unsigned long flags;
2001 
2002 	spin_lock_irqsave(&list->lock, flags);
2003 	__skb_insert(newsk, old->prev, old, list);
2004 	spin_unlock_irqrestore(&list->lock, flags);
2005 }
2006 EXPORT_SYMBOL(skb_insert);
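
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * repositioning a buffer on a locked list with the helpers above. Note that
 * the two calls take the list lock separately, so the move as a whole is
 * not atomic.
 */
static void example_move_after(struct sk_buff *skb, struct sk_buff *after,
			       struct sk_buff_head *list)
{
	skb_unlink(skb, list);			/* drop it from its old slot */
	skb_append(after, skb, list);		/* reinsert it after @after */
}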
2007 
2008 static inline void skb_split_inside_header(struct sk_buff *skb,
2009 					   struct sk_buff* skb1,
2010 					   const u32 len, const int pos)
2011 {
2012 	int i;
2013 
2014 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2015 					 pos - len);
2016 	/* And move data appendix as is. */
2017 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2018 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2019 
2020 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2021 	skb_shinfo(skb)->nr_frags  = 0;
2022 	skb1->data_len		   = skb->data_len;
2023 	skb1->len		   += skb1->data_len;
2024 	skb->data_len		   = 0;
2025 	skb->len		   = len;
2026 	skb_set_tail_pointer(skb, len);
2027 }
2028 
2029 static inline void skb_split_no_header(struct sk_buff *skb,
2030 				       struct sk_buff* skb1,
2031 				       const u32 len, int pos)
2032 {
2033 	int i, k = 0;
2034 	const int nfrags = skb_shinfo(skb)->nr_frags;
2035 
2036 	skb_shinfo(skb)->nr_frags = 0;
2037 	skb1->len		  = skb1->data_len = skb->len - len;
2038 	skb->len		  = len;
2039 	skb->data_len		  = len - pos;
2040 
2041 	for (i = 0; i < nfrags; i++) {
2042 		int size = skb_shinfo(skb)->frags[i].size;
2043 
2044 		if (pos + size > len) {
2045 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2046 
2047 			if (pos < len) {
2048 				/* Split frag.
2049 				 * We have two variants in this case:
2050 				 * 1. Move the whole frag to the second
2051 				 *    part, if it is possible. F.e.
2052 				 *    this approach is mandatory for TUX,
2053 				 *    where splitting is expensive.
2054 				 * 2. Split is accurately. We make this.
2055 				 * 2. Split the frag accurately. We do this here.
2056 				get_page(skb_shinfo(skb)->frags[i].page);
2057 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2058 				skb_shinfo(skb1)->frags[0].size -= len - pos;
2059 				skb_shinfo(skb)->frags[i].size	= len - pos;
2060 				skb_shinfo(skb)->nr_frags++;
2061 			}
2062 			k++;
2063 		} else
2064 			skb_shinfo(skb)->nr_frags++;
2065 		pos += size;
2066 	}
2067 	skb_shinfo(skb1)->nr_frags = k;
2068 }
2069 
2070 /**
2071  * skb_split - Split fragmented skb into two parts at length len.
2072  * @skb: the buffer to split
2073  * @skb1: the buffer to receive the second part
2074  * @len: new length for skb
2075  */
2076 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2077 {
2078 	int pos = skb_headlen(skb);
2079 
2080 	if (len < pos)	/* Split line is inside header. */
2081 		skb_split_inside_header(skb, skb1, len, pos);
2082 	else		/* Second chunk has no header, nothing to copy. */
2083 		skb_split_no_header(skb, skb1, len, pos);
2084 }
2085 EXPORT_SYMBOL(skb_split);
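
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * splitting a buffer at @len into @skb (first part) and a freshly allocated
 * skb (second part). Allocating skb_headlen(skb) bytes of linear space is
 * enough for the worst case where the split point lies inside the header.
 */
static struct sk_buff *example_split_off_tail(struct sk_buff *skb, u32 len,
					      gfp_t gfp)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), gfp);

	if (!skb1)
		return NULL;
	skb_split(skb, skb1, len);	/* @skb keeps the first @len bytes */
	return skb1;
}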
2086 
2087 /* Shifting from/to a cloned skb is a no-go.
2088  *
2089  * Caller cannot keep skb_shinfo related pointers past calling here!
2090  */
2091 static int skb_prepare_for_shift(struct sk_buff *skb)
2092 {
2093 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2094 }
2095 
2096 /**
2097  * skb_shift - Shifts paged data partially from skb to another
2098  * @tgt: buffer into which tail data gets added
2099  * @skb: buffer from which the paged data comes from
2100  * @shiftlen: shift up to this many bytes
2101  *
2102  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2103  * the length of the skb, from skb to tgt. Returns the number of bytes
2104  * shifted. It's up to the caller to free skb if everything was shifted.
2105  *
2106  * If @tgt runs out of frags, the whole operation is aborted.
2107  *
2108  * Skb cannot include anything else but paged data while tgt is allowed
2109  * to have non-paged data as well.
2110  *
2111  * TODO: full sized shift could be optimized but that would need
2112  * specialized skb free'er to handle frags without up-to-date nr_frags.
2113  */
2114 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2115 {
2116 	int from, to, merge, todo;
2117 	struct skb_frag_struct *fragfrom, *fragto;
2118 
2119 	BUG_ON(shiftlen > skb->len);
2120 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2121 
2122 	todo = shiftlen;
2123 	from = 0;
2124 	to = skb_shinfo(tgt)->nr_frags;
2125 	fragfrom = &skb_shinfo(skb)->frags[from];
2126 
2127 	/* Actual merge is delayed until the point when we know we can
2128 	 * commit all, so that we don't have to undo partial changes
2129 	 */
2130 	if (!to ||
2131 	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2132 		merge = -1;
2133 	} else {
2134 		merge = to - 1;
2135 
2136 		todo -= fragfrom->size;
2137 		if (todo < 0) {
2138 			if (skb_prepare_for_shift(skb) ||
2139 			    skb_prepare_for_shift(tgt))
2140 				return 0;
2141 
2142 			/* All previous frag pointers might be stale! */
2143 			fragfrom = &skb_shinfo(skb)->frags[from];
2144 			fragto = &skb_shinfo(tgt)->frags[merge];
2145 
2146 			fragto->size += shiftlen;
2147 			fragfrom->size -= shiftlen;
2148 			fragfrom->page_offset += shiftlen;
2149 
2150 			goto onlymerged;
2151 		}
2152 
2153 		from++;
2154 	}
2155 
2156 	/* Skip full, not-fitting skb to avoid expensive operations */
2157 	if ((shiftlen == skb->len) &&
2158 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2159 		return 0;
2160 
2161 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2162 		return 0;
2163 
2164 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2165 		if (to == MAX_SKB_FRAGS)
2166 			return 0;
2167 
2168 		fragfrom = &skb_shinfo(skb)->frags[from];
2169 		fragto = &skb_shinfo(tgt)->frags[to];
2170 
2171 		if (todo >= fragfrom->size) {
2172 			*fragto = *fragfrom;
2173 			todo -= fragfrom->size;
2174 			from++;
2175 			to++;
2176 
2177 		} else {
2178 			get_page(fragfrom->page);
2179 			fragto->page = fragfrom->page;
2180 			fragto->page_offset = fragfrom->page_offset;
2181 			fragto->size = todo;
2182 
2183 			fragfrom->page_offset += todo;
2184 			fragfrom->size -= todo;
2185 			todo = 0;
2186 
2187 			to++;
2188 			break;
2189 		}
2190 	}
2191 
2192 	/* Ready to "commit" this state change to tgt */
2193 	skb_shinfo(tgt)->nr_frags = to;
2194 
2195 	if (merge >= 0) {
2196 		fragfrom = &skb_shinfo(skb)->frags[0];
2197 		fragto = &skb_shinfo(tgt)->frags[merge];
2198 
2199 		fragto->size += fragfrom->size;
2200 		put_page(fragfrom->page);
2201 	}
2202 
2203 	/* Reposition in the original skb */
2204 	to = 0;
2205 	while (from < skb_shinfo(skb)->nr_frags)
2206 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2207 	skb_shinfo(skb)->nr_frags = to;
2208 
2209 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2210 
2211 onlymerged:
2212 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2213 	 * the other hand might need it if it needs to be resent
2214 	 */
2215 	tgt->ip_summed = CHECKSUM_PARTIAL;
2216 	skb->ip_summed = CHECKSUM_PARTIAL;
2217 
2218 	/* Yak, is it really working this way? Some helper please? */
2219 	skb->len -= shiftlen;
2220 	skb->data_len -= shiftlen;
2221 	skb->truesize -= shiftlen;
2222 	tgt->len += shiftlen;
2223 	tgt->data_len += shiftlen;
2224 	tgt->truesize += shiftlen;
2225 
2226 	return shiftlen;
2227 }
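
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * coalescing all of @skb's paged data into @prev with skb_shift(). @skb must
 * carry only paged data (skb_headlen() == 0), as the BUG_ON above enforces.
 */
static bool example_coalesce(struct sk_buff *prev, struct sk_buff *skb)
{
	unsigned int len = skb->len;

	if (skb_shift(prev, skb, len) < len)
		return false;	/* partial shift; caller keeps both skbs */

	/* everything moved: the caller is now responsible for freeing @skb */
	return true;
}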
2228 
2229 /**
2230  * skb_prepare_seq_read - Prepare a sequential read of skb data
2231  * @skb: the buffer to read
2232  * @from: lower offset of data to be read
2233  * @to: upper offset of data to be read
2234  * @st: state variable
2235  *
2236  * Initializes the specified state variable. Must be called before
2237  * invoking skb_seq_read() for the first time.
2238  */
2239 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2240 			  unsigned int to, struct skb_seq_state *st)
2241 {
2242 	st->lower_offset = from;
2243 	st->upper_offset = to;
2244 	st->root_skb = st->cur_skb = skb;
2245 	st->frag_idx = st->stepped_offset = 0;
2246 	st->frag_data = NULL;
2247 }
2248 EXPORT_SYMBOL(skb_prepare_seq_read);
2249 
2250 /**
2251  * skb_seq_read - Sequentially read skb data
2252  * @consumed: number of bytes consumed by the caller so far
2253  * @data: destination pointer for data to be returned
2254  * @st: state variable
2255  *
2256  * Reads a block of skb data at @consumed relative to the
2257  * lower offset specified to skb_prepare_seq_read(). Assigns
2258  * the head of the data block to @data and returns the length
2259  * of the block or 0 if the end of the skb data or the upper
2260  * offset has been reached.
2261  *
2262  * The caller is not required to consume all of the data
2263  * returned, i.e. @consumed is typically set to the number
2264  * of bytes already consumed and the next call to
2265  * skb_seq_read() will return the remaining part of the block.
2266  *
2267  * Note 1: The size of each block of data returned can be arbitrary,
2268  *       this limitation is the cost for zerocopy sequential
2269  *       reads of potentially non-linear data.
2270  *
2271  * Note 2: Fragment lists within fragments are not implemented
2272  *       at the moment, state->root_skb could be replaced with
2273  *       a stack for this purpose.
2274  */
2275 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2276 			  struct skb_seq_state *st)
2277 {
2278 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2279 	skb_frag_t *frag;
2280 
2281 	if (unlikely(abs_offset >= st->upper_offset))
2282 		return 0;
2283 
2284 next_skb:
2285 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2286 
2287 	if (abs_offset < block_limit && !st->frag_data) {
2288 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2289 		return block_limit - abs_offset;
2290 	}
2291 
2292 	if (st->frag_idx == 0 && !st->frag_data)
2293 		st->stepped_offset += skb_headlen(st->cur_skb);
2294 
2295 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2296 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2297 		block_limit = frag->size + st->stepped_offset;
2298 
2299 		if (abs_offset < block_limit) {
2300 			if (!st->frag_data)
2301 				st->frag_data = kmap_skb_frag(frag);
2302 
2303 			*data = (u8 *) st->frag_data + frag->page_offset +
2304 				(abs_offset - st->stepped_offset);
2305 
2306 			return block_limit - abs_offset;
2307 		}
2308 
2309 		if (st->frag_data) {
2310 			kunmap_skb_frag(st->frag_data);
2311 			st->frag_data = NULL;
2312 		}
2313 
2314 		st->frag_idx++;
2315 		st->stepped_offset += frag->size;
2316 	}
2317 
2318 	if (st->frag_data) {
2319 		kunmap_skb_frag(st->frag_data);
2320 		st->frag_data = NULL;
2321 	}
2322 
2323 	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
2324 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2325 		st->frag_idx = 0;
2326 		goto next_skb;
2327 	} else if (st->cur_skb->next) {
2328 		st->cur_skb = st->cur_skb->next;
2329 		st->frag_idx = 0;
2330 		goto next_skb;
2331 	}
2332 
2333 	return 0;
2334 }
2335 EXPORT_SYMBOL(skb_seq_read);
2336 
2337 /**
2338  * skb_abort_seq_read - Abort a sequential read of skb data
2339  * @st: state variable
2340  *
2341  * Must be called if the sequential read was aborted, i.e. if
2342  * skb_seq_read() was not called until it returned 0.
2343  */
2344 void skb_abort_seq_read(struct skb_seq_state *st)
2345 {
2346 	if (st->frag_data)
2347 		kunmap_skb_frag(st->frag_data);
2348 }
2349 EXPORT_SYMBOL(skb_abort_seq_read);
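
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * walking a byte range of an skb with the sequential reader above. Each
 * iteration yields a pointer to a contiguous block of payload.
 */
static void example_seq_walk(struct sk_buff *skb, unsigned int from,
			     unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* 'data' points at 'len' contiguous bytes here */
		consumed += len;
	}
	/*
	 * skb_seq_read() returned 0, so skb_abort_seq_read() is not needed;
	 * call it instead if the loop is left before that happens.
	 */
}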
2350 
2351 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2352 
2353 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2354 					  struct ts_config *conf,
2355 					  struct ts_state *state)
2356 {
2357 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2358 }
2359 
2360 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2361 {
2362 	skb_abort_seq_read(TS_SKB_CB(state));
2363 }
2364 
2365 /**
2366  * skb_find_text - Find a text pattern in skb data
2367  * @skb: the buffer to look in
2368  * @from: search offset
2369  * @to: search limit
2370  * @config: textsearch configuration
2371  * @state: uninitialized textsearch state variable
2372  *
2373  * Finds a pattern in the skb data according to the specified
2374  * textsearch configuration. Use textsearch_next() to retrieve
2375  * subsequent occurrences of the pattern. Returns the offset
2376  * to the first occurrence or UINT_MAX if no match was found.
2377  */
2378 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2379 			   unsigned int to, struct ts_config *config,
2380 			   struct ts_state *state)
2381 {
2382 	unsigned int ret;
2383 
2384 	config->get_next_block = skb_ts_get_next_block;
2385 	config->finish = skb_ts_finish;
2386 
2387 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2388 
2389 	ret = textsearch_find(config, state);
2390 	return (ret <= to - from ? ret : UINT_MAX);
2391 }
2392 EXPORT_SYMBOL(skb_find_text);
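
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * searching skb data for a byte pattern, much as xt_string does. "bm"
 * selects the Boyer-Moore textsearch implementation.
 */
static unsigned int example_find_pattern(struct sk_buff *skb,
					 const void *pattern,
					 unsigned int patlen)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("bm", pattern, patlen, GFP_ATOMIC,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;
}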
2393 
2394 /**
2395  * skb_append_datato_frags - append the user data to a skb
2396  * @sk: sock structure
2397  * @skb: skb structure to be appended with user data.
2398  * @getfrag: callback function to be used for getting the user data
2399  * @from: pointer to user message iov
2400  * @length: length of the iov message
2401  *
2402  * Description: This procedure appends the user data to the fragment part
2403  * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
2404  */
2405 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2406 			int (*getfrag)(void *from, char *to, int offset,
2407 					int len, int odd, struct sk_buff *skb),
2408 			void *from, int length)
2409 {
2410 	int frg_cnt = 0;
2411 	skb_frag_t *frag = NULL;
2412 	struct page *page = NULL;
2413 	int copy, left;
2414 	int offset = 0;
2415 	int ret;
2416 
2417 	do {
2418 		/* Return error if we don't have space for new frag */
2419 		frg_cnt = skb_shinfo(skb)->nr_frags;
2420 		if (frg_cnt >= MAX_SKB_FRAGS)
2421 			return -EFAULT;
2422 
2423 		/* allocate a new page for next frag */
2424 		page = alloc_pages(sk->sk_allocation, 0);
2425 
2426 		/* If alloc_page fails just return failure and caller will
2427 		 * free previous allocated pages by doing kfree_skb()
2428 		 */
2429 		if (page == NULL)
2430 			return -ENOMEM;
2431 
2432 		/* initialize the next frag */
2433 		sk->sk_sndmsg_page = page;
2434 		sk->sk_sndmsg_off = 0;
2435 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2436 		skb->truesize += PAGE_SIZE;
2437 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2438 
2439 		/* get the new initialized frag */
2440 		frg_cnt = skb_shinfo(skb)->nr_frags;
2441 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2442 
2443 		/* copy the user data to page */
2444 		left = PAGE_SIZE - frag->page_offset;
2445 		copy = (length > left)? left : length;
2446 
2447 		ret = getfrag(from, (page_address(frag->page) +
2448 			    frag->page_offset + frag->size),
2449 			    offset, copy, 0, skb);
2450 		if (ret < 0)
2451 			return -EFAULT;
2452 
2453 		/* copy was successful so update the size parameters */
2454 		sk->sk_sndmsg_off += copy;
2455 		frag->size += copy;
2456 		skb->len += copy;
2457 		skb->data_len += copy;
2458 		offset += copy;
2459 		length -= copy;
2460 
2461 	} while (length > 0);
2462 
2463 	return 0;
2464 }
2465 EXPORT_SYMBOL(skb_append_datato_frags);
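
/*
 * Illustrative sketch (hypothetical callback, not part of the original file):
 * a minimal getfrag implementation that copies from a kernel buffer. Real
 * users typically pass something like ip_generic_getfrag to copy from an
 * iovec instead.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}
/* ... used as: skb_append_datato_frags(sk, skb, example_getfrag, buf, buflen); */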
2466 
2467 /**
2468  *	skb_pull_rcsum - pull skb and update receive checksum
2469  *	@skb: buffer to update
2470  *	@len: length of data pulled
2471  *
2472  *	This function performs an skb_pull on the packet and updates
2473  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2474  *	receive path processing instead of skb_pull unless you know
2475  *	that the checksum difference is zero (e.g., a valid IP header)
2476  *	or you are setting ip_summed to CHECKSUM_NONE.
2477  */
2478 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2479 {
2480 	BUG_ON(len > skb->len);
2481 	skb->len -= len;
2482 	BUG_ON(skb->len < skb->data_len);
2483 	skb_postpull_rcsum(skb, skb->data, len);
2484 	return skb->data += len;
2485 }
2486 
2487 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
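
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * stripping an encapsulation header of @hlen bytes on the receive path
 * while keeping a CHECKSUM_COMPLETE value consistent.
 */
static int example_strip_header(struct sk_buff *skb, unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;
	skb_pull_rcsum(skb, hlen);
	return 0;
}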
2488 
2489 /**
2490  *	skb_segment - Perform protocol segmentation on skb.
2491  *	@skb: buffer to segment
2492  *	@features: features for the output path (see dev->features)
2493  *
2494  *	This function performs segmentation on the given skb.  It returns
2495  *	a pointer to the first in a list of new skbs for the segments.
2496  *	In case of error it returns ERR_PTR(err).
2497  */
2498 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2499 {
2500 	struct sk_buff *segs = NULL;
2501 	struct sk_buff *tail = NULL;
2502 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2503 	unsigned int mss = skb_shinfo(skb)->gso_size;
2504 	unsigned int doffset = skb->data - skb_mac_header(skb);
2505 	unsigned int offset = doffset;
2506 	unsigned int headroom;
2507 	unsigned int len;
2508 	int sg = features & NETIF_F_SG;
2509 	int nfrags = skb_shinfo(skb)->nr_frags;
2510 	int err = -ENOMEM;
2511 	int i = 0;
2512 	int pos;
2513 
2514 	__skb_push(skb, doffset);
2515 	headroom = skb_headroom(skb);
2516 	pos = skb_headlen(skb);
2517 
2518 	do {
2519 		struct sk_buff *nskb;
2520 		skb_frag_t *frag;
2521 		int hsize;
2522 		int size;
2523 
2524 		len = skb->len - offset;
2525 		if (len > mss)
2526 			len = mss;
2527 
2528 		hsize = skb_headlen(skb) - offset;
2529 		if (hsize < 0)
2530 			hsize = 0;
2531 		if (hsize > len || !sg)
2532 			hsize = len;
2533 
2534 		if (!hsize && i >= nfrags) {
2535 			BUG_ON(fskb->len != len);
2536 
2537 			pos += len;
2538 			nskb = skb_clone(fskb, GFP_ATOMIC);
2539 			fskb = fskb->next;
2540 
2541 			if (unlikely(!nskb))
2542 				goto err;
2543 
2544 			hsize = skb_end_pointer(nskb) - nskb->head;
2545 			if (skb_cow_head(nskb, doffset + headroom)) {
2546 				kfree_skb(nskb);
2547 				goto err;
2548 			}
2549 
2550 			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2551 					  hsize;
2552 			skb_release_head_state(nskb);
2553 			__skb_push(nskb, doffset);
2554 		} else {
2555 			nskb = alloc_skb(hsize + doffset + headroom,
2556 					 GFP_ATOMIC);
2557 
2558 			if (unlikely(!nskb))
2559 				goto err;
2560 
2561 			skb_reserve(nskb, headroom);
2562 			__skb_put(nskb, doffset);
2563 		}
2564 
2565 		if (segs)
2566 			tail->next = nskb;
2567 		else
2568 			segs = nskb;
2569 		tail = nskb;
2570 
2571 		__copy_skb_header(nskb, skb);
2572 		nskb->mac_len = skb->mac_len;
2573 
2574 		skb_reset_mac_header(nskb);
2575 		skb_set_network_header(nskb, skb->mac_len);
2576 		nskb->transport_header = (nskb->network_header +
2577 					  skb_network_header_len(skb));
2578 		skb_copy_from_linear_data(skb, nskb->data, doffset);
2579 
2580 		if (fskb != skb_shinfo(skb)->frag_list)
2581 			continue;
2582 
2583 		if (!sg) {
2584 			nskb->ip_summed = CHECKSUM_NONE;
2585 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2586 							    skb_put(nskb, len),
2587 							    len, 0);
2588 			continue;
2589 		}
2590 
2591 		frag = skb_shinfo(nskb)->frags;
2592 
2593 		skb_copy_from_linear_data_offset(skb, offset,
2594 						 skb_put(nskb, hsize), hsize);
2595 
2596 		while (pos < offset + len && i < nfrags) {
2597 			*frag = skb_shinfo(skb)->frags[i];
2598 			get_page(frag->page);
2599 			size = frag->size;
2600 
2601 			if (pos < offset) {
2602 				frag->page_offset += offset - pos;
2603 				frag->size -= offset - pos;
2604 			}
2605 
2606 			skb_shinfo(nskb)->nr_frags++;
2607 
2608 			if (pos + size <= offset + len) {
2609 				i++;
2610 				pos += size;
2611 			} else {
2612 				frag->size -= pos + size - (offset + len);
2613 				goto skip_fraglist;
2614 			}
2615 
2616 			frag++;
2617 		}
2618 
2619 		if (pos < offset + len) {
2620 			struct sk_buff *fskb2 = fskb;
2621 
2622 			BUG_ON(pos + fskb->len != offset + len);
2623 
2624 			pos += fskb->len;
2625 			fskb = fskb->next;
2626 
2627 			if (fskb2->next) {
2628 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2629 				if (!fskb2)
2630 					goto err;
2631 			} else
2632 				skb_get(fskb2);
2633 
2634 			SKB_FRAG_ASSERT(nskb);
2635 			skb_shinfo(nskb)->frag_list = fskb2;
2636 		}
2637 
2638 skip_fraglist:
2639 		nskb->data_len = len - hsize;
2640 		nskb->len += nskb->data_len;
2641 		nskb->truesize += nskb->data_len;
2642 	} while ((offset += len) < skb->len);
2643 
2644 	return segs;
2645 
2646 err:
2647 	while ((skb = segs)) {
2648 		segs = skb->next;
2649 		kfree_skb(skb);
2650 	}
2651 	return ERR_PTR(err);
2652 }
2653 EXPORT_SYMBOL_GPL(skb_segment);
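
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * consuming the segment list produced by skb_segment(). In practice this
 * function is reached via a protocol's gso_segment callback; the caller
 * then walks the returned list roughly like this.
 */
static void example_walk_segs(struct sk_buff *segs)
{
	struct sk_buff *nskb;

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		/* hand nskb to the device or the next layer here */
		kfree_skb(nskb);	/* placeholder for this sketch */
	}
}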
2654 
2655 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2656 {
2657 	struct sk_buff *p = *head;
2658 	struct sk_buff *nskb;
2659 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2660 	struct skb_shared_info *pinfo = skb_shinfo(p);
2661 	unsigned int headroom;
2662 	unsigned int len = skb_gro_len(skb);
2663 	unsigned int offset = skb_gro_offset(skb);
2664 	unsigned int headlen = skb_headlen(skb);
2665 
2666 	if (p->len + len >= 65536)
2667 		return -E2BIG;
2668 
2669 	if (pinfo->frag_list)
2670 		goto merge;
2671 	else if (headlen <= offset) {
2672 		skb_frag_t *frag;
2673 		skb_frag_t *frag2;
2674 		int i = skbinfo->nr_frags;
2675 		int nr_frags = pinfo->nr_frags + i;
2676 
2677 		offset -= headlen;
2678 
2679 		if (nr_frags > MAX_SKB_FRAGS)
2680 			return -E2BIG;
2681 
2682 		pinfo->nr_frags = nr_frags;
2683 		skbinfo->nr_frags = 0;
2684 
2685 		frag = pinfo->frags + nr_frags;
2686 		frag2 = skbinfo->frags + i;
2687 		do {
2688 			*--frag = *--frag2;
2689 		} while (--i);
2690 
2691 		frag->page_offset += offset;
2692 		frag->size -= offset;
2693 
2694 		skb->truesize -= skb->data_len;
2695 		skb->len -= skb->data_len;
2696 		skb->data_len = 0;
2697 
2698 		NAPI_GRO_CB(skb)->free = 1;
2699 		goto done;
2700 	} else if (skb_gro_len(p) != pinfo->gso_size)
2701 		return -E2BIG;
2702 
2703 	headroom = skb_headroom(p);
2704 	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
2705 	if (unlikely(!nskb))
2706 		return -ENOMEM;
2707 
2708 	__copy_skb_header(nskb, p);
2709 	nskb->mac_len = p->mac_len;
2710 
2711 	skb_reserve(nskb, headroom);
2712 	__skb_put(nskb, skb_gro_offset(p));
2713 
2714 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2715 	skb_set_network_header(nskb, skb_network_offset(p));
2716 	skb_set_transport_header(nskb, skb_transport_offset(p));
2717 
2718 	__skb_pull(p, skb_gro_offset(p));
2719 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2720 	       p->data - skb_mac_header(p));
2721 
2722 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2723 	skb_shinfo(nskb)->frag_list = p;
2724 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2725 	pinfo->gso_size = 0;
2726 	skb_header_release(p);
2727 	nskb->prev = p;
2728 
2729 	nskb->data_len += p->len;
2730 	nskb->truesize += p->len;
2731 	nskb->len += p->len;
2732 
2733 	*head = nskb;
2734 	nskb->next = p->next;
2735 	p->next = NULL;
2736 
2737 	p = nskb;
2738 
2739 merge:
2740 	if (offset > headlen) {
2741 		skbinfo->frags[0].page_offset += offset - headlen;
2742 		skbinfo->frags[0].size -= offset - headlen;
2743 		offset = headlen;
2744 	}
2745 
2746 	__skb_pull(skb, offset);
2747 
2748 	p->prev->next = skb;
2749 	p->prev = skb;
2750 	skb_header_release(skb);
2751 
2752 done:
2753 	NAPI_GRO_CB(p)->count++;
2754 	p->data_len += len;
2755 	p->truesize += len;
2756 	p->len += len;
2757 
2758 	NAPI_GRO_CB(skb)->same_flow = 1;
2759 	return 0;
2760 }
2761 EXPORT_SYMBOL_GPL(skb_gro_receive);
2762 
2763 void __init skb_init(void)
2764 {
2765 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2766 					      sizeof(struct sk_buff),
2767 					      0,
2768 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2769 					      NULL);
2770 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2771 						(2*sizeof(struct sk_buff)) +
2772 						sizeof(atomic_t),
2773 						0,
2774 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2775 						NULL);
2776 }
2777 
2778 /**
2779  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2780  *	@skb: Socket buffer containing the buffers to be mapped
2781  *	@sg: The scatter-gather list to map into
2782  *	@offset: The offset into the buffer's contents to start mapping
2783  *	@len: Length of buffer space to be mapped
2784  *
2785  *	Fill the specified scatter-gather list with mappings/pointers into a
2786  *	region of the buffer space attached to a socket buffer.
2787  */
2788 static int
2789 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2790 {
2791 	int start = skb_headlen(skb);
2792 	int i, copy = start - offset;
2793 	struct sk_buff *frag_iter;
2794 	int elt = 0;
2795 
2796 	if (copy > 0) {
2797 		if (copy > len)
2798 			copy = len;
2799 		sg_set_buf(sg, skb->data + offset, copy);
2800 		elt++;
2801 		if ((len -= copy) == 0)
2802 			return elt;
2803 		offset += copy;
2804 	}
2805 
2806 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2807 		int end;
2808 
2809 		WARN_ON(start > offset + len);
2810 
2811 		end = start + skb_shinfo(skb)->frags[i].size;
2812 		if ((copy = end - offset) > 0) {
2813 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2814 
2815 			if (copy > len)
2816 				copy = len;
2817 			sg_set_page(&sg[elt], frag->page, copy,
2818 					frag->page_offset+offset-start);
2819 			elt++;
2820 			if (!(len -= copy))
2821 				return elt;
2822 			offset += copy;
2823 		}
2824 		start = end;
2825 	}
2826 
2827 	skb_walk_frags(skb, frag_iter) {
2828 		int end;
2829 
2830 		WARN_ON(start > offset + len);
2831 
2832 		end = start + frag_iter->len;
2833 		if ((copy = end - offset) > 0) {
2834 			if (copy > len)
2835 				copy = len;
2836 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2837 					      copy);
2838 			if ((len -= copy) == 0)
2839 				return elt;
2840 			offset += copy;
2841 		}
2842 		start = end;
2843 	}
2844 	BUG_ON(len);
2845 	return elt;
2846 }
2847 
2848 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2849 {
2850 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
2851 
2852 	sg_mark_end(&sg[nsg - 1]);
2853 
2854 	return nsg;
2855 }
2856 EXPORT_SYMBOL_GPL(skb_to_sgvec);
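
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * mapping an skb into a scatterlist. @nelems would typically come from
 * skb_cow_data() below.
 */
static int example_map_skb(struct sk_buff *skb, struct scatterlist *sg,
			   int nelems)
{
	sg_init_table(sg, nelems);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}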
2857 
2858 /**
2859  *	skb_cow_data - Check that a socket buffer's data buffers are writable
2860  *	@skb: The socket buffer to check.
2861  *	@tailbits: Amount of trailing space to be added
2862  *	@trailer: Returned pointer to the skb where the @tailbits space begins
2863  *
2864  *	Make sure that the data buffers attached to a socket buffer are
2865  *	writable. If they are not, private copies are made of the data buffers
2866  *	and the socket buffer is set to use these instead.
2867  *
2868  *	If @tailbits is given, make sure that there is space to write @tailbits
2869  *	bytes of data beyond current end of socket buffer.  @trailer will be
2870  *	set to point to the skb in which this space begins.
2871  *
2872  *	The number of scatterlist elements required to completely map the
2873  *	COW'd and extended socket buffer will be returned.
2874  */
2875 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2876 {
2877 	int copyflag;
2878 	int elt;
2879 	struct sk_buff *skb1, **skb_p;
2880 
2881 	/* If skb is cloned or its head is paged, reallocate
2882 	 * head pulling out all the pages (pages are considered not writable
2883 	 * at the moment even if they are anonymous).
2884 	 */
2885 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2886 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2887 		return -ENOMEM;
2888 
2889 	/* Easy case. Most packets will go this way. */
2890 	if (!skb_has_frags(skb)) {
2891 		/* A little trouble: not enough space for the trailer.
2892 		 * This should not happen when the stack is tuned to generate
2893 		 * good frames. OK, on a miss we reallocate and reserve even
2894 		 * more space; 128 bytes is fair. */
2895 
2896 		if (skb_tailroom(skb) < tailbits &&
2897 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2898 			return -ENOMEM;
2899 
2900 		/* Voila! */
2901 		*trailer = skb;
2902 		return 1;
2903 	}
2904 
2905 	/* Misery. We are in trouble; time to mince the fragments... */
2906 
2907 	elt = 1;
2908 	skb_p = &skb_shinfo(skb)->frag_list;
2909 	copyflag = 0;
2910 
2911 	while ((skb1 = *skb_p) != NULL) {
2912 		int ntail = 0;
2913 
2914 		/* The fragment is partially pulled by someone,
2915 		 * this can happen on input. Copy it and everything
2916 		 * after it. */
2917 
2918 		if (skb_shared(skb1))
2919 			copyflag = 1;
2920 
2921 		/* If the skb is the last, worry about trailer. */
2922 
2923 		if (skb1->next == NULL && tailbits) {
2924 			if (skb_shinfo(skb1)->nr_frags ||
2925 			    skb_has_frags(skb1) ||
2926 			    skb_tailroom(skb1) < tailbits)
2927 				ntail = tailbits + 128;
2928 		}
2929 
2930 		if (copyflag ||
2931 		    skb_cloned(skb1) ||
2932 		    ntail ||
2933 		    skb_shinfo(skb1)->nr_frags ||
2934 		    skb_has_frags(skb1)) {
2935 			struct sk_buff *skb2;
2936 
2937 			/* Ugh, we are miserable poor guys... */
2938 			if (ntail == 0)
2939 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2940 			else
2941 				skb2 = skb_copy_expand(skb1,
2942 						       skb_headroom(skb1),
2943 						       ntail,
2944 						       GFP_ATOMIC);
2945 			if (unlikely(skb2 == NULL))
2946 				return -ENOMEM;
2947 
2948 			if (skb1->sk)
2949 				skb_set_owner_w(skb2, skb1->sk);
2950 
2951 			/* Looking around. Are we still alive?
2952 			 * OK, link new skb, drop old one */
2953 
2954 			skb2->next = skb1->next;
2955 			*skb_p = skb2;
2956 			kfree_skb(skb1);
2957 			skb1 = skb2;
2958 		}
2959 		elt++;
2960 		*trailer = skb1;
2961 		skb_p = &skb1->next;
2962 	}
2963 
2964 	return elt;
2965 }
2966 EXPORT_SYMBOL_GPL(skb_cow_data);
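
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the usual pairing of skb_cow_data() and skb_to_sgvec(), roughly as the
 * IPsec transforms use it: make the buffers writable, then map them.
 */
static int example_cow_and_map(struct sk_buff *skb, struct scatterlist *sg,
			       int max_elems)
{
	struct sk_buff *trailer;
	int nfrags = skb_cow_data(skb, 0, &trailer);

	if (nfrags < 0)
		return nfrags;
	if (nfrags > max_elems)
		return -EMSGSIZE;

	sg_init_table(sg, nfrags);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}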
2967 
2968 static void sock_rmem_free(struct sk_buff *skb)
2969 {
2970 	struct sock *sk = skb->sk;
2971 
2972 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2973 }
2974 
2975 /*
2976  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
2977  */
2978 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2979 {
2980 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
2981 	    (unsigned)sk->sk_rcvbuf)
2982 		return -ENOMEM;
2983 
2984 	skb_orphan(skb);
2985 	skb->sk = sk;
2986 	skb->destructor = sock_rmem_free;
2987 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2988 
2989 	skb_queue_tail(&sk->sk_error_queue, skb);
2990 	if (!sock_flag(sk, SOCK_DEAD))
2991 		sk->sk_data_ready(sk, skb->len);
2992 	return 0;
2993 }
2994 EXPORT_SYMBOL(sock_queue_err_skb);
2995 
2996 void skb_tstamp_tx(struct sk_buff *orig_skb,
2997 		struct skb_shared_hwtstamps *hwtstamps)
2998 {
2999 	struct sock *sk = orig_skb->sk;
3000 	struct sock_exterr_skb *serr;
3001 	struct sk_buff *skb;
3002 	int err;
3003 
3004 	if (!sk)
3005 		return;
3006 
3007 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3008 	if (!skb)
3009 		return;
3010 
3011 	if (hwtstamps) {
3012 		*skb_hwtstamps(skb) =
3013 			*hwtstamps;
3014 	} else {
3015 		/*
3016 		 * no hardware time stamps available,
3017 		 * so keep the skb_shared_tx and only
3018 		 * store software time stamp
3019 		 */
3020 		skb->tstamp = ktime_get_real();
3021 	}
3022 
3023 	serr = SKB_EXT_ERR(skb);
3024 	memset(serr, 0, sizeof(*serr));
3025 	serr->ee.ee_errno = ENOMSG;
3026 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3027 
3028 	err = sock_queue_err_skb(sk, skb);
3029 
3030 	if (err)
3031 		kfree_skb(skb);
3032 }
3033 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
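
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * a driver reporting a hardware transmit timestamp, in nanoseconds, for a
 * completed skb.
 */
static void example_report_hwtstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps hwts;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &hwts);	/* clones the skb onto the error queue */
}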
3034 
3035 
3036 /**
3037  * skb_partial_csum_set - set up and verify partial csum values for packet
3038  * @skb: the skb to set
3039  * @start: the number of bytes after skb->data to start checksumming.
3040  * @off: the offset from start to place the checksum.
3041  *
3042  * For untrusted partially-checksummed packets, we need to make sure the values
3043  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3044  *
3045  * This function checks and sets those values and skb->ip_summed: if this
3046  * returns false you should drop the packet.
3047  */
3048 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3049 {
3050 	if (unlikely(start > skb_headlen(skb)) ||
3051 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3052 		if (net_ratelimit())
3053 			printk(KERN_WARNING
3054 			       "bad partial csum: csum=%u/%u len=%u\n",
3055 			       start, off, skb_headlen(skb));
3056 		return false;
3057 	}
3058 	skb->ip_summed = CHECKSUM_PARTIAL;
3059 	skb->csum_start = skb_headroom(skb) + start;
3060 	skb->csum_offset = off;
3061 	return true;
3062 }
3063 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
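
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * validating checksum offsets supplied by an untrusted source (e.g. a
 * virtio-style header) before accepting the packet.
 */
static int example_set_partial_csum(struct sk_buff *skb, u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;		/* caller should drop the packet */
	return 0;
}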
3064 
3065 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3066 {
3067 	if (net_ratelimit())
3068 		pr_warning("%s: received packets cannot be forwarded"
3069 			   " while LRO is enabled\n", skb->dev->name);
3070 }
3071 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3072