xref: /linux/net/core/skbuff.c (revision cb299ba8b5ef2239429484072fea394cd7581bd7)
1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
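
/*
 * Editorial note (illustrative, not part of the original file): the NOTE
 * above in practice.  skb_queue_tail() takes the list lock itself, while
 * the __skb_ variant relies on the caller for exclusion, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);
 */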
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/kmemcheck.h>
43 #include <linux/mm.h>
44 #include <linux/interrupt.h>
45 #include <linux/in.h>
46 #include <linux/inet.h>
47 #include <linux/slab.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/splice.h>
55 #include <linux/cache.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/init.h>
58 #include <linux/scatterlist.h>
59 #include <linux/errqueue.h>
60 
61 #include <net/protocol.h>
62 #include <net/dst.h>
63 #include <net/sock.h>
64 #include <net/checksum.h>
65 #include <net/xfrm.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 #include <trace/events/skb.h>
70 
71 #include "kmap_skb.h"
72 
73 static struct kmem_cache *skbuff_head_cache __read_mostly;
74 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
75 
76 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
77 				  struct pipe_buffer *buf)
78 {
79 	put_page(buf->page);
80 }
81 
82 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
83 				struct pipe_buffer *buf)
84 {
85 	get_page(buf->page);
86 }
87 
88 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
89 			       struct pipe_buffer *buf)
90 {
91 	return 1;
92 }
93 
94 
95 /* Pipe buffer operations for a socket. */
96 static const struct pipe_buf_operations sock_pipe_buf_ops = {
97 	.can_merge = 0,
98 	.map = generic_pipe_buf_map,
99 	.unmap = generic_pipe_buf_unmap,
100 	.confirm = generic_pipe_buf_confirm,
101 	.release = sock_pipe_buf_release,
102 	.steal = sock_pipe_buf_steal,
103 	.get = sock_pipe_buf_get,
104 };
105 
106 /*
107  *	Keep out-of-line to prevent kernel bloat.
108  *	__builtin_return_address is not used because it is not always
109  *	reliable.
110  */
111 
112 /**
113  *	skb_over_panic	- 	private function
114  *	@skb: buffer
115  *	@sz: size
116  *	@here: address
117  *
118  *	Out of line support code for skb_put(). Not user callable.
119  */
120 static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
121 {
122 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
123 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
124 	       here, skb->len, sz, skb->head, skb->data,
125 	       (unsigned long)skb->tail, (unsigned long)skb->end,
126 	       skb->dev ? skb->dev->name : "<NULL>");
127 	BUG();
128 }
129 
130 /**
131  *	skb_under_panic	- 	private function
132  *	@skb: buffer
133  *	@sz: size
134  *	@here: address
135  *
136  *	Out of line support code for skb_push(). Not user callable.
137  */
138 
139 static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
140 {
141 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
142 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
143 	       here, skb->len, sz, skb->head, skb->data,
144 	       (unsigned long)skb->tail, (unsigned long)skb->end,
145 	       skb->dev ? skb->dev->name : "<NULL>");
146 	BUG();
147 }
148 
149 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
150  *	'private' fields and also do memory statistics to find all the
151  *	[BEEP] leaks.
152  *
153  */
154 
155 /**
156  *	__alloc_skb	-	allocate a network buffer
157  *	@size: size to allocate
158  *	@gfp_mask: allocation mask
159  *	@fclone: allocate from fclone cache instead of head cache
160  *		and allocate a cloned (child) skb
161  *	@node: numa node to allocate memory on
162  *
163  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
164  *	tail room of size bytes. The object has a reference count of one.
165  *	The return is the buffer. On a failure the return is %NULL.
166  *
167  *	Buffers may only be allocated from interrupts using a @gfp_mask of
168  *	%GFP_ATOMIC.
169  */
170 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
171 			    int fclone, int node)
172 {
173 	struct kmem_cache *cache;
174 	struct skb_shared_info *shinfo;
175 	struct sk_buff *skb;
176 	u8 *data;
177 
178 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
179 
180 	/* Get the HEAD */
181 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
182 	if (!skb)
183 		goto out;
184 	prefetchw(skb);
185 
186 	size = SKB_DATA_ALIGN(size);
187 	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
188 			gfp_mask, node);
189 	if (!data)
190 		goto nodata;
191 	prefetchw(data + size);
192 
193 	/*
194 	 * Only clear those fields we need to clear, not those that we will
195 	 * actually initialise below. Hence, don't put any more fields after
196 	 * the tail pointer in struct sk_buff!
197 	 */
198 	memset(skb, 0, offsetof(struct sk_buff, tail));
199 	skb->truesize = size + sizeof(struct sk_buff);
200 	atomic_set(&skb->users, 1);
201 	skb->head = data;
202 	skb->data = data;
203 	skb_reset_tail_pointer(skb);
204 	skb->end = skb->tail + size;
205 #ifdef NET_SKBUFF_DATA_USES_OFFSET
206 	skb->mac_header = ~0U;
207 #endif
208 
209 	/* make sure we initialize shinfo sequentially */
210 	shinfo = skb_shinfo(skb);
211 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
212 	atomic_set(&shinfo->dataref, 1);
213 
214 	if (fclone) {
215 		struct sk_buff *child = skb + 1;
216 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
217 
218 		kmemcheck_annotate_bitfield(child, flags1);
219 		kmemcheck_annotate_bitfield(child, flags2);
220 		skb->fclone = SKB_FCLONE_ORIG;
221 		atomic_set(fclone_ref, 1);
222 
223 		child->fclone = SKB_FCLONE_UNAVAILABLE;
224 	}
225 out:
226 	return skb;
227 nodata:
228 	kmem_cache_free(cache, skb);
229 	skb = NULL;
230 	goto out;
231 }
232 EXPORT_SYMBOL(__alloc_skb);
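
/*
 * Editorial sketch (illustrative, not part of the original file): typical
 * use of the alloc_skb() wrapper around __alloc_skb().  The buffer comes
 * back with no headroom, so callers normally reserve header space before
 * appending payload.  The function name and the 32-byte figure below are
 * hypothetical; GFP_KERNEL assumes process context.
 */
static inline struct sk_buff *skbuff_example_build(const void *payload,
						   unsigned int len)
{
	struct sk_buff *skb;

	/* Room for the payload plus 32 bytes of header space. */
	skb = alloc_skb(len + 32, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, 32);				/* create headroom */
	memcpy(skb_put(skb, len), payload, len);	/* append payload  */

	return skb;
}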
233 
234 /**
235  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
236  *	@dev: network device to receive on
237  *	@length: length to allocate
238  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
239  *
240  *	Allocate a new &sk_buff and assign it a usage count of one. The
241  *	buffer has unspecified headroom built in. Users should allocate
242  *	the headroom they think they need without accounting for the
243  *	built in space. The built in space is used for optimisations.
244  *
245  *	%NULL is returned if there is no free memory.
246  */
247 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
248 		unsigned int length, gfp_t gfp_mask)
249 {
250 	struct sk_buff *skb;
251 
252 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
253 	if (likely(skb)) {
254 		skb_reserve(skb, NET_SKB_PAD);
255 		skb->dev = dev;
256 	}
257 	return skb;
258 }
259 EXPORT_SYMBOL(__netdev_alloc_skb);
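
/*
 * Editorial sketch (illustrative, not part of the original file): a common
 * driver receive-path pattern built on __netdev_alloc_skb() via the
 * netdev_alloc_skb() wrapper, reserving NET_IP_ALIGN so that the IP header
 * of the received frame ends up naturally aligned.  The name is
 * hypothetical.
 */
static inline struct sk_buff *skbuff_example_rx_alloc(struct net_device *dev,
						      unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	return skb;
}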
260 
261 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
262 		int size)
263 {
264 	skb_fill_page_desc(skb, i, page, off, size);
265 	skb->len += size;
266 	skb->data_len += size;
267 	skb->truesize += size;
268 }
269 EXPORT_SYMBOL(skb_add_rx_frag);
270 
271 /**
272  *	dev_alloc_skb - allocate an skbuff for receiving
273  *	@length: length to allocate
274  *
275  *	Allocate a new &sk_buff and assign it a usage count of one. The
276  *	buffer has unspecified headroom built in. Users should allocate
277  *	the headroom they think they need without accounting for the
278  *	built in space. The built in space is used for optimisations.
279  *
280  *	%NULL is returned if there is no free memory. Although this function
281  *	allocates memory it can be called from an interrupt.
282  */
283 struct sk_buff *dev_alloc_skb(unsigned int length)
284 {
285 	/*
286 	 * There is more code here than it seems:
287 	 * __dev_alloc_skb is an inline function.
288 	 */
289 	return __dev_alloc_skb(length, GFP_ATOMIC);
290 }
291 EXPORT_SYMBOL(dev_alloc_skb);
292 
293 static void skb_drop_list(struct sk_buff **listp)
294 {
295 	struct sk_buff *list = *listp;
296 
297 	*listp = NULL;
298 
299 	do {
300 		struct sk_buff *this = list;
301 		list = list->next;
302 		kfree_skb(this);
303 	} while (list);
304 }
305 
306 static inline void skb_drop_fraglist(struct sk_buff *skb)
307 {
308 	skb_drop_list(&skb_shinfo(skb)->frag_list);
309 }
310 
311 static void skb_clone_fraglist(struct sk_buff *skb)
312 {
313 	struct sk_buff *list;
314 
315 	skb_walk_frags(skb, list)
316 		skb_get(list);
317 }
318 
319 static void skb_release_data(struct sk_buff *skb)
320 {
321 	if (!skb->cloned ||
322 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
323 			       &skb_shinfo(skb)->dataref)) {
324 		if (skb_shinfo(skb)->nr_frags) {
325 			int i;
326 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
327 				put_page(skb_shinfo(skb)->frags[i].page);
328 		}
329 
330 		if (skb_has_frag_list(skb))
331 			skb_drop_fraglist(skb);
332 
333 		kfree(skb->head);
334 	}
335 }
336 
337 /*
338  *	Free the memory of an skbuff without cleaning its state.
339  */
340 static void kfree_skbmem(struct sk_buff *skb)
341 {
342 	struct sk_buff *other;
343 	atomic_t *fclone_ref;
344 
345 	switch (skb->fclone) {
346 	case SKB_FCLONE_UNAVAILABLE:
347 		kmem_cache_free(skbuff_head_cache, skb);
348 		break;
349 
350 	case SKB_FCLONE_ORIG:
351 		fclone_ref = (atomic_t *) (skb + 2);
352 		if (atomic_dec_and_test(fclone_ref))
353 			kmem_cache_free(skbuff_fclone_cache, skb);
354 		break;
355 
356 	case SKB_FCLONE_CLONE:
357 		fclone_ref = (atomic_t *) (skb + 1);
358 		other = skb - 1;
359 
360 		/* The clone portion is available for
361 		 * fast-cloning again.
362 		 */
363 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
364 
365 		if (atomic_dec_and_test(fclone_ref))
366 			kmem_cache_free(skbuff_fclone_cache, other);
367 		break;
368 	}
369 }
370 
371 static void skb_release_head_state(struct sk_buff *skb)
372 {
373 	skb_dst_drop(skb);
374 #ifdef CONFIG_XFRM
375 	secpath_put(skb->sp);
376 #endif
377 	if (skb->destructor) {
378 		WARN_ON(in_irq());
379 		skb->destructor(skb);
380 	}
381 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
382 	nf_conntrack_put(skb->nfct);
383 	nf_conntrack_put_reasm(skb->nfct_reasm);
384 #endif
385 #ifdef CONFIG_BRIDGE_NETFILTER
386 	nf_bridge_put(skb->nf_bridge);
387 #endif
388 /* XXX: IS this still necessary? - JHS */
389 #ifdef CONFIG_NET_SCHED
390 	skb->tc_index = 0;
391 #ifdef CONFIG_NET_CLS_ACT
392 	skb->tc_verd = 0;
393 #endif
394 #endif
395 }
396 
397 /* Free everything but the sk_buff shell. */
398 static void skb_release_all(struct sk_buff *skb)
399 {
400 	skb_release_head_state(skb);
401 	skb_release_data(skb);
402 }
403 
404 /**
405  *	__kfree_skb - private function
406  *	@skb: buffer
407  *
408  *	Free an sk_buff. Release anything attached to the buffer.
409  *	Clean the state. This is an internal helper function. Users should
410  *	always call kfree_skb().
411  */
412 
413 void __kfree_skb(struct sk_buff *skb)
414 {
415 	skb_release_all(skb);
416 	kfree_skbmem(skb);
417 }
418 EXPORT_SYMBOL(__kfree_skb);
419 
420 /**
421  *	kfree_skb - free an sk_buff
422  *	@skb: buffer to free
423  *
424  *	Drop a reference to the buffer and free it if the usage count has
425  *	hit zero.
426  */
427 void kfree_skb(struct sk_buff *skb)
428 {
429 	if (unlikely(!skb))
430 		return;
431 	if (likely(atomic_read(&skb->users) == 1))
432 		smp_rmb();
433 	else if (likely(!atomic_dec_and_test(&skb->users)))
434 		return;
435 	trace_kfree_skb(skb, __builtin_return_address(0));
436 	__kfree_skb(skb);
437 }
438 EXPORT_SYMBOL(kfree_skb);
439 
440 /**
441  *	consume_skb - free an skbuff
442  *	@skb: buffer to free
443  *
444  *	Drop a reference to the buffer and free it if the usage count has hit zero.
445  *	Functions identically to kfree_skb(), but kfree_skb() assumes the frame is
446  *	being dropped after a failure and notes that; consume_skb() marks normal use.
447  */
448 void consume_skb(struct sk_buff *skb)
449 {
450 	if (unlikely(!skb))
451 		return;
452 	if (likely(atomic_read(&skb->users) == 1))
453 		smp_rmb();
454 	else if (likely(!atomic_dec_and_test(&skb->users)))
455 		return;
456 	trace_consume_skb(skb);
457 	__kfree_skb(skb);
458 }
459 EXPORT_SYMBOL(consume_skb);
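
/*
 * Editorial sketch (illustrative, not part of the original file): the
 * intended split between the two helpers above.  A driver's TX-completion
 * handler uses consume_skb() for frames that were sent successfully and
 * kfree_skb() for frames it had to drop, so drop-monitoring tools only
 * see the latter.  The function name is hypothetical.
 */
static inline void skbuff_example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		consume_skb(skb);	/* normal consumption, not a drop */
	else
		kfree_skb(skb);		/* recorded by the drop tracepoint */
}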
460 
461 /**
462  *	skb_recycle_check - check if skb can be reused for receive
463  *	@skb: buffer
464  *	@skb_size: minimum receive buffer size
465  *
466  *	Checks that the skb passed in is not shared or cloned, and
467  *	that it is linear and that its head portion is at least as large as
468  *	skb_size so that it can be recycled as a receive buffer.
469  *	If these conditions are met, this function does any necessary
470  *	reference count dropping and cleans up the skbuff as if it
471  *	just came from __alloc_skb().
472  */
473 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
474 {
475 	struct skb_shared_info *shinfo;
476 
477 	if (irqs_disabled())
478 		return false;
479 
480 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
481 		return false;
482 
483 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
484 	if (skb_end_pointer(skb) - skb->head < skb_size)
485 		return false;
486 
487 	if (skb_shared(skb) || skb_cloned(skb))
488 		return false;
489 
490 	skb_release_head_state(skb);
491 
492 	shinfo = skb_shinfo(skb);
493 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
494 	atomic_set(&shinfo->dataref, 1);
495 
496 	memset(skb, 0, offsetof(struct sk_buff, tail));
497 	skb->data = skb->head + NET_SKB_PAD;
498 	skb_reset_tail_pointer(skb);
499 
500 	return true;
501 }
502 EXPORT_SYMBOL(skb_recycle_check);
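
/*
 * Editorial sketch (illustrative, not part of the original file): how a
 * driver might use skb_recycle_check() in its TX-completion path to feed
 * transmitted buffers back into its receive ring instead of freeing them.
 * The refill_rx() callback stands in for the driver's own ring-refill
 * helper; all names are hypothetical.
 */
static inline void skbuff_example_tx_recycle(struct sk_buff *skb,
					     int rx_buf_size,
					     void (*refill_rx)(struct sk_buff *))
{
	if (skb_recycle_check(skb, rx_buf_size))
		refill_rx(skb);		/* reuse as a fresh receive buffer */
	else
		consume_skb(skb);	/* cannot be recycled, free normally */
}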
503 
504 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
505 {
506 	new->tstamp		= old->tstamp;
507 	new->dev		= old->dev;
508 	new->transport_header	= old->transport_header;
509 	new->network_header	= old->network_header;
510 	new->mac_header		= old->mac_header;
511 	skb_dst_copy(new, old);
512 	new->rxhash		= old->rxhash;
513 #ifdef CONFIG_XFRM
514 	new->sp			= secpath_get(old->sp);
515 #endif
516 	memcpy(new->cb, old->cb, sizeof(old->cb));
517 	new->csum		= old->csum;
518 	new->local_df		= old->local_df;
519 	new->pkt_type		= old->pkt_type;
520 	new->ip_summed		= old->ip_summed;
521 	skb_copy_queue_mapping(new, old);
522 	new->priority		= old->priority;
523 	new->deliver_no_wcard	= old->deliver_no_wcard;
524 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
525 	new->ipvs_property	= old->ipvs_property;
526 #endif
527 	new->protocol		= old->protocol;
528 	new->mark		= old->mark;
529 	new->skb_iif		= old->skb_iif;
530 	__nf_copy(new, old);
531 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
532     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
533 	new->nf_trace		= old->nf_trace;
534 #endif
535 #ifdef CONFIG_NET_SCHED
536 	new->tc_index		= old->tc_index;
537 #ifdef CONFIG_NET_CLS_ACT
538 	new->tc_verd		= old->tc_verd;
539 #endif
540 #endif
541 	new->vlan_tci		= old->vlan_tci;
542 
543 	skb_copy_secmark(new, old);
544 }
545 
546 /*
547  * You should not add any new code to this function.  Add it to
548  * __copy_skb_header above instead.
549  */
550 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
551 {
552 #define C(x) n->x = skb->x
553 
554 	n->next = n->prev = NULL;
555 	n->sk = NULL;
556 	__copy_skb_header(n, skb);
557 
558 	C(len);
559 	C(data_len);
560 	C(mac_len);
561 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
562 	n->cloned = 1;
563 	n->nohdr = 0;
564 	n->destructor = NULL;
565 	C(tail);
566 	C(end);
567 	C(head);
568 	C(data);
569 	C(truesize);
570 	atomic_set(&n->users, 1);
571 
572 	atomic_inc(&(skb_shinfo(skb)->dataref));
573 	skb->cloned = 1;
574 
575 	return n;
576 #undef C
577 }
578 
579 /**
580  *	skb_morph	-	morph one skb into another
581  *	@dst: the skb to receive the contents
582  *	@src: the skb to supply the contents
583  *
584  *	This is identical to skb_clone except that the target skb is
585  *	supplied by the user.
586  *
587  *	The target skb is returned upon exit.
588  */
589 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
590 {
591 	skb_release_all(dst);
592 	return __skb_clone(dst, src);
593 }
594 EXPORT_SYMBOL_GPL(skb_morph);
595 
596 /**
597  *	skb_clone	-	duplicate an sk_buff
598  *	@skb: buffer to clone
599  *	@gfp_mask: allocation priority
600  *
601  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
602  *	copies share the same packet data but not structure. The new
603  *	buffer has a reference count of 1. If the allocation fails the
604  *	function returns %NULL otherwise the new buffer is returned.
605  *
606  *	If this function is called from an interrupt, @gfp_mask must be
607  *	%GFP_ATOMIC.
608  */
609 
610 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
611 {
612 	struct sk_buff *n;
613 
614 	n = skb + 1;
615 	if (skb->fclone == SKB_FCLONE_ORIG &&
616 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
617 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
618 		n->fclone = SKB_FCLONE_CLONE;
619 		atomic_inc(fclone_ref);
620 	} else {
621 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
622 		if (!n)
623 			return NULL;
624 
625 		kmemcheck_annotate_bitfield(n, flags1);
626 		kmemcheck_annotate_bitfield(n, flags2);
627 		n->fclone = SKB_FCLONE_UNAVAILABLE;
628 	}
629 
630 	return __skb_clone(n, skb);
631 }
632 EXPORT_SYMBOL(skb_clone);
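
/*
 * Editorial sketch (illustrative, not part of the original file): cloning
 * for a tap-style consumer.  The clone shares the packet data with the
 * original, so it is only suitable when the extra consumer reads but does
 * not modify the frame.  deliver() is a hypothetical callback.
 */
static inline void skbuff_example_deliver_clone(struct sk_buff *skb,
						void (*deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		deliver(clone);		/* read-only consumer gets the clone */
}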
633 
634 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
635 {
636 #ifndef NET_SKBUFF_DATA_USES_OFFSET
637 	/*
638 	 *	Shift between the two data areas in bytes
639 	 */
640 	unsigned long offset = new->data - old->data;
641 #endif
642 
643 	__copy_skb_header(new, old);
644 
645 #ifndef NET_SKBUFF_DATA_USES_OFFSET
646 	/* {transport,network,mac}_header are relative to skb->head */
647 	new->transport_header += offset;
648 	new->network_header   += offset;
649 	if (skb_mac_header_was_set(new))
650 		new->mac_header	      += offset;
651 #endif
652 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
653 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
654 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
655 }
656 
657 /**
658  *	skb_copy	-	create private copy of an sk_buff
659  *	@skb: buffer to copy
660  *	@gfp_mask: allocation priority
661  *
662  *	Make a copy of both an &sk_buff and its data. This is used when the
663  *	caller wishes to modify the data and needs a private copy of the
664  *	data to alter. Returns %NULL on failure or the pointer to the buffer
665  *	on success. The returned buffer has a reference count of 1.
666  *
667  *	As a by-product this function converts a non-linear &sk_buff into a
668  *	linear one, so the &sk_buff becomes completely private and the caller
669  *	may modify all the data of the returned buffer. This means the
670  *	function is not recommended for use when only the header is going
671  *	to be modified. Use pskb_copy() instead.
672  */
673 
674 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
675 {
676 	int headerlen = skb_headroom(skb);
677 	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
678 	struct sk_buff *n = alloc_skb(size, gfp_mask);
679 
680 	if (!n)
681 		return NULL;
682 
683 	/* Set the data pointer */
684 	skb_reserve(n, headerlen);
685 	/* Set the tail pointer and length */
686 	skb_put(n, skb->len);
687 
688 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
689 		BUG();
690 
691 	copy_skb_header(n, skb);
692 	return n;
693 }
694 EXPORT_SYMBOL(skb_copy);
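
/*
 * Editorial sketch (illustrative, not part of the original file): taking a
 * fully private, writable copy before touching payload bytes, as the
 * comment above recommends whenever more than the header will be modified.
 * The single-byte edit is an arbitrary placeholder.
 */
static inline struct sk_buff *skbuff_example_private_edit(const struct sk_buff *skb)
{
	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

	if (!copy)
		return NULL;

	if (copy->len)
		copy->data[0] ^= 0x01;	/* safe: the copy is linear and unshared */
	return copy;
}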
695 
696 /**
697  *	pskb_copy	-	create copy of an sk_buff with private head.
698  *	@skb: buffer to copy
699  *	@gfp_mask: allocation priority
700  *
701  *	Make a copy of both an &sk_buff and part of its data, located
702  *	in its header. Fragmented data remain shared. This is used when
703  *	the caller wishes to modify only the header of the &sk_buff and needs
704  *	a private copy of the header to alter. Returns %NULL on failure
705  *	or the pointer to the buffer on success.
706  *	The returned buffer has a reference count of 1.
707  */
708 
709 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
710 {
711 	unsigned int size = skb_end_pointer(skb) - skb->head;
712 	struct sk_buff *n = alloc_skb(size, gfp_mask);
713 
714 	if (!n)
715 		goto out;
716 
717 	/* Set the data pointer */
718 	skb_reserve(n, skb_headroom(skb));
719 	/* Set the tail pointer and length */
720 	skb_put(n, skb_headlen(skb));
721 	/* Copy the bytes */
722 	skb_copy_from_linear_data(skb, n->data, n->len);
723 
724 	n->truesize += skb->data_len;
725 	n->data_len  = skb->data_len;
726 	n->len	     = skb->len;
727 
728 	if (skb_shinfo(skb)->nr_frags) {
729 		int i;
730 
731 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
732 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
733 			get_page(skb_shinfo(n)->frags[i].page);
734 		}
735 		skb_shinfo(n)->nr_frags = i;
736 	}
737 
738 	if (skb_has_frag_list(skb)) {
739 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
740 		skb_clone_fraglist(n);
741 	}
742 
743 	copy_skb_header(n, skb);
744 out:
745 	return n;
746 }
747 EXPORT_SYMBOL(pskb_copy);
748 
749 /**
750  *	pskb_expand_head - reallocate header of &sk_buff
751  *	@skb: buffer to reallocate
752  *	@nhead: room to add at head
753  *	@ntail: room to add at tail
754  *	@gfp_mask: allocation priority
755  *
756  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
757  *	the header of the skb. The &sk_buff itself is not changed and MUST have
758  *	a reference count of 1. Returns zero on success or a negative error
759  *	code if the expansion failed. In the latter case, the &sk_buff is not changed.
760  *
761  *	All the pointers pointing into skb header may change and must be
762  *	reloaded after call to this function.
763  */
764 
765 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
766 		     gfp_t gfp_mask)
767 {
768 	int i;
769 	u8 *data;
770 	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
771 	long off;
772 	bool fastpath;
773 
774 	BUG_ON(nhead < 0);
775 
776 	if (skb_shared(skb))
777 		BUG();
778 
779 	size = SKB_DATA_ALIGN(size);
780 
781 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
782 	if (!data)
783 		goto nodata;
784 
785 	/* Copy only real data... and, alas, header. This should be
786 	 * optimized for the cases when header is void.
787 	 */
788 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
789 
790 	memcpy((struct skb_shared_info *)(data + size),
791 	       skb_shinfo(skb),
792 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
793 
794 	/* Check if we can avoid taking references on fragments if we own
795 	 * the last reference on skb->head. (see skb_release_data())
796 	 */
797 	if (!skb->cloned)
798 		fastpath = true;
799 	else {
800 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
801 
802 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
803 	}
804 
805 	if (fastpath) {
806 		kfree(skb->head);
807 	} else {
808 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
809 			get_page(skb_shinfo(skb)->frags[i].page);
810 
811 		if (skb_has_frag_list(skb))
812 			skb_clone_fraglist(skb);
813 
814 		skb_release_data(skb);
815 	}
816 	off = (data + nhead) - skb->head;
817 
818 	skb->head     = data;
819 	skb->data    += off;
820 #ifdef NET_SKBUFF_DATA_USES_OFFSET
821 	skb->end      = size;
822 	off           = nhead;
823 #else
824 	skb->end      = skb->head + size;
825 #endif
826 	/* {transport,network,mac}_header and tail are relative to skb->head */
827 	skb->tail	      += off;
828 	skb->transport_header += off;
829 	skb->network_header   += off;
830 	if (skb_mac_header_was_set(skb))
831 		skb->mac_header += off;
832 	/* Only adjust this if it actually is csum_start rather than csum */
833 	if (skb->ip_summed == CHECKSUM_PARTIAL)
834 		skb->csum_start += nhead;
835 	skb->cloned   = 0;
836 	skb->hdr_len  = 0;
837 	skb->nohdr    = 0;
838 	atomic_set(&skb_shinfo(skb)->dataref, 1);
839 	return 0;
840 
841 nodata:
842 	return -ENOMEM;
843 }
844 EXPORT_SYMBOL(pskb_expand_head);
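
/*
 * Editorial sketch (illustrative, not part of the original file): growing
 * headroom before pushing a new header onto an unshared skb.  Every cached
 * pointer into the old header is stale after pskb_expand_head(), so the
 * push works from the skb itself rather than a saved skb->data.  hdr_len
 * is a hypothetical header size.
 */
static inline int skbuff_example_push_header(struct sk_buff *skb,
					     unsigned int hdr_len)
{
	unsigned int headroom = skb_headroom(skb);
	int delta = 0;

	if (hdr_len > headroom)
		delta = hdr_len - headroom;

	if (delta || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, delta, 0, GFP_ATOMIC);

		if (err)
			return err;
	}

	memset(skb_push(skb, hdr_len), 0, hdr_len);	/* blank new header */
	return 0;
}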
845 
846 /* Make private copy of skb with writable head and some headroom */
847 
848 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
849 {
850 	struct sk_buff *skb2;
851 	int delta = headroom - skb_headroom(skb);
852 
853 	if (delta <= 0)
854 		skb2 = pskb_copy(skb, GFP_ATOMIC);
855 	else {
856 		skb2 = skb_clone(skb, GFP_ATOMIC);
857 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
858 					     GFP_ATOMIC)) {
859 			kfree_skb(skb2);
860 			skb2 = NULL;
861 		}
862 	}
863 	return skb2;
864 }
865 EXPORT_SYMBOL(skb_realloc_headroom);
866 
867 /**
868  *	skb_copy_expand	-	copy and expand sk_buff
869  *	@skb: buffer to copy
870  *	@newheadroom: new free bytes at head
871  *	@newtailroom: new free bytes at tail
872  *	@gfp_mask: allocation priority
873  *
874  *	Make a copy of both an &sk_buff and its data and while doing so
875  *	allocate additional space.
876  *
877  *	This is used when the caller wishes to modify the data and needs a
878  *	private copy of the data to alter as well as more space for new fields.
879  *	Returns %NULL on failure or the pointer to the buffer
880  *	on success. The returned buffer has a reference count of 1.
881  *
882  *	You must pass %GFP_ATOMIC as the allocation priority if this function
883  *	is called from an interrupt.
884  */
885 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
886 				int newheadroom, int newtailroom,
887 				gfp_t gfp_mask)
888 {
889 	/*
890 	 *	Allocate the copy buffer
891 	 */
892 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
893 				      gfp_mask);
894 	int oldheadroom = skb_headroom(skb);
895 	int head_copy_len, head_copy_off;
896 	int off;
897 
898 	if (!n)
899 		return NULL;
900 
901 	skb_reserve(n, newheadroom);
902 
903 	/* Set the tail pointer and length */
904 	skb_put(n, skb->len);
905 
906 	head_copy_len = oldheadroom;
907 	head_copy_off = 0;
908 	if (newheadroom <= head_copy_len)
909 		head_copy_len = newheadroom;
910 	else
911 		head_copy_off = newheadroom - head_copy_len;
912 
913 	/* Copy the linear header and data. */
914 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
915 			  skb->len + head_copy_len))
916 		BUG();
917 
918 	copy_skb_header(n, skb);
919 
920 	off                  = newheadroom - oldheadroom;
921 	if (n->ip_summed == CHECKSUM_PARTIAL)
922 		n->csum_start += off;
923 #ifdef NET_SKBUFF_DATA_USES_OFFSET
924 	n->transport_header += off;
925 	n->network_header   += off;
926 	if (skb_mac_header_was_set(skb))
927 		n->mac_header += off;
928 #endif
929 
930 	return n;
931 }
932 EXPORT_SYMBOL(skb_copy_expand);
933 
934 /**
935  *	skb_pad			-	zero pad the tail of an skb
936  *	@skb: buffer to pad
937  *	@pad: space to pad
938  *
939  *	Ensure that a buffer is followed by a padding area that is zero
940  *	filled. Used by network drivers which may DMA or transfer data
941  *	beyond the buffer end onto the wire.
942  *
943  *	May return an error in out-of-memory cases. The skb is freed on error.
944  */
945 
946 int skb_pad(struct sk_buff *skb, int pad)
947 {
948 	int err;
949 	int ntail;
950 
951 	/* If the skbuff is non-linear, tailroom is always zero. */
952 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
953 		memset(skb->data+skb->len, 0, pad);
954 		return 0;
955 	}
956 
957 	ntail = skb->data_len + pad - (skb->end - skb->tail);
958 	if (likely(skb_cloned(skb) || ntail > 0)) {
959 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
960 		if (unlikely(err))
961 			goto free_skb;
962 	}
963 
964 	/* FIXME: The use of this function with non-linear skb's really needs
965 	 * to be audited.
966 	 */
967 	err = skb_linearize(skb);
968 	if (unlikely(err))
969 		goto free_skb;
970 
971 	memset(skb->data + skb->len, 0, pad);
972 	return 0;
973 
974 free_skb:
975 	kfree_skb(skb);
976 	return err;
977 }
978 EXPORT_SYMBOL(skb_pad);
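
/*
 * Editorial sketch (illustrative, not part of the original file): padding
 * a short Ethernet frame before handing it to hardware.  skb_pad() zero
 * fills the tailroom but does not change skb->len, so the driver tracks
 * the on-wire length separately; on error the skb has already been freed.
 */
static inline int skbuff_example_pad_tx(struct sk_buff *skb,
					unsigned int *wire_len)
{
	*wire_len = skb->len;

	if (skb->len < ETH_ZLEN) {
		int err = skb_pad(skb, ETH_ZLEN - skb->len);

		if (err)
			return err;	/* skb was freed by skb_pad() */
		*wire_len = ETH_ZLEN;
	}
	return 0;
}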
979 
980 /**
981  *	skb_put - add data to a buffer
982  *	@skb: buffer to use
983  *	@len: amount of data to add
984  *
985  *	This function extends the used data area of the buffer. If this would
986  *	exceed the total buffer size the kernel will panic. A pointer to the
987  *	first byte of the extra data is returned.
988  */
989 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
990 {
991 	unsigned char *tmp = skb_tail_pointer(skb);
992 	SKB_LINEAR_ASSERT(skb);
993 	skb->tail += len;
994 	skb->len  += len;
995 	if (unlikely(skb->tail > skb->end))
996 		skb_over_panic(skb, len, __builtin_return_address(0));
997 	return tmp;
998 }
999 EXPORT_SYMBOL(skb_put);
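
/*
 * Editorial sketch (illustrative, not part of the original file): the
 * usual skb_put() idiom.  The returned pointer addresses the newly added
 * area, so data can be copied straight into it; the caller must have
 * allocated enough tailroom beforehand.
 */
static inline void skbuff_example_append(struct sk_buff *skb,
					 const void *data, unsigned int len)
{
	memcpy(skb_put(skb, len), data, len);
}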
1000 
1001 /**
1002  *	skb_push - add data to the start of a buffer
1003  *	@skb: buffer to use
1004  *	@len: amount of data to add
1005  *
1006  *	This function extends the used data area of the buffer at the buffer
1007  *	start. If this would exceed the total buffer headroom the kernel will
1008  *	panic. A pointer to the first byte of the extra data is returned.
1009  */
1010 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1011 {
1012 	skb->data -= len;
1013 	skb->len  += len;
1014 	if (unlikely(skb->data<skb->head))
1015 		skb_under_panic(skb, len, __builtin_return_address(0));
1016 	return skb->data;
1017 }
1018 EXPORT_SYMBOL(skb_push);
1019 
1020 /**
1021  *	skb_pull - remove data from the start of a buffer
1022  *	@skb: buffer to use
1023  *	@len: amount of data to remove
1024  *
1025  *	This function removes data from the start of a buffer, returning
1026  *	the memory to the headroom. A pointer to the next data in the buffer
1027  *	is returned. Once the data has been pulled future pushes will overwrite
1028  *	the old data.
1029  */
1030 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1031 {
1032 	return skb_pull_inline(skb, len);
1033 }
1034 EXPORT_SYMBOL(skb_pull);
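
/*
 * Editorial sketch (illustrative, not part of the original file): a receive
 * path stepping past the Ethernet header.  For possibly non-linear skbs,
 * pskb_may_pull() is used first so the header is guaranteed to sit in the
 * linear area before skb_pull() advances past it.
 */
static inline struct ethhdr *skbuff_example_strip_eth(struct sk_buff *skb)
{
	struct ethhdr *eth;

	if (!pskb_may_pull(skb, ETH_HLEN))
		return NULL;		/* shorter than an Ethernet header */

	eth = (struct ethhdr *)skb->data;
	skb_pull(skb, ETH_HLEN);	/* payload now starts at skb->data */
	return eth;
}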
1035 
1036 /**
1037  *	skb_trim - remove end from a buffer
1038  *	@skb: buffer to alter
1039  *	@len: new length
1040  *
1041  *	Cut the length of a buffer down by removing data from the tail. If
1042  *	the buffer is already under the length specified it is not modified.
1043  *	The skb must be linear.
1044  */
1045 void skb_trim(struct sk_buff *skb, unsigned int len)
1046 {
1047 	if (skb->len > len)
1048 		__skb_trim(skb, len);
1049 }
1050 EXPORT_SYMBOL(skb_trim);
1051 
1052 /* Trims skb to length len. It can change skb pointers.
1053  */
1054 
1055 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1056 {
1057 	struct sk_buff **fragp;
1058 	struct sk_buff *frag;
1059 	int offset = skb_headlen(skb);
1060 	int nfrags = skb_shinfo(skb)->nr_frags;
1061 	int i;
1062 	int err;
1063 
1064 	if (skb_cloned(skb) &&
1065 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1066 		return err;
1067 
1068 	i = 0;
1069 	if (offset >= len)
1070 		goto drop_pages;
1071 
1072 	for (; i < nfrags; i++) {
1073 		int end = offset + skb_shinfo(skb)->frags[i].size;
1074 
1075 		if (end < len) {
1076 			offset = end;
1077 			continue;
1078 		}
1079 
1080 		skb_shinfo(skb)->frags[i++].size = len - offset;
1081 
1082 drop_pages:
1083 		skb_shinfo(skb)->nr_frags = i;
1084 
1085 		for (; i < nfrags; i++)
1086 			put_page(skb_shinfo(skb)->frags[i].page);
1087 
1088 		if (skb_has_frag_list(skb))
1089 			skb_drop_fraglist(skb);
1090 		goto done;
1091 	}
1092 
1093 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1094 	     fragp = &frag->next) {
1095 		int end = offset + frag->len;
1096 
1097 		if (skb_shared(frag)) {
1098 			struct sk_buff *nfrag;
1099 
1100 			nfrag = skb_clone(frag, GFP_ATOMIC);
1101 			if (unlikely(!nfrag))
1102 				return -ENOMEM;
1103 
1104 			nfrag->next = frag->next;
1105 			kfree_skb(frag);
1106 			frag = nfrag;
1107 			*fragp = frag;
1108 		}
1109 
1110 		if (end < len) {
1111 			offset = end;
1112 			continue;
1113 		}
1114 
1115 		if (end > len &&
1116 		    unlikely((err = pskb_trim(frag, len - offset))))
1117 			return err;
1118 
1119 		if (frag->next)
1120 			skb_drop_list(&frag->next);
1121 		break;
1122 	}
1123 
1124 done:
1125 	if (len > skb_headlen(skb)) {
1126 		skb->data_len -= skb->len - len;
1127 		skb->len       = len;
1128 	} else {
1129 		skb->len       = len;
1130 		skb->data_len  = 0;
1131 		skb_set_tail_pointer(skb, len);
1132 	}
1133 
1134 	return 0;
1135 }
1136 EXPORT_SYMBOL(___pskb_trim);
1137 
1138 /**
1139  *	__pskb_pull_tail - advance tail of skb header
1140  *	@skb: buffer to reallocate
1141  *	@delta: number of bytes to advance tail
1142  *
1143  *	The function only makes sense on a fragmented &sk_buff:
1144  *	it expands the header, moving its tail forward and copying the
1145  *	necessary data from the fragmented part.
1146  *
1147  *	&sk_buff MUST have reference count of 1.
1148  *
1149  *	Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
1150  *	or the value of the new tail of the skb on success.
1151  *
1152  *	All the pointers pointing into skb header may change and must be
1153  *	reloaded after call to this function.
1154  */
1155 
1156 /* Moves tail of skb head forward, copying data from fragmented part,
1157  * when it is necessary.
1158  * 1. It may fail due to malloc failure.
1159  * 2. It may change skb pointers.
1160  *
1161  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1162  */
1163 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1164 {
1165 	/* If the skb does not have enough free space at the tail, get a new
1166 	 * data area plus 128 bytes for future expansions. If we have enough
1167 	 * room at the tail, reallocate without expansion only if the skb is cloned.
1168 	 */
1169 	int i, k, eat = (skb->tail + delta) - skb->end;
1170 
1171 	if (eat > 0 || skb_cloned(skb)) {
1172 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1173 				     GFP_ATOMIC))
1174 			return NULL;
1175 	}
1176 
1177 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1178 		BUG();
1179 
1180 	/* Optimization: no fragments, no reason to pre-estimate the
1181 	 * size of pulled pages. Superb.
1182 	 */
1183 	if (!skb_has_frag_list(skb))
1184 		goto pull_pages;
1185 
1186 	/* Estimate size of pulled pages. */
1187 	eat = delta;
1188 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1189 		if (skb_shinfo(skb)->frags[i].size >= eat)
1190 			goto pull_pages;
1191 		eat -= skb_shinfo(skb)->frags[i].size;
1192 	}
1193 
1194 	/* If we need to update the frag list, we are in trouble.
1195 	 * Certainly, it is possible to add an offset to the skb data,
1196 	 * but taking into account that pulling is expected to
1197 	 * be a very rare operation, it is worth fighting against
1198 	 * further bloating of the skb head and crucifying ourselves here instead.
1199 	 * Pure masochism, indeed. 8)8)
1200 	 */
1201 	if (eat) {
1202 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1203 		struct sk_buff *clone = NULL;
1204 		struct sk_buff *insp = NULL;
1205 
1206 		do {
1207 			BUG_ON(!list);
1208 
1209 			if (list->len <= eat) {
1210 				/* Eaten as whole. */
1211 				eat -= list->len;
1212 				list = list->next;
1213 				insp = list;
1214 			} else {
1215 				/* Eaten partially. */
1216 
1217 				if (skb_shared(list)) {
1218 					/* Sucks! We need to fork list. :-( */
1219 					clone = skb_clone(list, GFP_ATOMIC);
1220 					if (!clone)
1221 						return NULL;
1222 					insp = list->next;
1223 					list = clone;
1224 				} else {
1225 					/* This may be pulled without
1226 					 * problems. */
1227 					insp = list;
1228 				}
1229 				if (!pskb_pull(list, eat)) {
1230 					kfree_skb(clone);
1231 					return NULL;
1232 				}
1233 				break;
1234 			}
1235 		} while (eat);
1236 
1237 		/* Free pulled out fragments. */
1238 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1239 			skb_shinfo(skb)->frag_list = list->next;
1240 			kfree_skb(list);
1241 		}
1242 		/* And insert new clone at head. */
1243 		if (clone) {
1244 			clone->next = list;
1245 			skb_shinfo(skb)->frag_list = clone;
1246 		}
1247 	}
1248 	/* Success! Now we may commit changes to skb data. */
1249 
1250 pull_pages:
1251 	eat = delta;
1252 	k = 0;
1253 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1254 		if (skb_shinfo(skb)->frags[i].size <= eat) {
1255 			put_page(skb_shinfo(skb)->frags[i].page);
1256 			eat -= skb_shinfo(skb)->frags[i].size;
1257 		} else {
1258 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1259 			if (eat) {
1260 				skb_shinfo(skb)->frags[k].page_offset += eat;
1261 				skb_shinfo(skb)->frags[k].size -= eat;
1262 				eat = 0;
1263 			}
1264 			k++;
1265 		}
1266 	}
1267 	skb_shinfo(skb)->nr_frags = k;
1268 
1269 	skb->tail     += delta;
1270 	skb->data_len -= delta;
1271 
1272 	return skb_tail_pointer(skb);
1273 }
1274 EXPORT_SYMBOL(__pskb_pull_tail);
1275 
1276 /* Copy some data bits from skb to kernel buffer. */
1277 
1278 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1279 {
1280 	int start = skb_headlen(skb);
1281 	struct sk_buff *frag_iter;
1282 	int i, copy;
1283 
1284 	if (offset > (int)skb->len - len)
1285 		goto fault;
1286 
1287 	/* Copy header. */
1288 	if ((copy = start - offset) > 0) {
1289 		if (copy > len)
1290 			copy = len;
1291 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1292 		if ((len -= copy) == 0)
1293 			return 0;
1294 		offset += copy;
1295 		to     += copy;
1296 	}
1297 
1298 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1299 		int end;
1300 
1301 		WARN_ON(start > offset + len);
1302 
1303 		end = start + skb_shinfo(skb)->frags[i].size;
1304 		if ((copy = end - offset) > 0) {
1305 			u8 *vaddr;
1306 
1307 			if (copy > len)
1308 				copy = len;
1309 
1310 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1311 			memcpy(to,
1312 			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
1313 			       offset - start, copy);
1314 			kunmap_skb_frag(vaddr);
1315 
1316 			if ((len -= copy) == 0)
1317 				return 0;
1318 			offset += copy;
1319 			to     += copy;
1320 		}
1321 		start = end;
1322 	}
1323 
1324 	skb_walk_frags(skb, frag_iter) {
1325 		int end;
1326 
1327 		WARN_ON(start > offset + len);
1328 
1329 		end = start + frag_iter->len;
1330 		if ((copy = end - offset) > 0) {
1331 			if (copy > len)
1332 				copy = len;
1333 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1334 				goto fault;
1335 			if ((len -= copy) == 0)
1336 				return 0;
1337 			offset += copy;
1338 			to     += copy;
1339 		}
1340 		start = end;
1341 	}
1342 	if (!len)
1343 		return 0;
1344 
1345 fault:
1346 	return -EFAULT;
1347 }
1348 EXPORT_SYMBOL(skb_copy_bits);
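
/*
 * Editorial sketch (illustrative, not part of the original file): copying
 * a fixed-size header out of a possibly fragmented skb into a caller
 * buffer (typically on the stack).  skb_copy_bits() walks the linear area,
 * the page fragments and the frag list as needed, so the caller does not
 * care where the bytes actually live.
 */
static inline int skbuff_example_peek_eth(const struct sk_buff *skb,
					  struct ethhdr *eth)
{
	return skb_copy_bits(skb, 0, eth, sizeof(*eth));	/* 0 or -EFAULT */
}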
1349 
1350 /*
1351  * Callback from splice_to_pipe(), if we need to release some pages
1352  * at the end of the spd in case we errored out while filling the pipe.
1353  */
1354 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1355 {
1356 	put_page(spd->pages[i]);
1357 }
1358 
1359 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1360 					  unsigned int *offset,
1361 					  struct sk_buff *skb, struct sock *sk)
1362 {
1363 	struct page *p = sk->sk_sndmsg_page;
1364 	unsigned int off;
1365 
1366 	if (!p) {
1367 new_page:
1368 		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1369 		if (!p)
1370 			return NULL;
1371 
1372 		off = sk->sk_sndmsg_off = 0;
1373 		/* hold one ref to this page until it's full */
1374 	} else {
1375 		unsigned int mlen;
1376 
1377 		off = sk->sk_sndmsg_off;
1378 		mlen = PAGE_SIZE - off;
1379 		if (mlen < 64 && mlen < *len) {
1380 			put_page(p);
1381 			goto new_page;
1382 		}
1383 
1384 		*len = min_t(unsigned int, *len, mlen);
1385 	}
1386 
1387 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1388 	sk->sk_sndmsg_off += *len;
1389 	*offset = off;
1390 	get_page(p);
1391 
1392 	return p;
1393 }
1394 
1395 /*
1396  * Fill page/offset/length into spd, if it can hold more pages.
1397  */
1398 static inline int spd_fill_page(struct splice_pipe_desc *spd,
1399 				struct pipe_inode_info *pipe, struct page *page,
1400 				unsigned int *len, unsigned int offset,
1401 				struct sk_buff *skb, int linear,
1402 				struct sock *sk)
1403 {
1404 	if (unlikely(spd->nr_pages == pipe->buffers))
1405 		return 1;
1406 
1407 	if (linear) {
1408 		page = linear_to_page(page, len, &offset, skb, sk);
1409 		if (!page)
1410 			return 1;
1411 	} else
1412 		get_page(page);
1413 
1414 	spd->pages[spd->nr_pages] = page;
1415 	spd->partial[spd->nr_pages].len = *len;
1416 	spd->partial[spd->nr_pages].offset = offset;
1417 	spd->nr_pages++;
1418 
1419 	return 0;
1420 }
1421 
1422 static inline void __segment_seek(struct page **page, unsigned int *poff,
1423 				  unsigned int *plen, unsigned int off)
1424 {
1425 	unsigned long n;
1426 
1427 	*poff += off;
1428 	n = *poff / PAGE_SIZE;
1429 	if (n)
1430 		*page = nth_page(*page, n);
1431 
1432 	*poff = *poff % PAGE_SIZE;
1433 	*plen -= off;
1434 }
1435 
1436 static inline int __splice_segment(struct page *page, unsigned int poff,
1437 				   unsigned int plen, unsigned int *off,
1438 				   unsigned int *len, struct sk_buff *skb,
1439 				   struct splice_pipe_desc *spd, int linear,
1440 				   struct sock *sk,
1441 				   struct pipe_inode_info *pipe)
1442 {
1443 	if (!*len)
1444 		return 1;
1445 
1446 	/* skip this segment if already processed */
1447 	if (*off >= plen) {
1448 		*off -= plen;
1449 		return 0;
1450 	}
1451 
1452 	/* ignore any bits we already processed */
1453 	if (*off) {
1454 		__segment_seek(&page, &poff, &plen, *off);
1455 		*off = 0;
1456 	}
1457 
1458 	do {
1459 		unsigned int flen = min(*len, plen);
1460 
1461 		/* the linear region may spread across several pages  */
1462 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1463 
1464 		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1465 			return 1;
1466 
1467 		__segment_seek(&page, &poff, &plen, flen);
1468 		*len -= flen;
1469 
1470 	} while (*len && plen);
1471 
1472 	return 0;
1473 }
1474 
1475 /*
1476  * Map linear and fragment data from the skb to spd. It reports failure if the
1477  * pipe is full or if we already spliced the requested length.
1478  */
1479 static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1480 			     unsigned int *offset, unsigned int *len,
1481 			     struct splice_pipe_desc *spd, struct sock *sk)
1482 {
1483 	int seg;
1484 
1485 	/*
1486 	 * map the linear part
1487 	 */
1488 	if (__splice_segment(virt_to_page(skb->data),
1489 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1490 			     skb_headlen(skb),
1491 			     offset, len, skb, spd, 1, sk, pipe))
1492 		return 1;
1493 
1494 	/*
1495 	 * then map the fragments
1496 	 */
1497 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1498 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1499 
1500 		if (__splice_segment(f->page, f->page_offset, f->size,
1501 				     offset, len, skb, spd, 0, sk, pipe))
1502 			return 1;
1503 	}
1504 
1505 	return 0;
1506 }
1507 
1508 /*
1509  * Map data from the skb to a pipe. Should handle both the linear part,
1510  * the fragments, and the frag list. It does NOT handle frag lists within
1511  * the frag list, if such a thing exists. We'd probably need to recurse to
1512  * handle that cleanly.
1513  */
1514 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1515 		    struct pipe_inode_info *pipe, unsigned int tlen,
1516 		    unsigned int flags)
1517 {
1518 	struct partial_page partial[PIPE_DEF_BUFFERS];
1519 	struct page *pages[PIPE_DEF_BUFFERS];
1520 	struct splice_pipe_desc spd = {
1521 		.pages = pages,
1522 		.partial = partial,
1523 		.flags = flags,
1524 		.ops = &sock_pipe_buf_ops,
1525 		.spd_release = sock_spd_release,
1526 	};
1527 	struct sk_buff *frag_iter;
1528 	struct sock *sk = skb->sk;
1529 	int ret = 0;
1530 
1531 	if (splice_grow_spd(pipe, &spd))
1532 		return -ENOMEM;
1533 
1534 	/*
1535 	 * __skb_splice_bits() only fails if the output has no room left,
1536 	 * so no point in going over the frag_list for the error case.
1537 	 */
1538 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1539 		goto done;
1540 	else if (!tlen)
1541 		goto done;
1542 
1543 	/*
1544 	 * now see if we have a frag_list to map
1545 	 */
1546 	skb_walk_frags(skb, frag_iter) {
1547 		if (!tlen)
1548 			break;
1549 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1550 			break;
1551 	}
1552 
1553 done:
1554 	if (spd.nr_pages) {
1555 		/*
1556 		 * Drop the socket lock, otherwise we have reverse
1557 		 * locking dependencies between sk_lock and i_mutex
1558 		 * here as compared to sendfile(). We enter here
1559 		 * with the socket lock held, and splice_to_pipe() will
1560 		 * grab the pipe inode lock. For sendfile() emulation,
1561 		 * we call into ->sendpage() with the i_mutex lock held
1562 		 * and networking will grab the socket lock.
1563 		 */
1564 		release_sock(sk);
1565 		ret = splice_to_pipe(pipe, &spd);
1566 		lock_sock(sk);
1567 	}
1568 
1569 	splice_shrink_spd(pipe, &spd);
1570 	return ret;
1571 }
1572 
1573 /**
1574  *	skb_store_bits - store bits from kernel buffer to skb
1575  *	@skb: destination buffer
1576  *	@offset: offset in destination
1577  *	@from: source buffer
1578  *	@len: number of bytes to copy
1579  *
1580  *	Copy the specified number of bytes from the source buffer to the
1581  *	destination skb.  This function handles all the messy bits of
1582  *	traversing fragment lists and such.
1583  */
1584 
1585 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1586 {
1587 	int start = skb_headlen(skb);
1588 	struct sk_buff *frag_iter;
1589 	int i, copy;
1590 
1591 	if (offset > (int)skb->len - len)
1592 		goto fault;
1593 
1594 	if ((copy = start - offset) > 0) {
1595 		if (copy > len)
1596 			copy = len;
1597 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1598 		if ((len -= copy) == 0)
1599 			return 0;
1600 		offset += copy;
1601 		from += copy;
1602 	}
1603 
1604 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1605 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1606 		int end;
1607 
1608 		WARN_ON(start > offset + len);
1609 
1610 		end = start + frag->size;
1611 		if ((copy = end - offset) > 0) {
1612 			u8 *vaddr;
1613 
1614 			if (copy > len)
1615 				copy = len;
1616 
1617 			vaddr = kmap_skb_frag(frag);
1618 			memcpy(vaddr + frag->page_offset + offset - start,
1619 			       from, copy);
1620 			kunmap_skb_frag(vaddr);
1621 
1622 			if ((len -= copy) == 0)
1623 				return 0;
1624 			offset += copy;
1625 			from += copy;
1626 		}
1627 		start = end;
1628 	}
1629 
1630 	skb_walk_frags(skb, frag_iter) {
1631 		int end;
1632 
1633 		WARN_ON(start > offset + len);
1634 
1635 		end = start + frag_iter->len;
1636 		if ((copy = end - offset) > 0) {
1637 			if (copy > len)
1638 				copy = len;
1639 			if (skb_store_bits(frag_iter, offset - start,
1640 					   from, copy))
1641 				goto fault;
1642 			if ((len -= copy) == 0)
1643 				return 0;
1644 			offset += copy;
1645 			from += copy;
1646 		}
1647 		start = end;
1648 	}
1649 	if (!len)
1650 		return 0;
1651 
1652 fault:
1653 	return -EFAULT;
1654 }
1655 EXPORT_SYMBOL(skb_store_bits);
1656 
1657 /* Checksum skb data. */
1658 
1659 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1660 			  int len, __wsum csum)
1661 {
1662 	int start = skb_headlen(skb);
1663 	int i, copy = start - offset;
1664 	struct sk_buff *frag_iter;
1665 	int pos = 0;
1666 
1667 	/* Checksum header. */
1668 	if (copy > 0) {
1669 		if (copy > len)
1670 			copy = len;
1671 		csum = csum_partial(skb->data + offset, copy, csum);
1672 		if ((len -= copy) == 0)
1673 			return csum;
1674 		offset += copy;
1675 		pos	= copy;
1676 	}
1677 
1678 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1679 		int end;
1680 
1681 		WARN_ON(start > offset + len);
1682 
1683 		end = start + skb_shinfo(skb)->frags[i].size;
1684 		if ((copy = end - offset) > 0) {
1685 			__wsum csum2;
1686 			u8 *vaddr;
1687 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1688 
1689 			if (copy > len)
1690 				copy = len;
1691 			vaddr = kmap_skb_frag(frag);
1692 			csum2 = csum_partial(vaddr + frag->page_offset +
1693 					     offset - start, copy, 0);
1694 			kunmap_skb_frag(vaddr);
1695 			csum = csum_block_add(csum, csum2, pos);
1696 			if (!(len -= copy))
1697 				return csum;
1698 			offset += copy;
1699 			pos    += copy;
1700 		}
1701 		start = end;
1702 	}
1703 
1704 	skb_walk_frags(skb, frag_iter) {
1705 		int end;
1706 
1707 		WARN_ON(start > offset + len);
1708 
1709 		end = start + frag_iter->len;
1710 		if ((copy = end - offset) > 0) {
1711 			__wsum csum2;
1712 			if (copy > len)
1713 				copy = len;
1714 			csum2 = skb_checksum(frag_iter, offset - start,
1715 					     copy, 0);
1716 			csum = csum_block_add(csum, csum2, pos);
1717 			if ((len -= copy) == 0)
1718 				return csum;
1719 			offset += copy;
1720 			pos    += copy;
1721 		}
1722 		start = end;
1723 	}
1724 	BUG_ON(len);
1725 
1726 	return csum;
1727 }
1728 EXPORT_SYMBOL(skb_checksum);
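
/*
 * Editorial sketch (illustrative, not part of the original file): computing
 * a folded 16-bit Internet checksum over part of an skb.  The running
 * 32-bit sum from skb_checksum() is reduced with csum_fold(); offset and
 * len must lie within the packet.
 */
static inline __sum16 skbuff_example_csum(const struct sk_buff *skb,
					  int offset, int len)
{
	return csum_fold(skb_checksum(skb, offset, len, 0));
}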
1729 
1730 /* Both of above in one bottle. */
1731 
1732 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1733 				    u8 *to, int len, __wsum csum)
1734 {
1735 	int start = skb_headlen(skb);
1736 	int i, copy = start - offset;
1737 	struct sk_buff *frag_iter;
1738 	int pos = 0;
1739 
1740 	/* Copy header. */
1741 	if (copy > 0) {
1742 		if (copy > len)
1743 			copy = len;
1744 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1745 						 copy, csum);
1746 		if ((len -= copy) == 0)
1747 			return csum;
1748 		offset += copy;
1749 		to     += copy;
1750 		pos	= copy;
1751 	}
1752 
1753 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1754 		int end;
1755 
1756 		WARN_ON(start > offset + len);
1757 
1758 		end = start + skb_shinfo(skb)->frags[i].size;
1759 		if ((copy = end - offset) > 0) {
1760 			__wsum csum2;
1761 			u8 *vaddr;
1762 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1763 
1764 			if (copy > len)
1765 				copy = len;
1766 			vaddr = kmap_skb_frag(frag);
1767 			csum2 = csum_partial_copy_nocheck(vaddr +
1768 							  frag->page_offset +
1769 							  offset - start, to,
1770 							  copy, 0);
1771 			kunmap_skb_frag(vaddr);
1772 			csum = csum_block_add(csum, csum2, pos);
1773 			if (!(len -= copy))
1774 				return csum;
1775 			offset += copy;
1776 			to     += copy;
1777 			pos    += copy;
1778 		}
1779 		start = end;
1780 	}
1781 
1782 	skb_walk_frags(skb, frag_iter) {
1783 		__wsum csum2;
1784 		int end;
1785 
1786 		WARN_ON(start > offset + len);
1787 
1788 		end = start + frag_iter->len;
1789 		if ((copy = end - offset) > 0) {
1790 			if (copy > len)
1791 				copy = len;
1792 			csum2 = skb_copy_and_csum_bits(frag_iter,
1793 						       offset - start,
1794 						       to, copy, 0);
1795 			csum = csum_block_add(csum, csum2, pos);
1796 			if ((len -= copy) == 0)
1797 				return csum;
1798 			offset += copy;
1799 			to     += copy;
1800 			pos    += copy;
1801 		}
1802 		start = end;
1803 	}
1804 	BUG_ON(len);
1805 	return csum;
1806 }
1807 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1808 
1809 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1810 {
1811 	__wsum csum;
1812 	long csstart;
1813 
1814 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1815 		csstart = skb->csum_start - skb_headroom(skb);
1816 	else
1817 		csstart = skb_headlen(skb);
1818 
1819 	BUG_ON(csstart > skb_headlen(skb));
1820 
1821 	skb_copy_from_linear_data(skb, to, csstart);
1822 
1823 	csum = 0;
1824 	if (csstart != skb->len)
1825 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1826 					      skb->len - csstart, 0);
1827 
1828 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1829 		long csstuff = csstart + skb->csum_offset;
1830 
1831 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
1832 	}
1833 }
1834 EXPORT_SYMBOL(skb_copy_and_csum_dev);
1835 
1836 /**
1837  *	skb_dequeue - remove from the head of the queue
1838  *	@list: list to dequeue from
1839  *
1840  *	Remove the head of the list. The list lock is taken so the function
1841  *	may be used safely with other locking list functions. The head item is
1842  *	returned or %NULL if the list is empty.
1843  */
1844 
1845 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1846 {
1847 	unsigned long flags;
1848 	struct sk_buff *result;
1849 
1850 	spin_lock_irqsave(&list->lock, flags);
1851 	result = __skb_dequeue(list);
1852 	spin_unlock_irqrestore(&list->lock, flags);
1853 	return result;
1854 }
1855 EXPORT_SYMBOL(skb_dequeue);
1856 
1857 /**
1858  *	skb_dequeue_tail - remove from the tail of the queue
1859  *	@list: list to dequeue from
1860  *
1861  *	Remove the tail of the list. The list lock is taken so the function
1862  *	may be used safely with other locking list functions. The tail item is
1863  *	returned or %NULL if the list is empty.
1864  */
1865 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1866 {
1867 	unsigned long flags;
1868 	struct sk_buff *result;
1869 
1870 	spin_lock_irqsave(&list->lock, flags);
1871 	result = __skb_dequeue_tail(list);
1872 	spin_unlock_irqrestore(&list->lock, flags);
1873 	return result;
1874 }
1875 EXPORT_SYMBOL(skb_dequeue_tail);
1876 
1877 /**
1878  *	skb_queue_purge - empty a list
1879  *	@list: list to empty
1880  *
1881  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1882  *	the list and one reference dropped. This function takes the list
1883  *	lock and is atomic with respect to other list locking functions.
1884  */
1885 void skb_queue_purge(struct sk_buff_head *list)
1886 {
1887 	struct sk_buff *skb;
1888 	while ((skb = skb_dequeue(list)) != NULL)
1889 		kfree_skb(skb);
1890 }
1891 EXPORT_SYMBOL(skb_queue_purge);
1892 
1893 /**
1894  *	skb_queue_head - queue a buffer at the list head
1895  *	@list: list to use
1896  *	@newsk: buffer to queue
1897  *
1898  *	Queue a buffer at the start of the list. This function takes the
1899  *	list lock and can be used safely with other locking &sk_buff
1900  *	functions.
1901  *
1902  *	A buffer cannot be placed on two lists at the same time.
1903  */
1904 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1905 {
1906 	unsigned long flags;
1907 
1908 	spin_lock_irqsave(&list->lock, flags);
1909 	__skb_queue_head(list, newsk);
1910 	spin_unlock_irqrestore(&list->lock, flags);
1911 }
1912 EXPORT_SYMBOL(skb_queue_head);
1913 
1914 /**
1915  *	skb_queue_tail - queue a buffer at the list tail
1916  *	@list: list to use
1917  *	@newsk: buffer to queue
1918  *
1919  *	Queue a buffer at the tail of the list. This function takes the
1920  *	list lock and can be used safely with other locking &sk_buff
1921  *	functions.
1922  *
1923  *	A buffer cannot be placed on two lists at the same time.
1924  */
1925 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1926 {
1927 	unsigned long flags;
1928 
1929 	spin_lock_irqsave(&list->lock, flags);
1930 	__skb_queue_tail(list, newsk);
1931 	spin_unlock_irqrestore(&list->lock, flags);
1932 }
1933 EXPORT_SYMBOL(skb_queue_tail);
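
/*
 * Editorial sketch (illustrative, not part of the original file): the life
 * cycle of a private &sk_buff_head using the locked helpers above, so
 * producer and consumer need no extra locking of their own.
 */
static inline void skbuff_example_queue_demo(struct sk_buff_head *q,
					     struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_head_init(q);			/* once, before first use */
	skb_queue_tail(q, skb);			/* producer side */

	while ((next = skb_dequeue(q)) != NULL)	/* consumer side */
		kfree_skb(next);

	skb_queue_purge(q);	/* teardown path; a no-op here */
}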
1934 
1935 /**
1936  *	skb_unlink	-	remove a buffer from a list
1937  *	@skb: buffer to remove
1938  *	@list: list to use
1939  *
1940  *	Remove a packet from a list. The list locks are taken and this
1941  *	function is atomic with respect to other list-locked calls.
1942  *
1943  *	You must know what list the SKB is on.
1944  */
1945 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1946 {
1947 	unsigned long flags;
1948 
1949 	spin_lock_irqsave(&list->lock, flags);
1950 	__skb_unlink(skb, list);
1951 	spin_unlock_irqrestore(&list->lock, flags);
1952 }
1953 EXPORT_SYMBOL(skb_unlink);
1954 
1955 /**
1956  *	skb_append	-	append a buffer
1957  *	@old: buffer to insert after
1958  *	@newsk: buffer to insert
1959  *	@list: list to use
1960  *
1961  *	Place a packet after a given packet in a list. The list locks are taken
1962  *	and this function is atomic with respect to other list locked calls.
1963  *	A buffer cannot be placed on two lists at the same time.
1964  */
1965 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1966 {
1967 	unsigned long flags;
1968 
1969 	spin_lock_irqsave(&list->lock, flags);
1970 	__skb_queue_after(list, old, newsk);
1971 	spin_unlock_irqrestore(&list->lock, flags);
1972 }
1973 EXPORT_SYMBOL(skb_append);
1974 
1975 /**
1976  *	skb_insert	-	insert a buffer
1977  *	@old: buffer to insert before
1978  *	@newsk: buffer to insert
1979  *	@list: list to use
1980  *
1981  *	Place a packet before a given packet in a list. The list locks are
1982  * 	taken and this function is atomic with respect to other list locked
1983  *	calls.
1984  *
1985  *	A buffer cannot be placed on two lists at the same time.
1986  */
1987 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1988 {
1989 	unsigned long flags;
1990 
1991 	spin_lock_irqsave(&list->lock, flags);
1992 	__skb_insert(newsk, old->prev, old, list);
1993 	spin_unlock_irqrestore(&list->lock, flags);
1994 }
1995 EXPORT_SYMBOL(skb_insert);
1996 
1997 static inline void skb_split_inside_header(struct sk_buff *skb,
1998 					   struct sk_buff* skb1,
1999 					   const u32 len, const int pos)
2000 {
2001 	int i;
2002 
2003 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2004 					 pos - len);
2005 	/* And move data appendix as is. */
2006 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2007 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2008 
2009 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2010 	skb_shinfo(skb)->nr_frags  = 0;
2011 	skb1->data_len		   = skb->data_len;
2012 	skb1->len		   += skb1->data_len;
2013 	skb->data_len		   = 0;
2014 	skb->len		   = len;
2015 	skb_set_tail_pointer(skb, len);
2016 }
2017 
2018 static inline void skb_split_no_header(struct sk_buff *skb,
2019 				       struct sk_buff* skb1,
2020 				       const u32 len, int pos)
2021 {
2022 	int i, k = 0;
2023 	const int nfrags = skb_shinfo(skb)->nr_frags;
2024 
2025 	skb_shinfo(skb)->nr_frags = 0;
2026 	skb1->len		  = skb1->data_len = skb->len - len;
2027 	skb->len		  = len;
2028 	skb->data_len		  = len - pos;
2029 
2030 	for (i = 0; i < nfrags; i++) {
2031 		int size = skb_shinfo(skb)->frags[i].size;
2032 
2033 		if (pos + size > len) {
2034 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2035 
2036 			if (pos < len) {
2037 				/* Split frag.
2038 				 * We have two variants in this case:
2039 				 * 1. Move the whole frag to the second
2040 				 *    part, if it is possible (e.g. this
2041 				 *    approach is mandatory for TUX,
2042 				 *    where splitting is expensive).
2043 				 * 2. Split the frag accurately; we do that.
2044 				 */
2045 				get_page(skb_shinfo(skb)->frags[i].page);
2046 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2047 				skb_shinfo(skb1)->frags[0].size -= len - pos;
2048 				skb_shinfo(skb)->frags[i].size	= len - pos;
2049 				skb_shinfo(skb)->nr_frags++;
2050 			}
2051 			k++;
2052 		} else
2053 			skb_shinfo(skb)->nr_frags++;
2054 		pos += size;
2055 	}
2056 	skb_shinfo(skb1)->nr_frags = k;
2057 }
2058 
2059 /**
2060  * skb_split - Split fragmented skb to two parts at length len.
2061  * @skb: the buffer to split
2062  * @skb1: the buffer to receive the second part
2063  * @len: new length for skb
2064  */
2065 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2066 {
2067 	int pos = skb_headlen(skb);
2068 
2069 	if (len < pos)	/* Split line is inside header. */
2070 		skb_split_inside_header(skb, skb1, len, pos);
2071 	else		/* Second chunk has no header, nothing to copy. */
2072 		skb_split_no_header(skb, skb1, len, pos);
2073 }
2074 EXPORT_SYMBOL(skb_split);
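
/*
 * Illustrative sketch, not part of the original source: splitting a
 * fully built buffer at @len bytes, roughly the way TCP fragments a
 * segment before retransmission.  Afterwards @skb holds the first
 * @len bytes and buff the remainder; buff needs enough tailroom for
 * any linear data copied into it.
 *
 *	struct sk_buff *buff;
 *
 *	buff = alloc_skb(skb_headroom(skb) + skb_headlen(skb), GFP_ATOMIC);
 *	if (buff) {
 *		skb_reserve(buff, skb_headroom(skb));
 *		skb_split(skb, buff, len);
 *	}
 */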
2075 
2076 /* Shifting from/to a cloned skb is a no-go.
2077  *
2078  * Caller cannot keep skb_shinfo related pointers past calling here!
2079  */
2080 static int skb_prepare_for_shift(struct sk_buff *skb)
2081 {
2082 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2083 }
2084 
2085 /**
2086  * skb_shift - Shifts paged data partially from skb to another
2087  * @tgt: buffer into which tail data gets added
2088  * @skb: buffer from which the paged data comes from
2089  * @shiftlen: shift up to this many bytes
2090  *
2091  * Attempts to shift up to @shiftlen worth of bytes, which may be less than
2092  * the length of the skb, from @skb to @tgt. Returns the number of bytes
2093  * shifted. It's up to the caller to free @skb if everything was shifted.
2094  *
2095  * If @tgt runs out of frags, the whole operation is aborted.
2096  *
2097  * @skb cannot include anything else but paged data, while @tgt is allowed
2098  * to have non-paged data as well.
2099  *
2100  * TODO: full sized shift could be optimized but that would need
2101  * specialized skb free'er to handle frags without up-to-date nr_frags.
2102  */
2103 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2104 {
2105 	int from, to, merge, todo;
2106 	struct skb_frag_struct *fragfrom, *fragto;
2107 
2108 	BUG_ON(shiftlen > skb->len);
2109 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2110 
2111 	todo = shiftlen;
2112 	from = 0;
2113 	to = skb_shinfo(tgt)->nr_frags;
2114 	fragfrom = &skb_shinfo(skb)->frags[from];
2115 
2116 	/* Actual merge is delayed until the point when we know we can
2117 	 * commit all, so that we don't have to undo partial changes
2118 	 */
2119 	if (!to ||
2120 	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2121 		merge = -1;
2122 	} else {
2123 		merge = to - 1;
2124 
2125 		todo -= fragfrom->size;
2126 		if (todo < 0) {
2127 			if (skb_prepare_for_shift(skb) ||
2128 			    skb_prepare_for_shift(tgt))
2129 				return 0;
2130 
2131 			/* All previous frag pointers might be stale! */
2132 			fragfrom = &skb_shinfo(skb)->frags[from];
2133 			fragto = &skb_shinfo(tgt)->frags[merge];
2134 
2135 			fragto->size += shiftlen;
2136 			fragfrom->size -= shiftlen;
2137 			fragfrom->page_offset += shiftlen;
2138 
2139 			goto onlymerged;
2140 		}
2141 
2142 		from++;
2143 	}
2144 
2145 	/* Skip full, not-fitting skb to avoid expensive operations */
2146 	if ((shiftlen == skb->len) &&
2147 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2148 		return 0;
2149 
2150 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2151 		return 0;
2152 
2153 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2154 		if (to == MAX_SKB_FRAGS)
2155 			return 0;
2156 
2157 		fragfrom = &skb_shinfo(skb)->frags[from];
2158 		fragto = &skb_shinfo(tgt)->frags[to];
2159 
2160 		if (todo >= fragfrom->size) {
2161 			*fragto = *fragfrom;
2162 			todo -= fragfrom->size;
2163 			from++;
2164 			to++;
2165 
2166 		} else {
2167 			get_page(fragfrom->page);
2168 			fragto->page = fragfrom->page;
2169 			fragto->page_offset = fragfrom->page_offset;
2170 			fragto->size = todo;
2171 
2172 			fragfrom->page_offset += todo;
2173 			fragfrom->size -= todo;
2174 			todo = 0;
2175 
2176 			to++;
2177 			break;
2178 		}
2179 	}
2180 
2181 	/* Ready to "commit" this state change to tgt */
2182 	skb_shinfo(tgt)->nr_frags = to;
2183 
2184 	if (merge >= 0) {
2185 		fragfrom = &skb_shinfo(skb)->frags[0];
2186 		fragto = &skb_shinfo(tgt)->frags[merge];
2187 
2188 		fragto->size += fragfrom->size;
2189 		put_page(fragfrom->page);
2190 	}
2191 
2192 	/* Reposition in the original skb */
2193 	to = 0;
2194 	while (from < skb_shinfo(skb)->nr_frags)
2195 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2196 	skb_shinfo(skb)->nr_frags = to;
2197 
2198 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2199 
2200 onlymerged:
2201 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2202 	 * the other hand might need it if it needs to be resent
2203 	 */
2204 	tgt->ip_summed = CHECKSUM_PARTIAL;
2205 	skb->ip_summed = CHECKSUM_PARTIAL;
2206 
2207 	/* Yak, is it really working this way? Some helper please? */
2208 	skb->len -= shiftlen;
2209 	skb->data_len -= shiftlen;
2210 	skb->truesize -= shiftlen;
2211 	tgt->len += shiftlen;
2212 	tgt->data_len += shiftlen;
2213 	tgt->truesize += shiftlen;
2214 
2215 	return shiftlen;
2216 }
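
/*
 * Illustrative sketch, not part of the original source: a caller such
 * as TCP's SACK code shifts paged data from a purely paged skb into
 * the previous buffer and frees the source once it is empty (prev,
 * len and the hypothetical 'queue' the buffer sits on are assumed to
 * be set up by the caller).
 *
 *	shifted = skb_shift(prev, skb, len);
 *	if (shifted && !skb->len) {
 *		skb_unlink(skb, queue);
 *		kfree_skb(skb);
 *	}
 */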
2217 
2218 /**
2219  * skb_prepare_seq_read - Prepare a sequential read of skb data
2220  * @skb: the buffer to read
2221  * @from: lower offset of data to be read
2222  * @to: upper offset of data to be read
2223  * @st: state variable
2224  *
2225  * Initializes the specified state variable. Must be called before
2226  * invoking skb_seq_read() for the first time.
2227  */
2228 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2229 			  unsigned int to, struct skb_seq_state *st)
2230 {
2231 	st->lower_offset = from;
2232 	st->upper_offset = to;
2233 	st->root_skb = st->cur_skb = skb;
2234 	st->frag_idx = st->stepped_offset = 0;
2235 	st->frag_data = NULL;
2236 }
2237 EXPORT_SYMBOL(skb_prepare_seq_read);
2238 
2239 /**
2240  * skb_seq_read - Sequentially read skb data
2241  * @consumed: number of bytes consumed by the caller so far
2242  * @data: destination pointer for data to be returned
2243  * @st: state variable
2244  *
2245  * Reads a block of skb data at &consumed relative to the
2246  * lower offset specified to skb_prepare_seq_read(). Assigns
2247  * the head of the data block to &data and returns the length
2248  * of the block or 0 if the end of the skb data or the upper
2249  * offset has been reached.
2250  *
2251  * The caller is not required to consume all of the data
2252  * returned, i.e. &consumed is typically set to the number
2253  * of bytes already consumed and the next call to
2254  * skb_seq_read() will return the remaining part of the block.
2255  *
2256  * Note 1: The size of each block of data returned can be arbitrary,
2257  *       this limitation is the cost for zerocopy sequential
2258  *       reads of potentially non-linear data.
2259  *
2260  * Note 2: Fragment lists within fragments are not implemented
2261  *       at the moment, state->root_skb could be replaced with
2262  *       a stack for this purpose.
2263  */
2264 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2265 			  struct skb_seq_state *st)
2266 {
2267 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2268 	skb_frag_t *frag;
2269 
2270 	if (unlikely(abs_offset >= st->upper_offset))
2271 		return 0;
2272 
2273 next_skb:
2274 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2275 
2276 	if (abs_offset < block_limit && !st->frag_data) {
2277 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2278 		return block_limit - abs_offset;
2279 	}
2280 
2281 	if (st->frag_idx == 0 && !st->frag_data)
2282 		st->stepped_offset += skb_headlen(st->cur_skb);
2283 
2284 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2285 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2286 		block_limit = frag->size + st->stepped_offset;
2287 
2288 		if (abs_offset < block_limit) {
2289 			if (!st->frag_data)
2290 				st->frag_data = kmap_skb_frag(frag);
2291 
2292 			*data = (u8 *) st->frag_data + frag->page_offset +
2293 				(abs_offset - st->stepped_offset);
2294 
2295 			return block_limit - abs_offset;
2296 		}
2297 
2298 		if (st->frag_data) {
2299 			kunmap_skb_frag(st->frag_data);
2300 			st->frag_data = NULL;
2301 		}
2302 
2303 		st->frag_idx++;
2304 		st->stepped_offset += frag->size;
2305 	}
2306 
2307 	if (st->frag_data) {
2308 		kunmap_skb_frag(st->frag_data);
2309 		st->frag_data = NULL;
2310 	}
2311 
2312 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2313 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2314 		st->frag_idx = 0;
2315 		goto next_skb;
2316 	} else if (st->cur_skb->next) {
2317 		st->cur_skb = st->cur_skb->next;
2318 		st->frag_idx = 0;
2319 		goto next_skb;
2320 	}
2321 
2322 	return 0;
2323 }
2324 EXPORT_SYMBOL(skb_seq_read);
2325 
2326 /**
2327  * skb_abort_seq_read - Abort a sequential read of skb data
2328  * @st: state variable
2329  *
2330  * Must be called if the sequential read was aborted, i.e. if
2331  * skb_seq_read() was not called until it returned 0.
2332  */
2333 void skb_abort_seq_read(struct skb_seq_state *st)
2334 {
2335 	if (st->frag_data)
2336 		kunmap_skb_frag(st->frag_data);
2337 }
2338 EXPORT_SYMBOL(skb_abort_seq_read);
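
/*
 * Illustrative sketch, not part of the original source: the intended
 * calling pattern for the sequential-read API above.  handle_block()
 * is a hypothetical consumer; 'done' counts bytes consumed so far.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int len, done = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(done, &data, &st)) != 0) {
 *		if (handle_block(data, len) < 0) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		done += len;
 *	}
 */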
2339 
2340 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2341 
2342 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2343 					  struct ts_config *conf,
2344 					  struct ts_state *state)
2345 {
2346 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2347 }
2348 
2349 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2350 {
2351 	skb_abort_seq_read(TS_SKB_CB(state));
2352 }
2353 
2354 /**
2355  * skb_find_text - Find a text pattern in skb data
2356  * @skb: the buffer to look in
2357  * @from: search offset
2358  * @to: search limit
2359  * @config: textsearch configuration
2360  * @state: uninitialized textsearch state variable
2361  *
2362  * Finds a pattern in the skb data according to the specified
2363  * textsearch configuration. Use textsearch_next() to retrieve
2364  * subsequent occurrences of the pattern. Returns the offset
2365  * to the first occurrence or UINT_MAX if no match was found.
2366  */
2367 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2368 			   unsigned int to, struct ts_config *config,
2369 			   struct ts_state *state)
2370 {
2371 	unsigned int ret;
2372 
2373 	config->get_next_block = skb_ts_get_next_block;
2374 	config->finish = skb_ts_finish;
2375 
2376 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2377 
2378 	ret = textsearch_find(config, state);
2379 	return (ret <= to - from ? ret : UINT_MAX);
2380 }
2381 EXPORT_SYMBOL(skb_find_text);
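
/*
 * Illustrative sketch, not part of the original source: searching an
 * skb for a literal pattern with the textsearch infrastructure (the
 * "kmp" algorithm and the pattern are only examples).
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
 *				  TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			pr_debug("match at offset %u\n", pos);
 *		textsearch_destroy(conf);
 *	}
 */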
2382 
2383 /**
2384  * skb_append_datato_frags - append user data to an skb
2385  * @sk: sock structure
2386  * @skb: skb structure to which the user data is appended
2387  * @getfrag: callback function used to get the user data
2388  * @from: pointer to user message iov
2389  * @length: length of the iov message
2390  *
2391  * Description: This procedure appends the user data to the fragment part
2392  * of the skb. If any page allocation fails, it returns -ENOMEM.
2393  */
2394 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2395 			int (*getfrag)(void *from, char *to, int offset,
2396 					int len, int odd, struct sk_buff *skb),
2397 			void *from, int length)
2398 {
2399 	int frg_cnt = 0;
2400 	skb_frag_t *frag = NULL;
2401 	struct page *page = NULL;
2402 	int copy, left;
2403 	int offset = 0;
2404 	int ret;
2405 
2406 	do {
2407 		/* Return error if we don't have space for new frag */
2408 		frg_cnt = skb_shinfo(skb)->nr_frags;
2409 		if (frg_cnt >= MAX_SKB_FRAGS)
2410 			return -EFAULT;
2411 
2412 		/* allocate a new page for next frag */
2413 		page = alloc_pages(sk->sk_allocation, 0);
2414 
2415 		/* If alloc_page fails just return failure and caller will
2416 		 * free previously allocated pages by doing kfree_skb()
2417 		 */
2418 		if (page == NULL)
2419 			return -ENOMEM;
2420 
2421 		/* initialize the next frag */
2422 		sk->sk_sndmsg_page = page;
2423 		sk->sk_sndmsg_off = 0;
2424 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2425 		skb->truesize += PAGE_SIZE;
2426 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2427 
2428 		/* get the new initialized frag */
2429 		frg_cnt = skb_shinfo(skb)->nr_frags;
2430 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2431 
2432 		/* copy the user data to page */
2433 		left = PAGE_SIZE - frag->page_offset;
2434 		copy = (length > left)? left : length;
2435 
2436 		ret = getfrag(from, (page_address(frag->page) +
2437 			    frag->page_offset + frag->size),
2438 			    offset, copy, 0, skb);
2439 		if (ret < 0)
2440 			return -EFAULT;
2441 
2442 		/* copy was successful so update the size parameters */
2443 		sk->sk_sndmsg_off += copy;
2444 		frag->size += copy;
2445 		skb->len += copy;
2446 		skb->data_len += copy;
2447 		offset += copy;
2448 		length -= copy;
2449 
2450 	} while (length > 0);
2451 
2452 	return 0;
2453 }
2454 EXPORT_SYMBOL(skb_append_datato_frags);
2455 
2456 /**
2457  *	skb_pull_rcsum - pull skb and update receive checksum
2458  *	@skb: buffer to update
2459  *	@len: length of data pulled
2460  *
2461  *	This function performs an skb_pull on the packet and updates
2462  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2463  *	receive path processing instead of skb_pull unless you know
2464  *	that the checksum difference is zero (e.g., a valid IP header)
2465  *	or you are setting ip_summed to CHECKSUM_NONE.
2466  */
2467 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2468 {
2469 	BUG_ON(len > skb->len);
2470 	skb->len -= len;
2471 	BUG_ON(skb->len < skb->data_len);
2472 	skb_postpull_rcsum(skb, skb->data, len);
2473 	return skb->data += len;
2474 }
2475 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
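
/*
 * Illustrative sketch, not part of the original source: a receive
 * handler stripping a fixed-size encapsulation header while keeping
 * a CHECKSUM_COMPLETE value consistent (HDR_LEN is a hypothetical
 * header length).
 *
 *	if (!pskb_may_pull(skb, HDR_LEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, HDR_LEN);
 */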
2476 
2477 /**
2478  *	skb_segment - Perform protocol segmentation on skb.
2479  *	@skb: buffer to segment
2480  *	@features: features for the output path (see dev->features)
2481  *
2482  *	This function performs segmentation on the given skb.  It returns
2483  *	a pointer to the first in a list of new skbs for the segments.
2484  *	In case of error it returns ERR_PTR(err).
2485  */
2486 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2487 {
2488 	struct sk_buff *segs = NULL;
2489 	struct sk_buff *tail = NULL;
2490 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2491 	unsigned int mss = skb_shinfo(skb)->gso_size;
2492 	unsigned int doffset = skb->data - skb_mac_header(skb);
2493 	unsigned int offset = doffset;
2494 	unsigned int headroom;
2495 	unsigned int len;
2496 	int sg = features & NETIF_F_SG;
2497 	int nfrags = skb_shinfo(skb)->nr_frags;
2498 	int err = -ENOMEM;
2499 	int i = 0;
2500 	int pos;
2501 
2502 	__skb_push(skb, doffset);
2503 	headroom = skb_headroom(skb);
2504 	pos = skb_headlen(skb);
2505 
2506 	do {
2507 		struct sk_buff *nskb;
2508 		skb_frag_t *frag;
2509 		int hsize;
2510 		int size;
2511 
2512 		len = skb->len - offset;
2513 		if (len > mss)
2514 			len = mss;
2515 
2516 		hsize = skb_headlen(skb) - offset;
2517 		if (hsize < 0)
2518 			hsize = 0;
2519 		if (hsize > len || !sg)
2520 			hsize = len;
2521 
2522 		if (!hsize && i >= nfrags) {
2523 			BUG_ON(fskb->len != len);
2524 
2525 			pos += len;
2526 			nskb = skb_clone(fskb, GFP_ATOMIC);
2527 			fskb = fskb->next;
2528 
2529 			if (unlikely(!nskb))
2530 				goto err;
2531 
2532 			hsize = skb_end_pointer(nskb) - nskb->head;
2533 			if (skb_cow_head(nskb, doffset + headroom)) {
2534 				kfree_skb(nskb);
2535 				goto err;
2536 			}
2537 
2538 			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2539 					  hsize;
2540 			skb_release_head_state(nskb);
2541 			__skb_push(nskb, doffset);
2542 		} else {
2543 			nskb = alloc_skb(hsize + doffset + headroom,
2544 					 GFP_ATOMIC);
2545 
2546 			if (unlikely(!nskb))
2547 				goto err;
2548 
2549 			skb_reserve(nskb, headroom);
2550 			__skb_put(nskb, doffset);
2551 		}
2552 
2553 		if (segs)
2554 			tail->next = nskb;
2555 		else
2556 			segs = nskb;
2557 		tail = nskb;
2558 
2559 		__copy_skb_header(nskb, skb);
2560 		nskb->mac_len = skb->mac_len;
2561 
2562 		/* nskb and skb might have different headroom */
2563 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
2564 			nskb->csum_start += skb_headroom(nskb) - headroom;
2565 
2566 		skb_reset_mac_header(nskb);
2567 		skb_set_network_header(nskb, skb->mac_len);
2568 		nskb->transport_header = (nskb->network_header +
2569 					  skb_network_header_len(skb));
2570 		skb_copy_from_linear_data(skb, nskb->data, doffset);
2571 
2572 		if (fskb != skb_shinfo(skb)->frag_list)
2573 			continue;
2574 
2575 		if (!sg) {
2576 			nskb->ip_summed = CHECKSUM_NONE;
2577 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2578 							    skb_put(nskb, len),
2579 							    len, 0);
2580 			continue;
2581 		}
2582 
2583 		frag = skb_shinfo(nskb)->frags;
2584 
2585 		skb_copy_from_linear_data_offset(skb, offset,
2586 						 skb_put(nskb, hsize), hsize);
2587 
2588 		while (pos < offset + len && i < nfrags) {
2589 			*frag = skb_shinfo(skb)->frags[i];
2590 			get_page(frag->page);
2591 			size = frag->size;
2592 
2593 			if (pos < offset) {
2594 				frag->page_offset += offset - pos;
2595 				frag->size -= offset - pos;
2596 			}
2597 
2598 			skb_shinfo(nskb)->nr_frags++;
2599 
2600 			if (pos + size <= offset + len) {
2601 				i++;
2602 				pos += size;
2603 			} else {
2604 				frag->size -= pos + size - (offset + len);
2605 				goto skip_fraglist;
2606 			}
2607 
2608 			frag++;
2609 		}
2610 
2611 		if (pos < offset + len) {
2612 			struct sk_buff *fskb2 = fskb;
2613 
2614 			BUG_ON(pos + fskb->len != offset + len);
2615 
2616 			pos += fskb->len;
2617 			fskb = fskb->next;
2618 
2619 			if (fskb2->next) {
2620 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2621 				if (!fskb2)
2622 					goto err;
2623 			} else
2624 				skb_get(fskb2);
2625 
2626 			SKB_FRAG_ASSERT(nskb);
2627 			skb_shinfo(nskb)->frag_list = fskb2;
2628 		}
2629 
2630 skip_fraglist:
2631 		nskb->data_len = len - hsize;
2632 		nskb->len += nskb->data_len;
2633 		nskb->truesize += nskb->data_len;
2634 	} while ((offset += len) < skb->len);
2635 
2636 	return segs;
2637 
2638 err:
2639 	while ((skb = segs)) {
2640 		segs = skb->next;
2641 		kfree_skb(skb);
2642 	}
2643 	return ERR_PTR(err);
2644 }
2645 EXPORT_SYMBOL_GPL(skb_segment);
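
/*
 * Illustrative sketch, not part of the original source: a protocol's
 * gso_segment handler typically wraps skb_segment() like this and
 * then walks the returned list fixing up each segment's headers.
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;
 */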
2646 
2647 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2648 {
2649 	struct sk_buff *p = *head;
2650 	struct sk_buff *nskb;
2651 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2652 	struct skb_shared_info *pinfo = skb_shinfo(p);
2653 	unsigned int headroom;
2654 	unsigned int len = skb_gro_len(skb);
2655 	unsigned int offset = skb_gro_offset(skb);
2656 	unsigned int headlen = skb_headlen(skb);
2657 
2658 	if (p->len + len >= 65536)
2659 		return -E2BIG;
2660 
2661 	if (pinfo->frag_list)
2662 		goto merge;
2663 	else if (headlen <= offset) {
2664 		skb_frag_t *frag;
2665 		skb_frag_t *frag2;
2666 		int i = skbinfo->nr_frags;
2667 		int nr_frags = pinfo->nr_frags + i;
2668 
2669 		offset -= headlen;
2670 
2671 		if (nr_frags > MAX_SKB_FRAGS)
2672 			return -E2BIG;
2673 
2674 		pinfo->nr_frags = nr_frags;
2675 		skbinfo->nr_frags = 0;
2676 
2677 		frag = pinfo->frags + nr_frags;
2678 		frag2 = skbinfo->frags + i;
2679 		do {
2680 			*--frag = *--frag2;
2681 		} while (--i);
2682 
2683 		frag->page_offset += offset;
2684 		frag->size -= offset;
2685 
2686 		skb->truesize -= skb->data_len;
2687 		skb->len -= skb->data_len;
2688 		skb->data_len = 0;
2689 
2690 		NAPI_GRO_CB(skb)->free = 1;
2691 		goto done;
2692 	} else if (skb_gro_len(p) != pinfo->gso_size)
2693 		return -E2BIG;
2694 
2695 	headroom = skb_headroom(p);
2696 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2697 	if (unlikely(!nskb))
2698 		return -ENOMEM;
2699 
2700 	__copy_skb_header(nskb, p);
2701 	nskb->mac_len = p->mac_len;
2702 
2703 	skb_reserve(nskb, headroom);
2704 	__skb_put(nskb, skb_gro_offset(p));
2705 
2706 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2707 	skb_set_network_header(nskb, skb_network_offset(p));
2708 	skb_set_transport_header(nskb, skb_transport_offset(p));
2709 
2710 	__skb_pull(p, skb_gro_offset(p));
2711 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2712 	       p->data - skb_mac_header(p));
2713 
2714 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2715 	skb_shinfo(nskb)->frag_list = p;
2716 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2717 	pinfo->gso_size = 0;
2718 	skb_header_release(p);
2719 	nskb->prev = p;
2720 
2721 	nskb->data_len += p->len;
2722 	nskb->truesize += p->len;
2723 	nskb->len += p->len;
2724 
2725 	*head = nskb;
2726 	nskb->next = p->next;
2727 	p->next = NULL;
2728 
2729 	p = nskb;
2730 
2731 merge:
2732 	if (offset > headlen) {
2733 		skbinfo->frags[0].page_offset += offset - headlen;
2734 		skbinfo->frags[0].size -= offset - headlen;
2735 		offset = headlen;
2736 	}
2737 
2738 	__skb_pull(skb, offset);
2739 
2740 	p->prev->next = skb;
2741 	p->prev = skb;
2742 	skb_header_release(skb);
2743 
2744 done:
2745 	NAPI_GRO_CB(p)->count++;
2746 	p->data_len += len;
2747 	p->truesize += len;
2748 	p->len += len;
2749 
2750 	NAPI_GRO_CB(skb)->same_flow = 1;
2751 	return 0;
2752 }
2753 EXPORT_SYMBOL_GPL(skb_gro_receive);
2754 
2755 void __init skb_init(void)
2756 {
2757 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2758 					      sizeof(struct sk_buff),
2759 					      0,
2760 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2761 					      NULL);
2762 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2763 						(2*sizeof(struct sk_buff)) +
2764 						sizeof(atomic_t),
2765 						0,
2766 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2767 						NULL);
2768 }
2769 
2770 /**
2771  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2772  *	@skb: Socket buffer containing the buffers to be mapped
2773  *	@sg: The scatter-gather list to map into
2774  *	@offset: The offset into the buffer's contents to start mapping
2775  *	@len: Length of buffer space to be mapped
2776  *
2777  *	Fill the specified scatter-gather list with mappings/pointers into a
2778  *	region of the buffer space attached to a socket buffer.
2779  */
2780 static int
2781 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2782 {
2783 	int start = skb_headlen(skb);
2784 	int i, copy = start - offset;
2785 	struct sk_buff *frag_iter;
2786 	int elt = 0;
2787 
2788 	if (copy > 0) {
2789 		if (copy > len)
2790 			copy = len;
2791 		sg_set_buf(sg, skb->data + offset, copy);
2792 		elt++;
2793 		if ((len -= copy) == 0)
2794 			return elt;
2795 		offset += copy;
2796 	}
2797 
2798 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2799 		int end;
2800 
2801 		WARN_ON(start > offset + len);
2802 
2803 		end = start + skb_shinfo(skb)->frags[i].size;
2804 		if ((copy = end - offset) > 0) {
2805 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2806 
2807 			if (copy > len)
2808 				copy = len;
2809 			sg_set_page(&sg[elt], frag->page, copy,
2810 					frag->page_offset+offset-start);
2811 			elt++;
2812 			if (!(len -= copy))
2813 				return elt;
2814 			offset += copy;
2815 		}
2816 		start = end;
2817 	}
2818 
2819 	skb_walk_frags(skb, frag_iter) {
2820 		int end;
2821 
2822 		WARN_ON(start > offset + len);
2823 
2824 		end = start + frag_iter->len;
2825 		if ((copy = end - offset) > 0) {
2826 			if (copy > len)
2827 				copy = len;
2828 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2829 					      copy);
2830 			if ((len -= copy) == 0)
2831 				return elt;
2832 			offset += copy;
2833 		}
2834 		start = end;
2835 	}
2836 	BUG_ON(len);
2837 	return elt;
2838 }
2839 
2840 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2841 {
2842 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
2843 
2844 	sg_mark_end(&sg[nsg - 1]);
2845 
2846 	return nsg;
2847 }
2848 EXPORT_SYMBOL_GPL(skb_to_sgvec);
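
/*
 * Illustrative sketch, not part of the original source: mapping a
 * whole skb into a scatterlist, e.g. before handing it to the crypto
 * layer.  The array size assumes a linear head plus at most
 * MAX_SKB_FRAGS page fragments and no fragment list.
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 */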
2849 
2850 /**
2851  *	skb_cow_data - Check that a socket buffer's data buffers are writable
2852  *	@skb: The socket buffer to check.
2853  *	@tailbits: Amount of trailing space to be added
2854  *	@trailer: Returned pointer to the skb where the @tailbits space begins
2855  *
2856  *	Make sure that the data buffers attached to a socket buffer are
2857  *	writable. If they are not, private copies are made of the data buffers
2858  *	and the socket buffer is set to use these instead.
2859  *
2860  *	If @tailbits is given, make sure that there is space to write @tailbits
2861  *	bytes of data beyond the current end of the socket buffer.  @trailer will be
2862  *	set to point to the skb in which this space begins.
2863  *
2864  *	The number of scatterlist elements required to completely map the
2865  *	COW'd and extended socket buffer will be returned.
2866  */
2867 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2868 {
2869 	int copyflag;
2870 	int elt;
2871 	struct sk_buff *skb1, **skb_p;
2872 
2873 	/* If skb is cloned or its head is paged, reallocate
2874 	 * head pulling out all the pages (pages are considered not writable
2875 	 * at the moment even if they are anonymous).
2876 	 */
2877 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2878 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2879 		return -ENOMEM;
2880 
2881 	/* Easy case. Most of packets will go this way. */
2882 	/* Easy case. Most packets will go this way. */
2883 		/* A little trouble: not enough space for the trailer.
2884 		 * This should not happen when the stack is tuned to generate
2885 		 * good frames. OK, on a miss we reallocate and reserve even
2886 		 * more space; 128 bytes is fair. */
2887 
2888 		if (skb_tailroom(skb) < tailbits &&
2889 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2890 			return -ENOMEM;
2891 
2892 		/* Voila! */
2893 		*trailer = skb;
2894 		return 1;
2895 	}
2896 
2897 	/* Misery. We are in trouble, going to mince the fragments... */
2898 
2899 	elt = 1;
2900 	skb_p = &skb_shinfo(skb)->frag_list;
2901 	copyflag = 0;
2902 
2903 	while ((skb1 = *skb_p) != NULL) {
2904 		int ntail = 0;
2905 
2906 		/* The fragment is partially pulled by someone,
2907 		 * this can happen on input. Copy it and everything
2908 		 * after it. */
2909 
2910 		if (skb_shared(skb1))
2911 			copyflag = 1;
2912 
2913 		/* If the skb is the last, worry about trailer. */
2914 
2915 		if (skb1->next == NULL && tailbits) {
2916 			if (skb_shinfo(skb1)->nr_frags ||
2917 			    skb_has_frag_list(skb1) ||
2918 			    skb_tailroom(skb1) < tailbits)
2919 				ntail = tailbits + 128;
2920 		}
2921 
2922 		if (copyflag ||
2923 		    skb_cloned(skb1) ||
2924 		    ntail ||
2925 		    skb_shinfo(skb1)->nr_frags ||
2926 		    skb_has_frag_list(skb1)) {
2927 			struct sk_buff *skb2;
2928 
2929 			/* Fuck, we are miserable poor guys... */
2930 			if (ntail == 0)
2931 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2932 			else
2933 				skb2 = skb_copy_expand(skb1,
2934 						       skb_headroom(skb1),
2935 						       ntail,
2936 						       GFP_ATOMIC);
2937 			if (unlikely(skb2 == NULL))
2938 				return -ENOMEM;
2939 
2940 			if (skb1->sk)
2941 				skb_set_owner_w(skb2, skb1->sk);
2942 
2943 			/* Looking around. Are we still alive?
2944 			 * OK, link new skb, drop old one */
2945 
2946 			skb2->next = skb1->next;
2947 			*skb_p = skb2;
2948 			kfree_skb(skb1);
2949 			skb1 = skb2;
2950 		}
2951 		elt++;
2952 		*trailer = skb1;
2953 		skb_p = &skb1->next;
2954 	}
2955 
2956 	return elt;
2957 }
2958 EXPORT_SYMBOL_GPL(skb_cow_data);
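
/*
 * Illustrative sketch, not part of the original source: IPsec-style
 * callers typically pair skb_cow_data() with skb_to_sgvec(), using
 * the returned element count to size the scatterlist (alloc_sg() is
 * a hypothetical allocator).
 *
 *	struct scatterlist *sg;
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, 0, &trailer);
 *	if (nfrags < 0)
 *		goto error;
 *	sg = alloc_sg(nfrags);
 *	skb_to_sgvec(skb, sg, 0, skb->len);
 */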
2959 
2960 static void sock_rmem_free(struct sk_buff *skb)
2961 {
2962 	struct sock *sk = skb->sk;
2963 
2964 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2965 }
2966 
2967 /*
2968  * Note: We don't memory-charge error packets (no sk_forward_alloc changes)
2969  */
2970 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2971 {
2972 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
2973 	    (unsigned)sk->sk_rcvbuf)
2974 		return -ENOMEM;
2975 
2976 	skb_orphan(skb);
2977 	skb->sk = sk;
2978 	skb->destructor = sock_rmem_free;
2979 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2980 
2981 	skb_queue_tail(&sk->sk_error_queue, skb);
2982 	if (!sock_flag(sk, SOCK_DEAD))
2983 		sk->sk_data_ready(sk, skb->len);
2984 	return 0;
2985 }
2986 EXPORT_SYMBOL(sock_queue_err_skb);
2987 
2988 void skb_tstamp_tx(struct sk_buff *orig_skb,
2989 		struct skb_shared_hwtstamps *hwtstamps)
2990 {
2991 	struct sock *sk = orig_skb->sk;
2992 	struct sock_exterr_skb *serr;
2993 	struct sk_buff *skb;
2994 	int err;
2995 
2996 	if (!sk)
2997 		return;
2998 
2999 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3000 	if (!skb)
3001 		return;
3002 
3003 	if (hwtstamps) {
3004 		*skb_hwtstamps(skb) =
3005 			*hwtstamps;
3006 	} else {
3007 		/*
3008 		 * no hardware time stamps available,
3009 		 * so keep the shared tx_flags and only
3010 		 * store software time stamp
3011 		 */
3012 		skb->tstamp = ktime_get_real();
3013 	}
3014 
3015 	serr = SKB_EXT_ERR(skb);
3016 	memset(serr, 0, sizeof(*serr));
3017 	serr->ee.ee_errno = ENOMSG;
3018 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3019 
3020 	err = sock_queue_err_skb(sk, skb);
3021 
3022 	if (err)
3023 		kfree_skb(skb);
3024 }
3025 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
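
/*
 * Illustrative sketch, not part of the original source: a driver that
 * obtained a hardware transmit timestamp (here a raw nanosecond value
 * 'ns' from the NIC) reports it back to the owning socket like this.
 *
 *	struct skb_shared_hwtstamps hwts;
 *
 *	memset(&hwts, 0, sizeof(hwts));
 *	hwts.hwtstamp = ns_to_ktime(ns);
 *	skb_tstamp_tx(skb, &hwts);
 */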
3026 
3027 
3028 /**
3029  * skb_partial_csum_set - set up and verify partial csum values for packet
3030  * @skb: the skb to set
3031  * @start: the number of bytes after skb->data to start checksumming.
3032  * @off: the offset from start to place the checksum.
3033  *
3034  * For untrusted partially-checksummed packets, we need to make sure the values
3035  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3036  *
3037  * This function checks and sets those values and skb->ip_summed: if this
3038  * returns false you should drop the packet.
3039  */
3040 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3041 {
3042 	if (unlikely(start > skb_headlen(skb)) ||
3043 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3044 		if (net_ratelimit())
3045 			printk(KERN_WARNING
3046 			       "bad partial csum: csum=%u/%u len=%u\n",
3047 			       start, off, skb_headlen(skb));
3048 		return false;
3049 	}
3050 	skb->ip_summed = CHECKSUM_PARTIAL;
3051 	skb->csum_start = skb_headroom(skb) + start;
3052 	skb->csum_offset = off;
3053 	return true;
3054 }
3055 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
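
/*
 * Illustrative sketch, not part of the original source: a virtio-net
 * style receive path validating checksum metadata supplied by an
 * untrusted header before accepting the packet (hdr is a hypothetical
 * metadata structure).
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */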
3056 
3057 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3058 {
3059 	if (net_ratelimit())
3060 		pr_warning("%s: received packets cannot be forwarded"
3061 			   " while LRO is enabled\n", skb->dev->name);
3062 }
3063 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3064