xref: /linux/net/ipv4/ip_output.c (revision f24e9f586b377749dff37554696cf3a105540c94)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		The Internet Protocol (IP) output module.
7  *
8  * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Donald Becker, <becker@super.org>
13  *		Alan Cox, <Alan.Cox@linux.org>
14  *		Richard Underwood
15  *		Stefan Becker, <stefanb@yello.ping.de>
16  *		Jorge Cwik, <jorge@laser.satlink.net>
17  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18  *		Hirokazu Takahashi, <taka@valinux.co.jp>
19  *
20  *	See ip_input.c for original log
21  *
22  *	Fixes:
23  *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
24  *		Mike Kilburn	:	htons() missing in ip_build_xmit.
25  *		Bradford Johnson:	Fix faulty handling of some frames when
26  *					no route is found.
27  *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
28  *					(in case if packet not accepted by
29  *					output firewall rules)
30  *		Mike McLagan	:	Routing by source
31  *		Alexey Kuznetsov:	use new route cache
32  *		Andi Kleen:		Fix broken PMTU recovery and remove
33  *					some redundant tests.
34  *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
35  *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
36  *		Andi Kleen	:	Split fast and slow ip_build_xmit path
37  *					for decreased register pressure on x86
38  *					and more readability.
39  *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
40  *					silently drop skb instead of failing with -EPERM.
41  *		Detlev Wengorz	:	Copy protocol for fragments.
42  *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
43  *					datagrams.
44  *		Hirokazu Takahashi:	sendfile() on UDP works now.
45  */
46 
47 #include <asm/uaccess.h>
48 #include <asm/system.h>
49 #include <linux/module.h>
50 #include <linux/types.h>
51 #include <linux/kernel.h>
52 #include <linux/sched.h>
53 #include <linux/mm.h>
54 #include <linux/string.h>
55 #include <linux/errno.h>
56 
57 #include <linux/socket.h>
58 #include <linux/sockios.h>
59 #include <linux/in.h>
60 #include <linux/inet.h>
61 #include <linux/netdevice.h>
62 #include <linux/etherdevice.h>
63 #include <linux/proc_fs.h>
64 #include <linux/stat.h>
65 #include <linux/init.h>
66 
67 #include <net/snmp.h>
68 #include <net/ip.h>
69 #include <net/protocol.h>
70 #include <net/route.h>
71 #include <net/xfrm.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <net/arp.h>
75 #include <net/icmp.h>
76 #include <net/checksum.h>
77 #include <net/inetpeer.h>
79 #include <linux/igmp.h>
80 #include <linux/netfilter_ipv4.h>
81 #include <linux/netfilter_bridge.h>
82 #include <linux/mroute.h>
83 #include <linux/netlink.h>
84 #include <linux/tcp.h>
85 
86 int sysctl_ip_default_ttl = IPDEFTTL;
87 
88 /* Generate a checksum for an outgoing IP datagram. */
89 __inline__ void ip_send_check(struct iphdr *iph)
90 {
91 	iph->check = 0;
92 	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
93 }
94 
95 /* dev_loopback_xmit for use with netfilter. */
96 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
97 {
98 	newskb->mac.raw = newskb->data;
99 	__skb_pull(newskb, newskb->nh.raw - newskb->data);
100 	newskb->pkt_type = PACKET_LOOPBACK;
101 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
102 	BUG_TRAP(newskb->dst);
103 	netif_rx(newskb);
104 	return 0;
105 }
106 
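/* Pick the TTL for an outgoing packet: use the socket's unicast TTL if it
 * has been set, otherwise fall back to the route's hop-limit metric.
 */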
107 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
108 {
109 	int ttl = inet->uc_ttl;
110 
111 	if (ttl < 0)
112 		ttl = dst_metric(dst, RTAX_HOPLIMIT);
113 	return ttl;
114 }
115 
116 /*
117  *		Add an IP header to an skbuff and send it out.
118  *
119  */
120 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
121 			  u32 saddr, u32 daddr, struct ip_options *opt)
122 {
123 	struct inet_sock *inet = inet_sk(sk);
124 	struct rtable *rt = (struct rtable *)skb->dst;
125 	struct iphdr *iph;
126 
127 	/* Build the IP header. */
128 	if (opt)
129 		iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
130 	else
131 		iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
132 
133 	iph->version  = 4;
134 	iph->ihl      = 5;
135 	iph->tos      = inet->tos;
136 	if (ip_dont_fragment(sk, &rt->u.dst))
137 		iph->frag_off = htons(IP_DF);
138 	else
139 		iph->frag_off = 0;
140 	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
141 	iph->daddr    = rt->rt_dst;
142 	iph->saddr    = rt->rt_src;
143 	iph->protocol = sk->sk_protocol;
144 	iph->tot_len  = htons(skb->len);
145 	ip_select_ident(iph, &rt->u.dst, sk);
146 	skb->nh.iph   = iph;
147 
148 	if (opt && opt->optlen) {
149 		iph->ihl += opt->optlen>>2;
150 		ip_options_build(skb, opt, daddr, rt, 0);
151 	}
152 	ip_send_check(iph);
153 
154 	skb->priority = sk->sk_priority;
155 
156 	/* Send it out. */
157 	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
158 		       dst_output);
159 }
160 
161 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
162 
163 static inline int ip_finish_output2(struct sk_buff *skb)
164 {
165 	struct dst_entry *dst = skb->dst;
166 	struct hh_cache *hh = dst->hh;
167 	struct net_device *dev = dst->dev;
168 	int hh_len = LL_RESERVED_SPACE(dev);
169 
170 	/* Be paranoid, rather than too clever. */
171 	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
172 		struct sk_buff *skb2;
173 
174 		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
175 		if (skb2 == NULL) {
176 			kfree_skb(skb);
177 			return -ENOMEM;
178 		}
179 		if (skb->sk)
180 			skb_set_owner_w(skb2, skb->sk);
181 		kfree_skb(skb);
182 		skb = skb2;
183 	}
184 
185 	if (hh) {
186 		int hh_alen;
187 
188 		read_lock_bh(&hh->hh_lock);
189 		hh_alen = HH_DATA_ALIGN(hh->hh_len);
190 		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
191 		read_unlock_bh(&hh->hh_lock);
192 		skb_push(skb, hh->hh_len);
193 		return hh->hh_output(skb);
194 	} else if (dst->neighbour)
195 		return dst->neighbour->output(skb);
196 
197 	if (net_ratelimit())
198 		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
199 	kfree_skb(skb);
200 	return -EINVAL;
201 }
202 
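/* Final output step for a routed packet: fragment it if it is larger than
 * the path MTU and is not a GSO skb, otherwise hand it to ip_finish_output2()
 * for link-layer header construction and transmission.
 */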
203 static inline int ip_finish_output(struct sk_buff *skb)
204 {
205 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
206 	/* Policy lookup after SNAT yielded a new policy */
207 	if (skb->dst->xfrm != NULL) {
208 		IPCB(skb)->flags |= IPSKB_REROUTED;
209 		return dst_output(skb);
210 	}
211 #endif
212 	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
213 		return ip_fragment(skb, ip_finish_output2);
214 	else
215 		return ip_finish_output2(skb);
216 }
217 
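/* Output path for multicast (and broadcast) packets: loop a copy back to
 * local receivers where required, then send the original through the normal
 * POST_ROUTING/output path.
 */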
218 int ip_mc_output(struct sk_buff *skb)
219 {
220 	struct sock *sk = skb->sk;
221 	struct rtable *rt = (struct rtable*)skb->dst;
222 	struct net_device *dev = rt->u.dst.dev;
223 
224 	/*
225 	 *	If the indicated interface is up and running, send the packet.
226 	 */
227 	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
228 
229 	skb->dev = dev;
230 	skb->protocol = htons(ETH_P_IP);
231 
232 	/*
233 	 *	Multicasts are looped back for other local users
234 	 */
235 
236 	if (rt->rt_flags&RTCF_MULTICAST) {
237 		if ((!sk || inet_sk(sk)->mc_loop)
238 #ifdef CONFIG_IP_MROUTE
239 		/* Small optimization: do not loop back non-local frames
240 		   that were returned after forwarding; they will be dropped
241 		   by ip_mr_input in any case.
242 		   Note that local frames are looped back to be delivered
243 		   to local recipients.
244 
245 		   This check is duplicated in ip_mr_input at the moment.
246 		 */
247 		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
248 #endif
249 		) {
250 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
251 			if (newskb)
252 				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
253 					newskb->dev,
254 					ip_dev_loopback_xmit);
255 		}
256 
257 		/* Multicasts with ttl 0 must not go beyond the host */
258 
259 		if (skb->nh.iph->ttl == 0) {
260 			kfree_skb(skb);
261 			return 0;
262 		}
263 	}
264 
265 	if (rt->rt_flags&RTCF_BROADCAST) {
266 		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
267 		if (newskb)
268 			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
269 				newskb->dev, ip_dev_loopback_xmit);
270 	}
271 
272 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
273 			    ip_finish_output,
274 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
275 }
276 
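/* Standard output path for locally generated, already routed packets: run
 * the POST_ROUTING hook and finish output, fragmenting if necessary.
 */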
277 int ip_output(struct sk_buff *skb)
278 {
279 	struct net_device *dev = skb->dst->dev;
280 
281 	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
282 
283 	skb->dev = dev;
284 	skb->protocol = htons(ETH_P_IP);
285 
286 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
287 		            ip_finish_output,
288 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
289 }
290 
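/* Queue a packet for transmission on behalf of a connected socket: route it
 * if it is not already routed, build the IP header (including any options)
 * and pass the result through the LOCAL_OUT hook to dst_output().
 */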
291 int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
292 {
293 	struct sock *sk = skb->sk;
294 	struct inet_sock *inet = inet_sk(sk);
295 	struct ip_options *opt = inet->opt;
296 	struct rtable *rt;
297 	struct iphdr *iph;
298 
299 	/* Skip all of this if the packet is already routed,
300 	 * e.g. by something like SCTP.
301 	 */
302 	rt = (struct rtable *) skb->dst;
303 	if (rt != NULL)
304 		goto packet_routed;
305 
306 	/* Make sure we can route this packet. */
307 	rt = (struct rtable *)__sk_dst_check(sk, 0);
308 	if (rt == NULL) {
309 		u32 daddr;
310 
311 		/* Use correct destination address if we have options. */
312 		daddr = inet->daddr;
313 		if(opt && opt->srr)
314 			daddr = opt->faddr;
315 
316 		{
317 			struct flowi fl = { .oif = sk->sk_bound_dev_if,
318 					    .nl_u = { .ip4_u =
319 						      { .daddr = daddr,
320 							.saddr = inet->saddr,
321 							.tos = RT_CONN_FLAGS(sk) } },
322 					    .proto = sk->sk_protocol,
323 					    .uli_u = { .ports =
324 						       { .sport = inet->sport,
325 							 .dport = inet->dport } } };
326 
327 			/* If this fails, the retransmit mechanism of the transport
328 			 * layer will keep trying until the route appears or the
329 			 * connection times itself out.
330 			 */
331 			if (ip_route_output_flow(&rt, &fl, sk, 0))
332 				goto no_route;
333 		}
334 		sk_setup_caps(sk, &rt->u.dst);
335 	}
336 	skb->dst = dst_clone(&rt->u.dst);
337 
338 packet_routed:
339 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
340 		goto no_route;
341 
342 	/* OK, we know where to send it, allocate and build IP header. */
343 	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
344 	*((__u16 *)iph)	= htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
345 	iph->tot_len = htons(skb->len);
346 	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
347 		iph->frag_off = htons(IP_DF);
348 	else
349 		iph->frag_off = 0;
350 	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
351 	iph->protocol = sk->sk_protocol;
352 	iph->saddr    = rt->rt_src;
353 	iph->daddr    = rt->rt_dst;
354 	skb->nh.iph   = iph;
355 	/* The transport layer sets skb->h.foo itself. */
356 
357 	if (opt && opt->optlen) {
358 		iph->ihl += opt->optlen >> 2;
359 		ip_options_build(skb, opt, inet->daddr, rt, 0);
360 	}
361 
362 	ip_select_ident_more(iph, &rt->u.dst, sk,
363 			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
364 
365 	/* Add an IP checksum. */
366 	ip_send_check(iph);
367 
368 	skb->priority = sk->sk_priority;
369 
370 	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
371 		       dst_output);
372 
373 no_route:
374 	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
375 	kfree_skb(skb);
376 	return -EHOSTUNREACH;
377 }
378 
379 
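/* Copy per-packet metadata (priority, dst, netfilter and traffic control
 * state) from the original skb to a freshly built fragment.
 */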
380 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
381 {
382 	to->pkt_type = from->pkt_type;
383 	to->priority = from->priority;
384 	to->protocol = from->protocol;
385 	dst_release(to->dst);
386 	to->dst = dst_clone(from->dst);
387 	to->dev = from->dev;
388 
389 	/* Copy the flags to each fragment. */
390 	IPCB(to)->flags = IPCB(from)->flags;
391 
392 #ifdef CONFIG_NET_SCHED
393 	to->tc_index = from->tc_index;
394 #endif
395 #ifdef CONFIG_NETFILTER
396 	to->nfmark = from->nfmark;
397 	/* Connection association is the same as for the pre-frag packet */
398 	nf_conntrack_put(to->nfct);
399 	to->nfct = from->nfct;
400 	nf_conntrack_get(to->nfct);
401 	to->nfctinfo = from->nfctinfo;
402 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
403 	to->ipvs_property = from->ipvs_property;
404 #endif
405 #ifdef CONFIG_BRIDGE_NETFILTER
406 	nf_bridge_put(to->nf_bridge);
407 	to->nf_bridge = from->nf_bridge;
408 	nf_bridge_get(to->nf_bridge);
409 #endif
410 #endif
411 	skb_copy_secmark(to, from);
412 }
413 
414 /*
415  *	This IP datagram is too large to be sent in one piece.  Break it up into
416  *	smaller pieces (each of a size equal to the IP header plus a block of
417  *	the original datagram's data) that will fit in a single device frame,
418  *	and queue each such frame for sending.
419  */
420 
421 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
422 {
423 	struct iphdr *iph;
424 	int raw = 0;
425 	int ptr;
426 	struct net_device *dev;
427 	struct sk_buff *skb2;
428 	unsigned int mtu, hlen, left, len, ll_rs;
429 	int offset;
430 	__be16 not_last_frag;
431 	struct rtable *rt = (struct rtable*)skb->dst;
432 	int err = 0;
433 
434 	dev = rt->u.dst.dev;
435 
436 	/*
437 	 *	Point into the IP datagram header.
438 	 */
439 
440 	iph = skb->nh.iph;
441 
442 	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
443 		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
444 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
445 			  htonl(dst_mtu(&rt->u.dst)));
446 		kfree_skb(skb);
447 		return -EMSGSIZE;
448 	}
449 
450 	/*
451 	 *	Setup starting values.
452 	 */
453 
454 	hlen = iph->ihl * 4;
455 	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
456 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
457 
458 	/* When a frag_list is given, use it. First, check its validity:
459 	 * some transformers could create a wrong frag_list or break an existing
460 	 * one; that is not prohibited. In this case fall back to copying.
461 	 *
462 	 * LATER: this step can be merged into the real generation of fragments;
463 	 * we can switch to copying when we see the first bad fragment.
464 	 */
465 	if (skb_shinfo(skb)->frag_list) {
466 		struct sk_buff *frag;
467 		int first_len = skb_pagelen(skb);
468 
469 		if (first_len - hlen > mtu ||
470 		    ((first_len - hlen) & 7) ||
471 		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
472 		    skb_cloned(skb))
473 			goto slow_path;
474 
475 		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
476 			/* Correct geometry. */
477 			if (frag->len > mtu ||
478 			    ((frag->len & 7) && frag->next) ||
479 			    skb_headroom(frag) < hlen)
480 			    goto slow_path;
481 
482 			/* Partially cloned skb? */
483 			if (skb_shared(frag))
484 				goto slow_path;
485 
486 			BUG_ON(frag->sk);
487 			if (skb->sk) {
488 				sock_hold(skb->sk);
489 				frag->sk = skb->sk;
490 				frag->destructor = sock_wfree;
491 				skb->truesize -= frag->truesize;
492 			}
493 		}
494 
495 		/* Everything is OK. Generate! */
496 
497 		err = 0;
498 		offset = 0;
499 		frag = skb_shinfo(skb)->frag_list;
500 		skb_shinfo(skb)->frag_list = NULL;
501 		skb->data_len = first_len - skb_headlen(skb);
502 		skb->len = first_len;
503 		iph->tot_len = htons(first_len);
504 		iph->frag_off = htons(IP_MF);
505 		ip_send_check(iph);
506 
507 		for (;;) {
508 			/* Prepare the header of the next frame
509 			 * before the previous one goes out. */
510 			if (frag) {
511 				frag->ip_summed = CHECKSUM_NONE;
512 				frag->h.raw = frag->data;
513 				frag->nh.raw = __skb_push(frag, hlen);
514 				memcpy(frag->nh.raw, iph, hlen);
515 				iph = frag->nh.iph;
516 				iph->tot_len = htons(frag->len);
517 				ip_copy_metadata(frag, skb);
518 				if (offset == 0)
519 					ip_options_fragment(frag);
520 				offset += skb->len - hlen;
521 				iph->frag_off = htons(offset>>3);
522 				if (frag->next != NULL)
523 					iph->frag_off |= htons(IP_MF);
524 				/* Ready, complete checksum */
525 				ip_send_check(iph);
526 			}
527 
528 			err = output(skb);
529 
530 			if (!err)
531 				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
532 			if (err || !frag)
533 				break;
534 
535 			skb = frag;
536 			frag = skb->next;
537 			skb->next = NULL;
538 		}
539 
540 		if (err == 0) {
541 			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
542 			return 0;
543 		}
544 
545 		while (frag) {
546 			skb = frag->next;
547 			kfree_skb(frag);
548 			frag = skb;
549 		}
550 		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
551 		return err;
552 	}
553 
554 slow_path:
555 	left = skb->len - hlen;		/* Space per frame */
556 	ptr = raw + hlen;		/* Where to start from */
557 
558 #ifdef CONFIG_BRIDGE_NETFILTER
559 	/* for bridged IP traffic encapsulated inside e.g. a VLAN header,
560 	 * we need to make room for the encapsulating header */
561 	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
562 	mtu -= nf_bridge_pad(skb);
563 #else
564 	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
565 #endif
566 	/*
567 	 *	Fragment the datagram.
568 	 */
569 
570 	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
571 	not_last_frag = iph->frag_off & htons(IP_MF);
572 
573 	/*
574 	 *	Keep copying data until we run out.
575 	 */
576 
577 	while(left > 0)	{
578 		len = left;
579 		/* IF: it doesn't fit, use 'mtu' - the data space left */
580 		if (len > mtu)
581 			len = mtu;
582 		/* IF: we are not sending up to and including the packet end,
583 		   then align the next start on an eight byte boundary */
584 		if (len < left)	{
585 			len &= ~7;
586 		}
587 		/*
588 		 *	Allocate buffer.
589 		 */
590 
591 		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
592 			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
593 			err = -ENOMEM;
594 			goto fail;
595 		}
596 
597 		/*
598 		 *	Set up data on packet
599 		 */
600 
601 		ip_copy_metadata(skb2, skb);
602 		skb_reserve(skb2, ll_rs);
603 		skb_put(skb2, len + hlen);
604 		skb2->nh.raw = skb2->data;
605 		skb2->h.raw = skb2->data + hlen;
606 
607 		/*
608 		 *	Charge the memory for the fragment to any owner
609 		 *	it might possess
610 		 */
611 
612 		if (skb->sk)
613 			skb_set_owner_w(skb2, skb->sk);
614 
615 		/*
616 		 *	Copy the packet header into the new buffer.
617 		 */
618 
619 		memcpy(skb2->nh.raw, skb->data, hlen);
620 
621 		/*
622 		 *	Copy a block of the IP datagram.
623 		 */
624 		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
625 			BUG();
626 		left -= len;
627 
628 		/*
629 		 *	Fill in the new header fields.
630 		 */
631 		iph = skb2->nh.iph;
632 		iph->frag_off = htons((offset >> 3));
633 
634 		/* ANK: dirty, but effective trick. Upgrade options only if
635 		 * the segment to be fragmented was THE FIRST (otherwise,
636 		 * options are already fixed) and make it ONCE
637 		 * on the initial skb, so that all the following fragments
638 		 * will inherit fixed options.
639 		 */
640 		if (offset == 0)
641 			ip_options_fragment(skb);
642 
643 		/*
644 		 *	Added AC : If we are fragmenting a fragment that's not the
645 		 *		   last fragment then keep the MF bit set on each fragment
646 		 */
647 		if (left > 0 || not_last_frag)
648 			iph->frag_off |= htons(IP_MF);
649 		ptr += len;
650 		offset += len;
651 
652 		/*
653 		 *	Put this fragment into the sending queue.
654 		 */
655 		iph->tot_len = htons(len + hlen);
656 
657 		ip_send_check(iph);
658 
659 		err = output(skb2);
660 		if (err)
661 			goto fail;
662 
663 		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
664 	}
665 	kfree_skb(skb);
666 	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
667 	return err;
668 
669 fail:
670 	kfree_skb(skb);
671 	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
672 	return err;
673 }
674 
675 EXPORT_SYMBOL(ip_fragment);
676 
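/* Default getfrag() callback for ip_append_data(): copy user iovec data into
 * the skb, folding a checksum into skb->csum unless the hardware will
 * checksum the packet (CHECKSUM_HW).
 */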
677 int
678 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
679 {
680 	struct iovec *iov = from;
681 
682 	if (skb->ip_summed == CHECKSUM_HW) {
683 		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
684 			return -EFAULT;
685 	} else {
686 		unsigned int csum = 0;
687 		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
688 			return -EFAULT;
689 		skb->csum = csum_block_add(skb->csum, csum, odd);
690 	}
691 	return 0;
692 }
693 
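/* Checksum a block of data that lives in a (possibly highmem) page; used by
 * ip_append_page() when the skb is not hardware checksummed.
 */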
694 static inline unsigned int
695 csum_page(struct page *page, int offset, int copy)
696 {
697 	char *kaddr;
698 	unsigned int csum;
699 	kaddr = kmap(page);
700 	csum = csum_partial(kaddr + offset, copy, 0);
701 	kunmap(page);
702 	return csum;
703 }
704 
705 static inline int ip_ufo_append_data(struct sock *sk,
706 			int getfrag(void *from, char *to, int offset, int len,
707 			       int odd, struct sk_buff *skb),
708 			void *from, int length, int hh_len, int fragheaderlen,
709 			int transhdrlen, int mtu,unsigned int flags)
710 {
711 	struct sk_buff *skb;
712 	int err;
713 
714 	/* The network device supports UDP fragmentation offload, so
715 	 * create one single skb packet containing the complete UDP
716 	 * datagram
717 	 */
718 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
719 		skb = sock_alloc_send_skb(sk,
720 			hh_len + fragheaderlen + transhdrlen + 20,
721 			(flags & MSG_DONTWAIT), &err);
722 
723 		if (skb == NULL)
724 			return err;
725 
726 		/* reserve space for Hardware header */
727 		skb_reserve(skb, hh_len);
728 
729 		/* create space for UDP/IP header */
730 		skb_put(skb,fragheaderlen + transhdrlen);
731 
732 		/* initialize network header pointer */
733 		skb->nh.raw = skb->data;
734 
735 		/* initialize protocol header pointer */
736 		skb->h.raw = skb->data + fragheaderlen;
737 
738 		skb->ip_summed = CHECKSUM_HW;
739 		skb->csum = 0;
740 		sk->sk_sndmsg_off = 0;
741 	}
742 
743 	err = skb_append_datato_frags(sk,skb, getfrag, from,
744 			       (length - transhdrlen));
745 	if (!err) {
746 		/* specify the length of each IP datagram fragment */
747 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
748 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
749 		__skb_queue_tail(&sk->sk_write_queue, skb);
750 
751 		return 0;
752 	}
753 	/* There is not enough support to do UFO,
754 	 * so follow the normal path
755 	 */
756 	kfree_skb(skb);
757 	return err;
758 }
759 
760 /*
761  *	ip_append_data() and ip_append_page() can make one large IP datagram
762  *	from many pieces of data. Each piece will be held on the socket
763  *	until ip_push_pending_frames() is called. Each piece can be a page
764  *	or non-page data.
765  *
766  *	Not only UDP; other transport protocols - e.g. raw sockets - can
767  *	potentially use this interface.
768  *
769  *	LATER: the length must be adjusted by the pad at the tail, when required.
770  */
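/*
 * A rough sketch of how a datagram-style sender might drive this interface;
 * illustrative only, with getfrag, from, length, ipc, rt and msg_flags
 * standing for whatever the caller has prepared:
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, getfrag, from, length, 0, &ipc, rt, msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 *	release_sock(sk);
 */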
771 int ip_append_data(struct sock *sk,
772 		   int getfrag(void *from, char *to, int offset, int len,
773 			       int odd, struct sk_buff *skb),
774 		   void *from, int length, int transhdrlen,
775 		   struct ipcm_cookie *ipc, struct rtable *rt,
776 		   unsigned int flags)
777 {
778 	struct inet_sock *inet = inet_sk(sk);
779 	struct sk_buff *skb;
780 
781 	struct ip_options *opt = NULL;
782 	int hh_len;
783 	int exthdrlen;
784 	int mtu;
785 	int copy;
786 	int err;
787 	int offset = 0;
788 	unsigned int maxfraglen, fragheaderlen;
789 	int csummode = CHECKSUM_NONE;
790 
791 	if (flags&MSG_PROBE)
792 		return 0;
793 
794 	if (skb_queue_empty(&sk->sk_write_queue)) {
795 		/*
796 		 * setup for corking.
797 		 */
798 		opt = ipc->opt;
799 		if (opt) {
800 			if (inet->cork.opt == NULL) {
801 				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
802 				if (unlikely(inet->cork.opt == NULL))
803 					return -ENOBUFS;
804 			}
805 			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
806 			inet->cork.flags |= IPCORK_OPT;
807 			inet->cork.addr = ipc->addr;
808 		}
809 		dst_hold(&rt->u.dst);
810 		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
811 		inet->cork.rt = rt;
812 		inet->cork.length = 0;
813 		sk->sk_sndmsg_page = NULL;
814 		sk->sk_sndmsg_off = 0;
815 		if ((exthdrlen = rt->u.dst.header_len) != 0) {
816 			length += exthdrlen;
817 			transhdrlen += exthdrlen;
818 		}
819 	} else {
820 		rt = inet->cork.rt;
821 		if (inet->cork.flags & IPCORK_OPT)
822 			opt = inet->cork.opt;
823 
824 		transhdrlen = 0;
825 		exthdrlen = 0;
826 		mtu = inet->cork.fragsize;
827 	}
828 	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
829 
830 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
831 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
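	/* For example (illustrative numbers): with an MTU of 1006 and a plain
	 * 20-byte header (fragheaderlen == 20), maxfraglen is
	 * ((1006 - 20) & ~7) + 20 == 984 + 20 == 1004, so every non-final
	 * fragment carries 984 payload bytes - a multiple of 8, as the IP
	 * fragment offset field requires.
	 */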
832 
833 	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
834 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
835 		return -EMSGSIZE;
836 	}
837 
838 	/*
839 	 * transhdrlen > 0 means that this is the first fragment and we wish
840 	 * it not to be fragmented in the future.
841 	 */
842 	if (transhdrlen &&
843 	    length + fragheaderlen <= mtu &&
844 	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
845 	    !exthdrlen)
846 		csummode = CHECKSUM_HW;
847 
848 	inet->cork.length += length;
849 	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
850 			(rt->u.dst.dev->features & NETIF_F_UFO)) {
851 
852 		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
853 					 fragheaderlen, transhdrlen, mtu,
854 					 flags);
855 		if (err)
856 			goto error;
857 		return 0;
858 	}
859 
860 	/* So, what's going on in the loop below?
861 	 *
862 	 * We use the calculated fragment length to generate a chain of skbs;
863 	 * each segment is an IP fragment ready to be sent to the network once
864 	 * the appropriate IP header has been added.
865 	 */
866 
867 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
868 		goto alloc_new_skb;
869 
870 	while (length > 0) {
871 		/* Check if the remaining data fits into current packet. */
872 		copy = mtu - skb->len;
873 		if (copy < length)
874 			copy = maxfraglen - skb->len;
875 		if (copy <= 0) {
876 			char *data;
877 			unsigned int datalen;
878 			unsigned int fraglen;
879 			unsigned int fraggap;
880 			unsigned int alloclen;
881 			struct sk_buff *skb_prev;
882 alloc_new_skb:
883 			skb_prev = skb;
884 			if (skb_prev)
885 				fraggap = skb_prev->len - maxfraglen;
886 			else
887 				fraggap = 0;
888 
889 			/*
890 			 * If remaining data exceeds the mtu,
891 			 * we know we need more fragment(s).
892 			 */
893 			datalen = length + fraggap;
894 			if (datalen > mtu - fragheaderlen)
895 				datalen = maxfraglen - fragheaderlen;
896 			fraglen = datalen + fragheaderlen;
897 
898 			if ((flags & MSG_MORE) &&
899 			    !(rt->u.dst.dev->features&NETIF_F_SG))
900 				alloclen = mtu;
901 			else
902 				alloclen = datalen + fragheaderlen;
903 
904 			/* The last fragment gets additional space at tail.
905 			 * Note that with MSG_MORE we overallocate on fragments,
906 			 * because we have no idea which fragment will be
907 			 * the last.
908 			 */
909 			if (datalen == length + fraggap)
910 				alloclen += rt->u.dst.trailer_len;
911 
912 			if (transhdrlen) {
913 				skb = sock_alloc_send_skb(sk,
914 						alloclen + hh_len + 15,
915 						(flags & MSG_DONTWAIT), &err);
916 			} else {
917 				skb = NULL;
918 				if (atomic_read(&sk->sk_wmem_alloc) <=
919 				    2 * sk->sk_sndbuf)
920 					skb = sock_wmalloc(sk,
921 							   alloclen + hh_len + 15, 1,
922 							   sk->sk_allocation);
923 				if (unlikely(skb == NULL))
924 					err = -ENOBUFS;
925 			}
926 			if (skb == NULL)
927 				goto error;
928 
929 			/*
930 			 *	Fill in the control structures
931 			 */
932 			skb->ip_summed = csummode;
933 			skb->csum = 0;
934 			skb_reserve(skb, hh_len);
935 
936 			/*
937 			 *	Find where to start putting bytes.
938 			 */
939 			data = skb_put(skb, fraglen);
940 			skb->nh.raw = data + exthdrlen;
941 			data += fragheaderlen;
942 			skb->h.raw = data + exthdrlen;
943 
944 			if (fraggap) {
945 				skb->csum = skb_copy_and_csum_bits(
946 					skb_prev, maxfraglen,
947 					data + transhdrlen, fraggap, 0);
948 				skb_prev->csum = csum_sub(skb_prev->csum,
949 							  skb->csum);
950 				data += fraggap;
951 				pskb_trim_unique(skb_prev, maxfraglen);
952 			}
953 
954 			copy = datalen - transhdrlen - fraggap;
955 			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
956 				err = -EFAULT;
957 				kfree_skb(skb);
958 				goto error;
959 			}
960 
961 			offset += copy;
962 			length -= datalen - fraggap;
963 			transhdrlen = 0;
964 			exthdrlen = 0;
965 			csummode = CHECKSUM_NONE;
966 
967 			/*
968 			 * Put the packet on the pending queue.
969 			 */
970 			__skb_queue_tail(&sk->sk_write_queue, skb);
971 			continue;
972 		}
973 
974 		if (copy > length)
975 			copy = length;
976 
977 		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
978 			unsigned int off;
979 
980 			off = skb->len;
981 			if (getfrag(from, skb_put(skb, copy),
982 					offset, copy, off, skb) < 0) {
983 				__skb_trim(skb, off);
984 				err = -EFAULT;
985 				goto error;
986 			}
987 		} else {
988 			int i = skb_shinfo(skb)->nr_frags;
989 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
990 			struct page *page = sk->sk_sndmsg_page;
991 			int off = sk->sk_sndmsg_off;
992 			unsigned int left;
993 
994 			if (page && (left = PAGE_SIZE - off) > 0) {
995 				if (copy >= left)
996 					copy = left;
997 				if (page != frag->page) {
998 					if (i == MAX_SKB_FRAGS) {
999 						err = -EMSGSIZE;
1000 						goto error;
1001 					}
1002 					get_page(page);
1003 					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1004 					frag = &skb_shinfo(skb)->frags[i];
1005 				}
1006 			} else if (i < MAX_SKB_FRAGS) {
1007 				if (copy > PAGE_SIZE)
1008 					copy = PAGE_SIZE;
1009 				page = alloc_pages(sk->sk_allocation, 0);
1010 				if (page == NULL)  {
1011 					err = -ENOMEM;
1012 					goto error;
1013 				}
1014 				sk->sk_sndmsg_page = page;
1015 				sk->sk_sndmsg_off = 0;
1016 
1017 				skb_fill_page_desc(skb, i, page, 0, 0);
1018 				frag = &skb_shinfo(skb)->frags[i];
1019 				skb->truesize += PAGE_SIZE;
1020 				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
1021 			} else {
1022 				err = -EMSGSIZE;
1023 				goto error;
1024 			}
1025 			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1026 				err = -EFAULT;
1027 				goto error;
1028 			}
1029 			sk->sk_sndmsg_off += copy;
1030 			frag->size += copy;
1031 			skb->len += copy;
1032 			skb->data_len += copy;
1033 		}
1034 		offset += copy;
1035 		length -= copy;
1036 	}
1037 
1038 	return 0;
1039 
1040 error:
1041 	inet->cork.length -= length;
1042 	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1043 	return err;
1044 }
1045 
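/* Like ip_append_data(), but appends page data (e.g. from sendfile()) to the
 * pending queue without copying it. Requires that corking was already started
 * by ip_append_data() and that the device supports scatter-gather.
 */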
1046 ssize_t	ip_append_page(struct sock *sk, struct page *page,
1047 		       int offset, size_t size, int flags)
1048 {
1049 	struct inet_sock *inet = inet_sk(sk);
1050 	struct sk_buff *skb;
1051 	struct rtable *rt;
1052 	struct ip_options *opt = NULL;
1053 	int hh_len;
1054 	int mtu;
1055 	int len;
1056 	int err;
1057 	unsigned int maxfraglen, fragheaderlen, fraggap;
1058 
1059 	if (inet->hdrincl)
1060 		return -EPERM;
1061 
1062 	if (flags&MSG_PROBE)
1063 		return 0;
1064 
1065 	if (skb_queue_empty(&sk->sk_write_queue))
1066 		return -EINVAL;
1067 
1068 	rt = inet->cork.rt;
1069 	if (inet->cork.flags & IPCORK_OPT)
1070 		opt = inet->cork.opt;
1071 
1072 	if (!(rt->u.dst.dev->features&NETIF_F_SG))
1073 		return -EOPNOTSUPP;
1074 
1075 	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
1076 	mtu = inet->cork.fragsize;
1077 
1078 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1079 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1080 
1081 	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
1082 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
1083 		return -EMSGSIZE;
1084 	}
1085 
1086 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1087 		return -EINVAL;
1088 
1089 	inet->cork.length += size;
1090 	if ((sk->sk_protocol == IPPROTO_UDP) &&
1091 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
1092 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1093 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1094 	}
1095 
1096 
1097 	while (size > 0) {
1098 		int i;
1099 
1100 		if (skb_is_gso(skb))
1101 			len = size;
1102 		else {
1103 
1104 			/* Check if the remaining data fits into current packet. */
1105 			len = mtu - skb->len;
1106 			if (len < size)
1107 				len = maxfraglen - skb->len;
1108 		}
1109 		if (len <= 0) {
1110 			struct sk_buff *skb_prev;
1111 			char *data;
1112 			struct iphdr *iph;
1113 			int alloclen;
1114 
1115 			skb_prev = skb;
1116 			fraggap = skb_prev->len - maxfraglen;
1117 
1118 			alloclen = fragheaderlen + hh_len + fraggap + 15;
1119 			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1120 			if (unlikely(!skb)) {
1121 				err = -ENOBUFS;
1122 				goto error;
1123 			}
1124 
1125 			/*
1126 			 *	Fill in the control structures
1127 			 */
1128 			skb->ip_summed = CHECKSUM_NONE;
1129 			skb->csum = 0;
1130 			skb_reserve(skb, hh_len);
1131 
1132 			/*
1133 			 *	Find where to start putting bytes.
1134 			 */
1135 			data = skb_put(skb, fragheaderlen + fraggap);
1136 			skb->nh.iph = iph = (struct iphdr *)data;
1137 			data += fragheaderlen;
1138 			skb->h.raw = data;
1139 
1140 			if (fraggap) {
1141 				skb->csum = skb_copy_and_csum_bits(
1142 					skb_prev, maxfraglen,
1143 					data, fraggap, 0);
1144 				skb_prev->csum = csum_sub(skb_prev->csum,
1145 							  skb->csum);
1146 				pskb_trim_unique(skb_prev, maxfraglen);
1147 			}
1148 
1149 			/*
1150 			 * Put the packet on the pending queue.
1151 			 */
1152 			__skb_queue_tail(&sk->sk_write_queue, skb);
1153 			continue;
1154 		}
1155 
1156 		i = skb_shinfo(skb)->nr_frags;
1157 		if (len > size)
1158 			len = size;
1159 		if (skb_can_coalesce(skb, i, page, offset)) {
1160 			skb_shinfo(skb)->frags[i-1].size += len;
1161 		} else if (i < MAX_SKB_FRAGS) {
1162 			get_page(page);
1163 			skb_fill_page_desc(skb, i, page, offset, len);
1164 		} else {
1165 			err = -EMSGSIZE;
1166 			goto error;
1167 		}
1168 
1169 		if (skb->ip_summed == CHECKSUM_NONE) {
1170 			unsigned int csum;
1171 			csum = csum_page(page, offset, len);
1172 			skb->csum = csum_block_add(skb->csum, csum, skb->len);
1173 		}
1174 
1175 		skb->len += len;
1176 		skb->data_len += len;
1177 		offset += len;
1178 		size -= len;
1179 	}
1180 	return 0;
1181 
1182 error:
1183 	inet->cork.length -= size;
1184 	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1185 	return err;
1186 }
1187 
1188 /*
1189  *	Combine all pending IP fragments on the socket into one IP datagram
1190  *	and push them out.
1191  */
1192 int ip_push_pending_frames(struct sock *sk)
1193 {
1194 	struct sk_buff *skb, *tmp_skb;
1195 	struct sk_buff **tail_skb;
1196 	struct inet_sock *inet = inet_sk(sk);
1197 	struct ip_options *opt = NULL;
1198 	struct rtable *rt = inet->cork.rt;
1199 	struct iphdr *iph;
1200 	__be16 df = 0;
1201 	__u8 ttl;
1202 	int err = 0;
1203 
1204 	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1205 		goto out;
1206 	tail_skb = &(skb_shinfo(skb)->frag_list);
1207 
1208 	/* move skb->data to ip header from ext header */
1209 	if (skb->data < skb->nh.raw)
1210 		__skb_pull(skb, skb->nh.raw - skb->data);
1211 	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1212 		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
1213 		*tail_skb = tmp_skb;
1214 		tail_skb = &(tmp_skb->next);
1215 		skb->len += tmp_skb->len;
1216 		skb->data_len += tmp_skb->len;
1217 		skb->truesize += tmp_skb->truesize;
1218 		__sock_put(tmp_skb->sk);
1219 		tmp_skb->destructor = NULL;
1220 		tmp_skb->sk = NULL;
1221 	}
1222 
1223 	/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we allow
1224 	 * the frame generated here to be fragmented. No matter how transforms
1225 	 * change the size of the packet, it will come out.
1226 	 */
1227 	if (inet->pmtudisc != IP_PMTUDISC_DO)
1228 		skb->local_df = 1;
1229 
1230 	/* The DF bit is set when we want to see DF on outgoing frames.
1231 	 * If local_df is set too, we still allow this frame to be fragmented
1232 	 * locally. */
1233 	if (inet->pmtudisc == IP_PMTUDISC_DO ||
1234 	    (skb->len <= dst_mtu(&rt->u.dst) &&
1235 	     ip_dont_fragment(sk, &rt->u.dst)))
1236 		df = htons(IP_DF);
1237 
1238 	if (inet->cork.flags & IPCORK_OPT)
1239 		opt = inet->cork.opt;
1240 
1241 	if (rt->rt_type == RTN_MULTICAST)
1242 		ttl = inet->mc_ttl;
1243 	else
1244 		ttl = ip_select_ttl(inet, &rt->u.dst);
1245 
1246 	iph = (struct iphdr *)skb->data;
1247 	iph->version = 4;
1248 	iph->ihl = 5;
1249 	if (opt) {
1250 		iph->ihl += opt->optlen>>2;
1251 		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
1252 	}
1253 	iph->tos = inet->tos;
1254 	iph->tot_len = htons(skb->len);
1255 	iph->frag_off = df;
1256 	ip_select_ident(iph, &rt->u.dst, sk);
1257 	iph->ttl = ttl;
1258 	iph->protocol = sk->sk_protocol;
1259 	iph->saddr = rt->rt_src;
1260 	iph->daddr = rt->rt_dst;
1261 	ip_send_check(iph);
1262 
1263 	skb->priority = sk->sk_priority;
1264 	skb->dst = dst_clone(&rt->u.dst);
1265 
1266 	/* Netfilter gets the whole, not yet fragmented skb. */
1267 	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
1268 		      skb->dst->dev, dst_output);
1269 	if (err) {
1270 		if (err > 0)
1271 			err = inet->recverr ? net_xmit_errno(err) : 0;
1272 		if (err)
1273 			goto error;
1274 	}
1275 
1276 out:
1277 	inet->cork.flags &= ~IPCORK_OPT;
1278 	kfree(inet->cork.opt);
1279 	inet->cork.opt = NULL;
1280 	if (inet->cork.rt) {
1281 		ip_rt_put(inet->cork.rt);
1282 		inet->cork.rt = NULL;
1283 	}
1284 	return err;
1285 
1286 error:
1287 	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1288 	goto out;
1289 }
1290 
1291 /*
1292  *	Throw away all pending data on the socket.
1293  */
1294 void ip_flush_pending_frames(struct sock *sk)
1295 {
1296 	struct inet_sock *inet = inet_sk(sk);
1297 	struct sk_buff *skb;
1298 
1299 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1300 		kfree_skb(skb);
1301 
1302 	inet->cork.flags &= ~IPCORK_OPT;
1303 	kfree(inet->cork.opt);
1304 	inet->cork.opt = NULL;
1305 	if (inet->cork.rt) {
1306 		ip_rt_put(inet->cork.rt);
1307 		inet->cork.rt = NULL;
1308 	}
1309 }
1310 
1311 
1312 /*
1313  *	Fetch data from kernel space and fill in checksum if needed.
1314  */
1315 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1316 			      int len, int odd, struct sk_buff *skb)
1317 {
1318 	unsigned int csum;
1319 
1320 	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1321 	skb->csum = csum_block_add(skb->csum, csum, odd);
1322 	return 0;
1323 }
1324 
1325 /*
1326  *	Generic function to send a packet as a reply to another packet.
1327  *	Used to send TCP resets so far. ICMP should use this function too.
1328  *
1329  *	Should run single-threaded per socket because it uses the sock
1330  *	structure to pass arguments.
1331  *
1332  *	LATER: switch from ip_build_xmit to ip_append_*
1333  */
1334 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
1335 		   unsigned int len)
1336 {
1337 	struct inet_sock *inet = inet_sk(sk);
1338 	struct {
1339 		struct ip_options	opt;
1340 		char			data[40];
1341 	} replyopts;
1342 	struct ipcm_cookie ipc;
1343 	u32 daddr;
1344 	struct rtable *rt = (struct rtable*)skb->dst;
1345 
1346 	if (ip_options_echo(&replyopts.opt, skb))
1347 		return;
1348 
1349 	daddr = ipc.addr = rt->rt_src;
1350 	ipc.opt = NULL;
1351 
1352 	if (replyopts.opt.optlen) {
1353 		ipc.opt = &replyopts.opt;
1354 
1355 		if (ipc.opt->srr)
1356 			daddr = replyopts.opt.faddr;
1357 	}
1358 
1359 	{
1360 		struct flowi fl = { .nl_u = { .ip4_u =
1361 					      { .daddr = daddr,
1362 						.saddr = rt->rt_spec_dst,
1363 						.tos = RT_TOS(skb->nh.iph->tos) } },
1364 				    /* Not quite clean, but right. */
1365 				    .uli_u = { .ports =
1366 					       { .sport = skb->h.th->dest,
1367 					         .dport = skb->h.th->source } },
1368 				    .proto = sk->sk_protocol };
1369 		if (ip_route_output_key(&rt, &fl))
1370 			return;
1371 	}
1372 
1373 	/* And let IP do all the hard work.
1374 
1375 	   This chunk is not reentrant, hence the spinlock.
1376 	   Note that it relies on the fact that this function is called
1377 	   with BH locally disabled and that sk cannot already be spinlocked.
1378 	 */
1379 	bh_lock_sock(sk);
1380 	inet->tos = skb->nh.iph->tos;
1381 	sk->sk_priority = skb->priority;
1382 	sk->sk_protocol = skb->nh.iph->protocol;
1383 	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1384 		       &ipc, rt, MSG_DONTWAIT);
1385 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1386 		if (arg->csumoffset >= 0)
1387 			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
1388 		skb->ip_summed = CHECKSUM_NONE;
1389 		ip_push_pending_frames(sk);
1390 	}
1391 
1392 	bh_unlock_sock(sk);
1393 
1394 	ip_rt_put(rt);
1395 }
1396 
1397 void __init ip_init(void)
1398 {
1399 	ip_rt_init();
1400 	inet_initpeers();
1401 
1402 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1403 	igmp_mc_proc_init();
1404 #endif
1405 }
1406 
1407 EXPORT_SYMBOL(ip_generic_getfrag);
1408 EXPORT_SYMBOL(ip_queue_xmit);
1409 EXPORT_SYMBOL(ip_send_check);
1410