xref: /linux/net/ipv6/ip6_output.c (revision 2d87650a3bf1b80f7d0d150ee1af3f8a89e5b7aa)
1 /*
2  *	IPv6 output functions
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on linux/net/ipv4/ip_output.c
9  *
10  *	This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  *
15  *	Changes:
16  *	A.N.Kuznetsov	:	arithmetic in fragmentation.
17  *				extension headers are implemented.
18  *				route changes now work.
19  *				ip6_forward does not confuse sniffers.
20  *				etc.
21  *
22  *      H. von Brand    :       Added missing #include <linux/string.h>
23  *	Imran Patel	: 	frag id should be in NBO
24  *      Kazunori MIYAZAWA @USAGI
25  *			:       add ip6_append_data and related functions
26  *				for datagram xmit
27  */
28 
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41 
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
44 
45 #include <net/sock.h>
46 #include <net/snmp.h>
47 
48 #include <net/ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
54 #include <net/icmp.h>
55 #include <net/xfrm.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58 
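/*
 * Final neighbour-resolution step for locally generated and forwarded
 * packets: after the multicast loopback and scope checks, the next hop of
 * the cached route is resolved (or a neighbour entry is created) and the
 * packet is handed to it via dst_neigh_output().
 */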
59 static int ip6_finish_output2(struct sk_buff *skb)
60 {
61 	struct dst_entry *dst = skb_dst(skb);
62 	struct net_device *dev = dst->dev;
63 	struct neighbour *neigh;
64 	struct in6_addr *nexthop;
65 	int ret;
66 
67 	skb->protocol = htons(ETH_P_IPV6);
68 	skb->dev = dev;
69 
70 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
71 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
72 
73 		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
74 		    ((mroute6_socket(dev_net(dev), skb) &&
75 		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
76 		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
77 					 &ipv6_hdr(skb)->saddr))) {
78 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
79 
80 			/* Do not check for IFF_ALLMULTI; multicast routing
81 			   is not supported in any case.
82 			 */
83 			if (newskb)
84 				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
85 					newskb, NULL, newskb->dev,
86 					dev_loopback_xmit);
87 
88 			if (ipv6_hdr(skb)->hop_limit == 0) {
89 				IP6_INC_STATS(dev_net(dev), idev,
90 					      IPSTATS_MIB_OUTDISCARDS);
91 				kfree_skb(skb);
92 				return 0;
93 			}
94 		}
95 
96 		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
97 				skb->len);
98 
99 		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 		    IPV6_ADDR_SCOPE_NODELOCAL &&
101 		    !(dev->flags & IFF_LOOPBACK)) {
102 			kfree_skb(skb);
103 			return 0;
104 		}
105 	}
106 
107 	rcu_read_lock_bh();
108 	nexthop = rt6_nexthop((struct rt6_info *)dst);
109 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 	if (unlikely(!neigh))
111 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 	if (!IS_ERR(neigh)) {
113 		ret = dst_neigh_output(dst, neigh, skb);
114 		rcu_read_unlock_bh();
115 		return ret;
116 	}
117 	rcu_read_unlock_bh();
118 
119 	IP6_INC_STATS(dev_net(dst->dev),
120 		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
121 	kfree_skb(skb);
122 	return -EINVAL;
123 }
124 
125 static int ip6_finish_output(struct sk_buff *skb)
126 {
127 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
128 	    dst_allfrag(skb_dst(skb)) ||
129 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
130 		return ip6_fragment(skb, ip6_finish_output2);
131 	else
132 		return ip6_finish_output2(skb);
133 }
134 
135 int ip6_output(struct sk_buff *skb)
136 {
137 	struct net_device *dev = skb_dst(skb)->dev;
138 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
139 	if (unlikely(idev->cnf.disable_ipv6)) {
140 		IP6_INC_STATS(dev_net(dev), idev,
141 			      IPSTATS_MIB_OUTDISCARDS);
142 		kfree_skb(skb);
143 		return 0;
144 	}
145 
146 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
147 			    ip6_finish_output,
148 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
149 }
150 
151 /*
152  *	xmit an sk_buff (used by TCP, SCTP and DCCP)
153  */
154 
155 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
156 	     struct ipv6_txoptions *opt, int tclass)
157 {
158 	struct net *net = sock_net(sk);
159 	struct ipv6_pinfo *np = inet6_sk(sk);
160 	struct in6_addr *first_hop = &fl6->daddr;
161 	struct dst_entry *dst = skb_dst(skb);
162 	struct ipv6hdr *hdr;
163 	u8  proto = fl6->flowi6_proto;
164 	int seg_len = skb->len;
165 	int hlimit = -1;
166 	u32 mtu;
167 
168 	if (opt) {
169 		unsigned int head_room;
170 
171 		/* First: exthdrs may take lots of space (~8K for now);
172 		   MAX_HEADER is not enough.
173 		 */
174 		head_room = opt->opt_nflen + opt->opt_flen;
175 		seg_len += head_room;
176 		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
177 
178 		if (skb_headroom(skb) < head_room) {
179 			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
180 			if (skb2 == NULL) {
181 				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
182 					      IPSTATS_MIB_OUTDISCARDS);
183 				kfree_skb(skb);
184 				return -ENOBUFS;
185 			}
186 			consume_skb(skb);
187 			skb = skb2;
188 			skb_set_owner_w(skb, sk);
189 		}
190 		if (opt->opt_flen)
191 			ipv6_push_frag_opts(skb, opt, &proto);
192 		if (opt->opt_nflen)
193 			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
194 	}
195 
196 	skb_push(skb, sizeof(struct ipv6hdr));
197 	skb_reset_network_header(skb);
198 	hdr = ipv6_hdr(skb);
199 
200 	/*
201 	 *	Fill in the IPv6 header
202 	 */
203 	if (np)
204 		hlimit = np->hop_limit;
205 	if (hlimit < 0)
206 		hlimit = ip6_dst_hoplimit(dst);
207 
208 	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
209 
210 	hdr->payload_len = htons(seg_len);
211 	hdr->nexthdr = proto;
212 	hdr->hop_limit = hlimit;
213 
214 	hdr->saddr = fl6->saddr;
215 	hdr->daddr = *first_hop;
216 
217 	skb->protocol = htons(ETH_P_IPV6);
218 	skb->priority = sk->sk_priority;
219 	skb->mark = sk->sk_mark;
220 
221 	mtu = dst_mtu(dst);
222 	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
223 		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
224 			      IPSTATS_MIB_OUT, skb->len);
225 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
226 			       dst->dev, dst_output);
227 	}
228 
229 	skb->dev = dst->dev;
230 	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
231 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
232 	kfree_skb(skb);
233 	return -EMSGSIZE;
234 }
235 
236 EXPORT_SYMBOL(ip6_xmit);
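/*
 * Illustrative call pattern (not part of this file; everything except the
 * functions and fields visible above is an assumption): a stream-type
 * caller typically routes the flow, attaches the dst to the skb and lets
 * ip6_xmit() prepend the IPv6 header:
 *
 *	struct flowi6 fl6 = { .flowi6_proto = IPPROTO_TCP };
 *
 *	fl6.daddr = ...;
 *	fl6.saddr = ...;
 *	dst = ip6_dst_lookup_flow(sk, &fl6, NULL);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	ip6_xmit(sk, skb, &fl6, opt, tclass);
 */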
237 
238 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
239 {
240 	struct ip6_ra_chain *ra;
241 	struct sock *last = NULL;
242 
243 	read_lock(&ip6_ra_lock);
244 	for (ra = ip6_ra_chain; ra; ra = ra->next) {
245 		struct sock *sk = ra->sk;
246 		if (sk && ra->sel == sel &&
247 		    (!sk->sk_bound_dev_if ||
248 		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
249 			if (last) {
250 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
251 				if (skb2)
252 					rawv6_rcv(last, skb2);
253 			}
254 			last = sk;
255 		}
256 	}
257 
258 	if (last) {
259 		rawv6_rcv(last, skb);
260 		read_unlock(&ip6_ra_lock);
261 		return 1;
262 	}
263 	read_unlock(&ip6_ra_lock);
264 	return 0;
265 }
266 
267 static int ip6_forward_proxy_check(struct sk_buff *skb)
268 {
269 	struct ipv6hdr *hdr = ipv6_hdr(skb);
270 	u8 nexthdr = hdr->nexthdr;
271 	__be16 frag_off;
272 	int offset;
273 
274 	if (ipv6_ext_hdr(nexthdr)) {
275 		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
276 		if (offset < 0)
277 			return 0;
278 	} else
279 		offset = sizeof(struct ipv6hdr);
280 
281 	if (nexthdr == IPPROTO_ICMPV6) {
282 		struct icmp6hdr *icmp6;
283 
284 		if (!pskb_may_pull(skb, (skb_network_header(skb) +
285 					 offset + 1 - skb->data)))
286 			return 0;
287 
288 		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
289 
290 		switch (icmp6->icmp6_type) {
291 		case NDISC_ROUTER_SOLICITATION:
292 		case NDISC_ROUTER_ADVERTISEMENT:
293 		case NDISC_NEIGHBOUR_SOLICITATION:
294 		case NDISC_NEIGHBOUR_ADVERTISEMENT:
295 		case NDISC_REDIRECT:
296 			/* For reactions involving unicast neighbor discovery
297 			 * messages destined to the proxied address, pass them to
298 			 * the input function.
299 			 */
300 			return 1;
301 		default:
302 			break;
303 		}
304 	}
305 
306 	/*
307 	 * The proxying router can't forward traffic sent to a link-local
308 	 * address, so signal the sender and discard the packet. This
309 	 * behavior is clarified by the MIPv6 specification.
310 	 */
311 	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
312 		dst_link_failure(skb);
313 		return -1;
314 	}
315 
316 	return 0;
317 }
318 
319 static inline int ip6_forward_finish(struct sk_buff *skb)
320 {
321 	return dst_output(skb);
322 }
323 
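/*
 * Forwarding path: checks that forwarding is enabled and that the packet
 * really is for another host, passes Router Alert packets to interested
 * raw sockets, generates ICMPv6 Time Exceeded / Packet Too Big errors as
 * needed, honours NDP proxying, may emit a redirect when the input and
 * output device match, and finally decrements the hop limit before handing
 * the packet to the NF_INET_FORWARD hook.
 */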
324 int ip6_forward(struct sk_buff *skb)
325 {
326 	struct dst_entry *dst = skb_dst(skb);
327 	struct ipv6hdr *hdr = ipv6_hdr(skb);
328 	struct inet6_skb_parm *opt = IP6CB(skb);
329 	struct net *net = dev_net(dst->dev);
330 	u32 mtu;
331 
332 	if (net->ipv6.devconf_all->forwarding == 0)
333 		goto error;
334 
335 	if (skb_warn_if_lro(skb))
336 		goto drop;
337 
338 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
339 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
340 				 IPSTATS_MIB_INDISCARDS);
341 		goto drop;
342 	}
343 
344 	if (skb->pkt_type != PACKET_HOST)
345 		goto drop;
346 
347 	skb_forward_csum(skb);
348 
349 	/*
350 	 *	We DO NOT do any processing on
351 	 *	RA packets, pushing them to user level AS IS
352 	 *	without any WARRANTY that the application will be able
353 	 *	to interpret them. The reason is that we
354 	 *	cannot do anything clever here.
355 	 *
356 	 *	We are not the end node, so if the packet contains
357 	 *	AH/ESP we cannot do anything.
358 	 *	Defragmentation would also be a mistake; RA packets
359 	 *	cannot be fragmented, because there is no guarantee
360 	 *	that different fragments will go along one path. --ANK
361 	 */
362 	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
363 		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
364 			return 0;
365 	}
366 
367 	/*
368 	 *	check and decrement ttl
369 	 */
370 	if (hdr->hop_limit <= 1) {
371 		/* Force OUTPUT device used as source address */
372 		skb->dev = dst->dev;
373 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
374 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
375 				 IPSTATS_MIB_INHDRERRORS);
376 
377 		kfree_skb(skb);
378 		return -ETIMEDOUT;
379 	}
380 
381 	/* XXX: idev->cnf.proxy_ndp? */
382 	if (net->ipv6.devconf_all->proxy_ndp &&
383 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
384 		int proxied = ip6_forward_proxy_check(skb);
385 		if (proxied > 0)
386 			return ip6_input(skb);
387 		else if (proxied < 0) {
388 			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
389 					 IPSTATS_MIB_INDISCARDS);
390 			goto drop;
391 		}
392 	}
393 
394 	if (!xfrm6_route_forward(skb)) {
395 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
396 				 IPSTATS_MIB_INDISCARDS);
397 		goto drop;
398 	}
399 	dst = skb_dst(skb);
400 
401 	/* IPv6 specs say nothing about it, but it is clear that we cannot
402 	   send redirects to source routed frames.
403 	   We don't send redirects to frames decapsulated from IPsec.
404 	 */
405 	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
406 		struct in6_addr *target = NULL;
407 		struct inet_peer *peer;
408 		struct rt6_info *rt;
409 
410 		/*
411 		 *	incoming and outgoing devices are the same,
412 		 *	so send a redirect.
413 		 */
414 
415 		rt = (struct rt6_info *) dst;
416 		if (rt->rt6i_flags & RTF_GATEWAY)
417 			target = &rt->rt6i_gateway;
418 		else
419 			target = &hdr->daddr;
420 
421 		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
422 
423 		/* Limit redirects both by destination (here)
424 		   and by source (inside ndisc_send_redirect)
425 		 */
426 		if (inet_peer_xrlim_allow(peer, 1*HZ))
427 			ndisc_send_redirect(skb, target);
428 		if (peer)
429 			inet_putpeer(peer);
430 	} else {
431 		int addrtype = ipv6_addr_type(&hdr->saddr);
432 
433 		/* This check is security critical. */
434 		if (addrtype == IPV6_ADDR_ANY ||
435 		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
436 			goto error;
437 		if (addrtype & IPV6_ADDR_LINKLOCAL) {
438 			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
439 				    ICMPV6_NOT_NEIGHBOUR, 0);
440 			goto error;
441 		}
442 	}
443 
444 	mtu = dst_mtu(dst);
445 	if (mtu < IPV6_MIN_MTU)
446 		mtu = IPV6_MIN_MTU;
447 
448 	if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
449 	    (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
450 		/* Again, force OUTPUT device used as source address */
451 		skb->dev = dst->dev;
452 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
453 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
454 				 IPSTATS_MIB_INTOOBIGERRORS);
455 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
456 				 IPSTATS_MIB_FRAGFAILS);
457 		kfree_skb(skb);
458 		return -EMSGSIZE;
459 	}
460 
461 	if (skb_cow(skb, dst->dev->hard_header_len)) {
462 		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
463 				 IPSTATS_MIB_OUTDISCARDS);
464 		goto drop;
465 	}
466 
467 	hdr = ipv6_hdr(skb);
468 
469 	/* Mangling hops number delayed to point after skb COW */
470 
471 	hdr->hop_limit--;
472 
473 	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
474 	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
475 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
476 		       ip6_forward_finish);
477 
478 error:
479 	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
480 drop:
481 	kfree_skb(skb);
482 	return -EINVAL;
483 }
484 
485 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
486 {
487 	to->pkt_type = from->pkt_type;
488 	to->priority = from->priority;
489 	to->protocol = from->protocol;
490 	skb_dst_drop(to);
491 	skb_dst_set(to, dst_clone(skb_dst(from)));
492 	to->dev = from->dev;
493 	to->mark = from->mark;
494 
495 #ifdef CONFIG_NET_SCHED
496 	to->tc_index = from->tc_index;
497 #endif
498 	nf_copy(to, from);
499 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
500 	to->nf_trace = from->nf_trace;
501 #endif
502 	skb_copy_secmark(to, from);
503 }
504 
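/*
 * Fragments the packet and feeds each fragment to @output.  When the skb
 * already carries a suitable frag_list (fragments of the right size,
 * 8-byte aligned, with enough headroom, not cloned or shared) the list is
 * reused directly; otherwise the slow path copies the payload into newly
 * allocated fragment skbs of at most mtu bytes each.
 */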
505 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
506 {
507 	struct sk_buff *frag;
508 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
509 	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
510 	struct ipv6hdr *tmp_hdr;
511 	struct frag_hdr *fh;
512 	unsigned int mtu, hlen, left, len;
513 	int hroom, troom;
514 	__be32 frag_id = 0;
515 	int ptr, offset = 0, err = 0;
516 	u8 *prevhdr, nexthdr = 0;
517 	struct net *net = dev_net(skb_dst(skb)->dev);
518 
519 	hlen = ip6_find_1stfragopt(skb, &prevhdr);
520 	nexthdr = *prevhdr;
521 
522 	mtu = ip6_skb_dst_mtu(skb);
523 
524 	/* We must not fragment if the socket is set to force MTU discovery
525 	 * or if the skb is not generated by a local socket.
526 	 */
527 	if (unlikely(!skb->local_df && skb->len > mtu) ||
528 		     (IP6CB(skb)->frag_max_size &&
529 		      IP6CB(skb)->frag_max_size > mtu)) {
530 		if (skb->sk && dst_allfrag(skb_dst(skb)))
531 			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
532 
533 		skb->dev = skb_dst(skb)->dev;
534 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
535 		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
536 			      IPSTATS_MIB_FRAGFAILS);
537 		kfree_skb(skb);
538 		return -EMSGSIZE;
539 	}
540 
541 	if (np && np->frag_size < mtu) {
542 		if (np->frag_size)
543 			mtu = np->frag_size;
544 	}
545 	mtu -= hlen + sizeof(struct frag_hdr);
546 
547 	if (skb_has_frag_list(skb)) {
548 		int first_len = skb_pagelen(skb);
549 		struct sk_buff *frag2;
550 
551 		if (first_len - hlen > mtu ||
552 		    ((first_len - hlen) & 7) ||
553 		    skb_cloned(skb))
554 			goto slow_path;
555 
556 		skb_walk_frags(skb, frag) {
557 			/* Correct geometry. */
558 			if (frag->len > mtu ||
559 			    ((frag->len & 7) && frag->next) ||
560 			    skb_headroom(frag) < hlen)
561 				goto slow_path_clean;
562 
563 			/* Partially cloned skb? */
564 			if (skb_shared(frag))
565 				goto slow_path_clean;
566 
567 			BUG_ON(frag->sk);
568 			if (skb->sk) {
569 				frag->sk = skb->sk;
570 				frag->destructor = sock_wfree;
571 			}
572 			skb->truesize -= frag->truesize;
573 		}
574 
575 		err = 0;
576 		offset = 0;
577 		frag = skb_shinfo(skb)->frag_list;
578 		skb_frag_list_init(skb);
579 		/* BUILD HEADER */
580 
581 		*prevhdr = NEXTHDR_FRAGMENT;
582 		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
583 		if (!tmp_hdr) {
584 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
585 				      IPSTATS_MIB_FRAGFAILS);
586 			return -ENOMEM;
587 		}
588 
589 		__skb_pull(skb, hlen);
590 	fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
591 		__skb_push(skb, hlen);
592 		skb_reset_network_header(skb);
593 		memcpy(skb_network_header(skb), tmp_hdr, hlen);
594 
595 		ipv6_select_ident(fh, rt);
596 		fh->nexthdr = nexthdr;
597 		fh->reserved = 0;
598 		fh->frag_off = htons(IP6_MF);
599 		frag_id = fh->identification;
600 
601 		first_len = skb_pagelen(skb);
602 		skb->data_len = first_len - skb_headlen(skb);
603 		skb->len = first_len;
604 		ipv6_hdr(skb)->payload_len = htons(first_len -
605 						   sizeof(struct ipv6hdr));
606 
607 		dst_hold(&rt->dst);
608 
609 		for (;;) {
610 			/* Prepare the header of the next fragment
611 			 * before the previous one goes down. */
612 			if (frag) {
613 				frag->ip_summed = CHECKSUM_NONE;
614 				skb_reset_transport_header(frag);
615 				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
616 				__skb_push(frag, hlen);
617 				skb_reset_network_header(frag);
618 				memcpy(skb_network_header(frag), tmp_hdr,
619 				       hlen);
620 				offset += skb->len - hlen - sizeof(struct frag_hdr);
621 				fh->nexthdr = nexthdr;
622 				fh->reserved = 0;
623 				fh->frag_off = htons(offset);
624 				if (frag->next != NULL)
625 					fh->frag_off |= htons(IP6_MF);
626 				fh->identification = frag_id;
627 				ipv6_hdr(frag)->payload_len =
628 						htons(frag->len -
629 						      sizeof(struct ipv6hdr));
630 				ip6_copy_metadata(frag, skb);
631 			}
632 
633 			err = output(skb);
634 			if (!err)
635 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
636 					      IPSTATS_MIB_FRAGCREATES);
637 
638 			if (err || !frag)
639 				break;
640 
641 			skb = frag;
642 			frag = skb->next;
643 			skb->next = NULL;
644 		}
645 
646 		kfree(tmp_hdr);
647 
648 		if (err == 0) {
649 			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
650 				      IPSTATS_MIB_FRAGOKS);
651 			ip6_rt_put(rt);
652 			return 0;
653 		}
654 
655 		while (frag) {
656 			skb = frag->next;
657 			kfree_skb(frag);
658 			frag = skb;
659 		}
660 
661 		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
662 			      IPSTATS_MIB_FRAGFAILS);
663 		ip6_rt_put(rt);
664 		return err;
665 
666 slow_path_clean:
667 		skb_walk_frags(skb, frag2) {
668 			if (frag2 == frag)
669 				break;
670 			frag2->sk = NULL;
671 			frag2->destructor = NULL;
672 			skb->truesize += frag2->truesize;
673 		}
674 	}
675 
676 slow_path:
677 	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
678 	    skb_checksum_help(skb))
679 		goto fail;
680 
681 	left = skb->len - hlen;		/* Space per frame */
682 	ptr = hlen;			/* Where to start from */
683 
684 	/*
685 	 *	Fragment the datagram.
686 	 */
687 
688 	*prevhdr = NEXTHDR_FRAGMENT;
689 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
690 	troom = rt->dst.dev->needed_tailroom;
691 
692 	/*
693 	 *	Keep copying data until we run out.
694 	 */
695 	while (left > 0) {
696 		len = left;
697 		/* IF: it doesn't fit, use 'mtu' - the data space left */
698 		if (len > mtu)
699 			len = mtu;
700 		/* IF: we are not sending up to and including the packet end
701 		   then align the next start on an eight byte boundary */
702 		if (len < left)	{
703 			len &= ~7;
704 		}
705 		/*
706 		 *	Allocate buffer.
707 		 */
708 
709 		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
710 				      hroom + troom, GFP_ATOMIC)) == NULL) {
711 			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
712 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
713 				      IPSTATS_MIB_FRAGFAILS);
714 			err = -ENOMEM;
715 			goto fail;
716 		}
717 
718 		/*
719 		 *	Set up data on packet
720 		 */
721 
722 		ip6_copy_metadata(frag, skb);
723 		skb_reserve(frag, hroom);
724 		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
725 		skb_reset_network_header(frag);
726 		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
727 		frag->transport_header = (frag->network_header + hlen +
728 					  sizeof(struct frag_hdr));
729 
730 		/*
731 		 *	Charge the memory for the fragment to any owner
732 		 *	it might possess
733 		 */
734 		if (skb->sk)
735 			skb_set_owner_w(frag, skb->sk);
736 
737 		/*
738 		 *	Copy the packet header into the new buffer.
739 		 */
740 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
741 
742 		/*
743 		 *	Build fragment header.
744 		 */
745 		fh->nexthdr = nexthdr;
746 		fh->reserved = 0;
747 		if (!frag_id) {
748 			ipv6_select_ident(fh, rt);
749 			frag_id = fh->identification;
750 		} else
751 			fh->identification = frag_id;
752 
753 		/*
754 		 *	Copy a block of the IP datagram.
755 		 */
756 		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
757 			BUG();
758 		left -= len;
759 
760 		fh->frag_off = htons(offset);
761 		if (left > 0)
762 			fh->frag_off |= htons(IP6_MF);
763 		ipv6_hdr(frag)->payload_len = htons(frag->len -
764 						    sizeof(struct ipv6hdr));
765 
766 		ptr += len;
767 		offset += len;
768 
769 		/*
770 		 *	Put this fragment into the sending queue.
771 		 */
772 		err = output(frag);
773 		if (err)
774 			goto fail;
775 
776 		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
777 			      IPSTATS_MIB_FRAGCREATES);
778 	}
779 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
780 		      IPSTATS_MIB_FRAGOKS);
781 	consume_skb(skb);
782 	return err;
783 
784 fail:
785 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
786 		      IPSTATS_MIB_FRAGFAILS);
787 	kfree_skb(skb);
788 	return err;
789 }
790 
791 static inline int ip6_rt_check(const struct rt6key *rt_key,
792 			       const struct in6_addr *fl_addr,
793 			       const struct in6_addr *addr_cache)
794 {
795 	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
796 		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
797 }
798 
799 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
800 					  struct dst_entry *dst,
801 					  const struct flowi6 *fl6)
802 {
803 	struct ipv6_pinfo *np = inet6_sk(sk);
804 	struct rt6_info *rt;
805 
806 	if (!dst)
807 		goto out;
808 
809 	if (dst->ops->family != AF_INET6) {
810 		dst_release(dst);
811 		return NULL;
812 	}
813 
814 	rt = (struct rt6_info *)dst;
815 	/* Yes, checking route validity in the not-connected
816 	 * case is not very simple. Take into account
817 	 * that we do not support routing by source, TOS,
818 	 * and MSG_DONTROUTE 		--ANK (980726)
819 	 *
820 	 * 1. ip6_rt_check(): If the route was a host route,
821 	 *    check that the cached destination is current.
822 	 *    If it is a network route, we may still
823 	 *    check its validity using a saved pointer
824 	 *    to the last used address: daddr_cache.
825 	 *    We do not want to save the whole address now
826 	 *    (because the main consumer of this service
827 	 *    is TCP, which does not have this problem),
828 	 *    so the last trick works only on connected
829 	 *    sockets.
830 	 * 2. oif should also be the same.
831 	 */
832 	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
833 #ifdef CONFIG_IPV6_SUBTREES
834 	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
835 #endif
836 	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
837 		dst_release(dst);
838 		dst = NULL;
839 	}
840 
841 out:
842 	return dst;
843 }
844 
845 static int ip6_dst_lookup_tail(struct sock *sk,
846 			       struct dst_entry **dst, struct flowi6 *fl6)
847 {
848 	struct net *net = sock_net(sk);
849 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
850 	struct neighbour *n;
851 	struct rt6_info *rt;
852 #endif
853 	int err;
854 
855 	if (*dst == NULL)
856 		*dst = ip6_route_output(net, sk, fl6);
857 
858 	if ((err = (*dst)->error))
859 		goto out_err_release;
860 
861 	if (ipv6_addr_any(&fl6->saddr)) {
862 		struct rt6_info *rt = (struct rt6_info *) *dst;
863 		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
864 					  sk ? inet6_sk(sk)->srcprefs : 0,
865 					  &fl6->saddr);
866 		if (err)
867 			goto out_err_release;
868 	}
869 
870 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
871 	/*
872 	 * Here, if the dst entry we've looked up
873 	 * has a neighbour entry that is in the INCOMPLETE
874 	 * state and the src address from the flow is
875 	 * marked as OPTIMISTIC, we release the found
876 	 * dst entry and replace it with the dst entry
877 	 * of the nexthop router.
878 	 */
879 	rt = (struct rt6_info *) *dst;
880 	rcu_read_lock_bh();
881 	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
882 	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
883 	rcu_read_unlock_bh();
884 
885 	if (err) {
886 		struct inet6_ifaddr *ifp;
887 		struct flowi6 fl_gw6;
888 		int redirect;
889 
890 		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
891 				      (*dst)->dev, 1);
892 
893 		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
894 		if (ifp)
895 			in6_ifa_put(ifp);
896 
897 		if (redirect) {
898 			/*
899 			 * We need to get the dst entry for the
900 			 * default router instead
901 			 */
902 			dst_release(*dst);
903 			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
904 			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
905 			*dst = ip6_route_output(net, sk, &fl_gw6);
906 			if ((err = (*dst)->error))
907 				goto out_err_release;
908 		}
909 	}
910 #endif
911 
912 	return 0;
913 
914 out_err_release:
915 	if (err == -ENETUNREACH)
916 		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
917 	dst_release(*dst);
918 	*dst = NULL;
919 	return err;
920 }
921 
922 /**
923  *	ip6_dst_lookup - perform route lookup on flow
924  *	@sk: socket which provides route info
925  *	@dst: pointer to dst_entry * for result
926  *	@fl6: flow to lookup
927  *
928  *	This function performs a route lookup on the given flow.
929  *
930  *	It returns zero on success, or a standard errno code on error.
931  */
932 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
933 {
934 	*dst = NULL;
935 	return ip6_dst_lookup_tail(sk, dst, fl6);
936 }
937 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
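/*
 * Illustrative use (not part of this file; the flow fields chosen here are
 * just an example).  On success the caller owns a reference on *dst and
 * typically attaches it to an skb or drops it with dst_release():
 *
 *	struct flowi6 fl6 = { .flowi6_proto = IPPROTO_UDP };
 *	struct dst_entry *dst;
 *	int err;
 *
 *	fl6.daddr = ...;
 *	fl6.flowi6_oif = sk->sk_bound_dev_if;
 *	err = ip6_dst_lookup(sk, &dst, &fl6);
 *	if (err)
 *		return err;	/* *dst is NULL on failure */
 */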
938 
939 /**
940  *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
941  *	@sk: socket which provides route info
942  *	@fl6: flow to lookup
943  *	@final_dst: final destination address for ipsec lookup
944  *
945  *	This function performs a route lookup on the given flow.
946  *
947  *	It returns a valid dst pointer on success, or a pointer encoded
948  *	error code.
949  */
950 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
951 				      const struct in6_addr *final_dst)
952 {
953 	struct dst_entry *dst = NULL;
954 	int err;
955 
956 	err = ip6_dst_lookup_tail(sk, &dst, fl6);
957 	if (err)
958 		return ERR_PTR(err);
959 	if (final_dst)
960 		fl6->daddr = *final_dst;
961 
962 	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
963 }
964 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
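/*
 * Note that the error is pointer encoded, so callers should check it with
 * IS_ERR()/PTR_ERR() rather than comparing against NULL, e.g.
 * (illustrative only):
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, NULL);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */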
965 
966 /**
967  *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
968  *	@sk: socket which provides the dst cache and route info
969  *	@fl6: flow to lookup
970  *	@final_dst: final destination address for ipsec lookup
971  *
972  *	This function performs a route lookup on the given flow with the
973  *	possibility of using the cached route in the socket if it is valid.
974  *	It will take the socket dst lock when operating on the dst cache.
975  *	As a result, this function can only be used in process context.
976  *
977  *	It returns a valid dst pointer on success, or a pointer encoded
978  *	error code.
979  */
980 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
981 					 const struct in6_addr *final_dst)
982 {
983 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
984 	int err;
985 
986 	dst = ip6_sk_dst_check(sk, dst, fl6);
987 
988 	err = ip6_dst_lookup_tail(sk, &dst, fl6);
989 	if (err)
990 		return ERR_PTR(err);
991 	if (final_dst)
992 		fl6->daddr = *final_dst;
993 
994 	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
995 }
996 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
997 
998 static inline int ip6_ufo_append_data(struct sock *sk,
999 			int getfrag(void *from, char *to, int offset, int len,
1000 			int odd, struct sk_buff *skb),
1001 			void *from, int length, int hh_len, int fragheaderlen,
1002 			int transhdrlen, int mtu, unsigned int flags,
1003 			struct rt6_info *rt)
1004 
1005 {
1006 	struct sk_buff *skb;
1007 	struct frag_hdr fhdr;
1008 	int err;
1009 
1010 	/* The network device supports UDP large send offload, so
1011 	 * create a single skb containing the complete UDP
1012 	 * datagram.
1013 	 */
1014 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1015 		skb = sock_alloc_send_skb(sk,
1016 			hh_len + fragheaderlen + transhdrlen + 20,
1017 			(flags & MSG_DONTWAIT), &err);
1018 		if (skb == NULL)
1019 			return err;
1020 
1021 		/* reserve space for Hardware header */
1022 		skb_reserve(skb, hh_len);
1023 
1024 		/* create space for UDP/IP header */
1025 		skb_put(skb, fragheaderlen + transhdrlen);
1026 
1027 		/* initialize network header pointer */
1028 		skb_reset_network_header(skb);
1029 
1030 		/* initialize protocol header pointer */
1031 		skb->transport_header = skb->network_header + fragheaderlen;
1032 
1033 		skb->protocol = htons(ETH_P_IPV6);
1034 		skb->csum = 0;
1035 
1036 		__skb_queue_tail(&sk->sk_write_queue, skb);
1037 	} else if (skb_is_gso(skb)) {
1038 		goto append;
1039 	}
1040 
1041 	skb->ip_summed = CHECKSUM_PARTIAL;
1042 	/* Specify the length of each IPv6 datagram fragment.
1043 	 * It has to be a multiple of 8.
1044 	 */
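	/* For example, assuming a plain 1500-byte MTU and no extension
	 * headers (fragheaderlen = 40): gso_size = (1500 - 40 - 8) & ~7
	 * = 1448, so each fragment carries 40 + 8 + 1448 = 1496 bytes on
	 * the wire and every fragment offset stays a multiple of 8.
	 */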
1045 	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1046 				     sizeof(struct frag_hdr)) & ~7;
1047 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1048 	ipv6_select_ident(&fhdr, rt);
1049 	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1050 
1051 append:
1052 	return skb_append_datato_frags(sk, skb, getfrag, from,
1053 				       (length - transhdrlen));
1054 }
1055 
1056 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1057 					       gfp_t gfp)
1058 {
1059 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1060 }
1061 
1062 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1063 						gfp_t gfp)
1064 {
1065 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1066 }
1067 
1068 static void ip6_append_data_mtu(unsigned int *mtu,
1069 				int *maxfraglen,
1070 				unsigned int fragheaderlen,
1071 				struct sk_buff *skb,
1072 				struct rt6_info *rt,
1073 				bool pmtuprobe)
1074 {
1075 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1076 		if (skb == NULL) {
1077 			/* first fragment, reserve header_len */
1078 			*mtu = *mtu - rt->dst.header_len;
1079 
1080 		} else {
1081 			/*
1082 			 * this fragment is not the first; the header
1083 			 * space is regarded as data space.
1084 			 */
1085 			*mtu = min(*mtu, pmtuprobe ?
1086 				   rt->dst.dev->mtu :
1087 				   dst_mtu(rt->dst.path));
1088 		}
1089 		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
1090 			      + fragheaderlen - sizeof(struct frag_hdr);
1091 	}
1092 }
1093 
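/*
 * ip6_append_data() queues user data on sk->sk_write_queue as one or more
 * MTU-sized skbs (or as a single large skb when the device supports UFO),
 * reserving room for the IPv6, extension and fragment headers.  The queued
 * data is later turned into packets by ip6_push_pending_frames() or
 * discarded by ip6_flush_pending_frames().
 */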
1094 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1095 	int offset, int len, int odd, struct sk_buff *skb),
1096 	void *from, int length, int transhdrlen,
1097 	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1098 	struct rt6_info *rt, unsigned int flags, int dontfrag)
1099 {
1100 	struct inet_sock *inet = inet_sk(sk);
1101 	struct ipv6_pinfo *np = inet6_sk(sk);
1102 	struct inet_cork *cork;
1103 	struct sk_buff *skb, *skb_prev = NULL;
1104 	unsigned int maxfraglen, fragheaderlen, mtu;
1105 	int exthdrlen;
1106 	int dst_exthdrlen;
1107 	int hh_len;
1108 	int copy;
1109 	int err;
1110 	int offset = 0;
1111 	__u8 tx_flags = 0;
1112 
1113 	if (flags&MSG_PROBE)
1114 		return 0;
1115 	cork = &inet->cork.base;
1116 	if (skb_queue_empty(&sk->sk_write_queue)) {
1117 		/*
1118 		 * setup for corking
1119 		 */
1120 		if (opt) {
1121 			if (WARN_ON(np->cork.opt))
1122 				return -EINVAL;
1123 
1124 			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1125 			if (unlikely(np->cork.opt == NULL))
1126 				return -ENOBUFS;
1127 
1128 			np->cork.opt->tot_len = opt->tot_len;
1129 			np->cork.opt->opt_flen = opt->opt_flen;
1130 			np->cork.opt->opt_nflen = opt->opt_nflen;
1131 
1132 			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1133 							    sk->sk_allocation);
1134 			if (opt->dst0opt && !np->cork.opt->dst0opt)
1135 				return -ENOBUFS;
1136 
1137 			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1138 							    sk->sk_allocation);
1139 			if (opt->dst1opt && !np->cork.opt->dst1opt)
1140 				return -ENOBUFS;
1141 
1142 			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1143 							   sk->sk_allocation);
1144 			if (opt->hopopt && !np->cork.opt->hopopt)
1145 				return -ENOBUFS;
1146 
1147 			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1148 							    sk->sk_allocation);
1149 			if (opt->srcrt && !np->cork.opt->srcrt)
1150 				return -ENOBUFS;
1151 
1152 			/* need source address above miyazawa*/
1153 		}
1154 		dst_hold(&rt->dst);
1155 		cork->dst = &rt->dst;
1156 		inet->cork.fl.u.ip6 = *fl6;
1157 		np->cork.hop_limit = hlimit;
1158 		np->cork.tclass = tclass;
1159 		if (rt->dst.flags & DST_XFRM_TUNNEL)
1160 			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1161 			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
1162 		else
1163 			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1164 			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1165 		if (np->frag_size < mtu) {
1166 			if (np->frag_size)
1167 				mtu = np->frag_size;
1168 		}
1169 		cork->fragsize = mtu;
1170 		if (dst_allfrag(rt->dst.path))
1171 			cork->flags |= IPCORK_ALLFRAG;
1172 		cork->length = 0;
1173 		exthdrlen = (opt ? opt->opt_flen : 0);
1174 		length += exthdrlen;
1175 		transhdrlen += exthdrlen;
1176 		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1177 	} else {
1178 		rt = (struct rt6_info *)cork->dst;
1179 		fl6 = &inet->cork.fl.u.ip6;
1180 		opt = np->cork.opt;
1181 		transhdrlen = 0;
1182 		exthdrlen = 0;
1183 		dst_exthdrlen = 0;
1184 		mtu = cork->fragsize;
1185 	}
1186 
1187 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1188 
1189 	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1190 			(opt ? opt->opt_nflen : 0);
1191 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1192 
1193 	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1194 		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1195 			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1196 			return -EMSGSIZE;
1197 		}
1198 	}
1199 
1200 	/* For UDP, check if TX timestamp is enabled */
1201 	if (sk->sk_type == SOCK_DGRAM)
1202 		sock_tx_timestamp(sk, &tx_flags);
1203 
1204 	/*
1205 	 * Let's try using as much space as possible.
1206 	 * Use MTU if total length of the message fits into the MTU.
1207 	 * Otherwise, we need to reserve the fragment header and
1208 	 * fragment alignment (= 8-15 octets, in total).
1209 	 *
1210 	 * Note that we may need to "move" the data from the tail
1211 	 * of the buffer to the new fragment when we split
1212 	 * the message.
1213 	 *
1214 	 * FIXME: It may be fragmented into multiple chunks
1215 	 *        at once if non-fragmentable extension headers
1216 	 *        are too large.
1217 	 * --yoshfuji
1218 	 */
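	/*
	 * For example, with a 1500-byte MTU and no extension headers
	 * (fragheaderlen = 40): maxfraglen = ((1500 - 40) & ~7) + 40 - 8
	 * = 1488, i.e. at most 1448 payload bytes per fragment, so that
	 * the 8-byte fragment header still fits (40 + 8 + 1448 = 1496)
	 * and each non-final fragment's payload is a multiple of 8.
	 */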
1219 
1220 	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1221 					   sk->sk_protocol == IPPROTO_RAW)) {
1222 		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1223 		return -EMSGSIZE;
1224 	}
1225 
1226 	skb = skb_peek_tail(&sk->sk_write_queue);
1227 	cork->length += length;
1228 	if (((length > mtu) ||
1229 	     (skb && skb_is_gso(skb))) &&
1230 	    (sk->sk_protocol == IPPROTO_UDP) &&
1231 	    (rt->dst.dev->features & NETIF_F_UFO)) {
1232 		err = ip6_ufo_append_data(sk, getfrag, from, length,
1233 					  hh_len, fragheaderlen,
1234 					  transhdrlen, mtu, flags, rt);
1235 		if (err)
1236 			goto error;
1237 		return 0;
1238 	}
1239 
1240 	if (!skb)
1241 		goto alloc_new_skb;
1242 
1243 	while (length > 0) {
1244 		/* Check if the remaining data fits into current packet. */
1245 		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1246 		if (copy < length)
1247 			copy = maxfraglen - skb->len;
1248 
1249 		if (copy <= 0) {
1250 			char *data;
1251 			unsigned int datalen;
1252 			unsigned int fraglen;
1253 			unsigned int fraggap;
1254 			unsigned int alloclen;
1255 alloc_new_skb:
1256 			/* There's no room in the current skb */
1257 			if (skb)
1258 				fraggap = skb->len - maxfraglen;
1259 			else
1260 				fraggap = 0;
1261 			/* update mtu and maxfraglen if necessary */
1262 			if (skb == NULL || skb_prev == NULL)
1263 				ip6_append_data_mtu(&mtu, &maxfraglen,
1264 						    fragheaderlen, skb, rt,
1265 						    np->pmtudisc >=
1266 						    IPV6_PMTUDISC_PROBE);
1267 
1268 			skb_prev = skb;
1269 
1270 			/*
1271 			 * If remaining data exceeds the mtu,
1272 			 * we know we need more fragment(s).
1273 			 */
1274 			datalen = length + fraggap;
1275 
1276 			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1277 				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1278 			if ((flags & MSG_MORE) &&
1279 			    !(rt->dst.dev->features&NETIF_F_SG))
1280 				alloclen = mtu;
1281 			else
1282 				alloclen = datalen + fragheaderlen;
1283 
1284 			alloclen += dst_exthdrlen;
1285 
1286 			if (datalen != length + fraggap) {
1287 				/*
1288 				 * this is not the last fragment, the trailer
1289 				 * space is regarded as data space.
1290 				 */
1291 				datalen += rt->dst.trailer_len;
1292 			}
1293 
1294 			alloclen += rt->dst.trailer_len;
1295 			fraglen = datalen + fragheaderlen;
1296 
1297 			/*
1298 			 * We just reserve space for the fragment header.
1299 			 * Note: this may be an overallocation if the message
1300 			 * (without MSG_MORE) fits into the MTU.
1301 			 */
1302 			alloclen += sizeof(struct frag_hdr);
1303 
1304 			if (transhdrlen) {
1305 				skb = sock_alloc_send_skb(sk,
1306 						alloclen + hh_len,
1307 						(flags & MSG_DONTWAIT), &err);
1308 			} else {
1309 				skb = NULL;
1310 				if (atomic_read(&sk->sk_wmem_alloc) <=
1311 				    2 * sk->sk_sndbuf)
1312 					skb = sock_wmalloc(sk,
1313 							   alloclen + hh_len, 1,
1314 							   sk->sk_allocation);
1315 				if (unlikely(skb == NULL))
1316 					err = -ENOBUFS;
1317 				else {
1318 					/* Only the initial fragment
1319 					 * is time stamped.
1320 					 */
1321 					tx_flags = 0;
1322 				}
1323 			}
1324 			if (skb == NULL)
1325 				goto error;
1326 			/*
1327 			 *	Fill in the control structures
1328 			 */
1329 			skb->protocol = htons(ETH_P_IPV6);
1330 			skb->ip_summed = CHECKSUM_NONE;
1331 			skb->csum = 0;
1332 			/* reserve for fragmentation and ipsec header */
1333 			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1334 				    dst_exthdrlen);
1335 
1336 			if (sk->sk_type == SOCK_DGRAM)
1337 				skb_shinfo(skb)->tx_flags = tx_flags;
1338 
1339 			/*
1340 			 *	Find where to start putting bytes
1341 			 */
1342 			data = skb_put(skb, fraglen);
1343 			skb_set_network_header(skb, exthdrlen);
1344 			data += fragheaderlen;
1345 			skb->transport_header = (skb->network_header +
1346 						 fragheaderlen);
1347 			if (fraggap) {
1348 				skb->csum = skb_copy_and_csum_bits(
1349 					skb_prev, maxfraglen,
1350 					data + transhdrlen, fraggap, 0);
1351 				skb_prev->csum = csum_sub(skb_prev->csum,
1352 							  skb->csum);
1353 				data += fraggap;
1354 				pskb_trim_unique(skb_prev, maxfraglen);
1355 			}
1356 			copy = datalen - transhdrlen - fraggap;
1357 
1358 			if (copy < 0) {
1359 				err = -EINVAL;
1360 				kfree_skb(skb);
1361 				goto error;
1362 			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1363 				err = -EFAULT;
1364 				kfree_skb(skb);
1365 				goto error;
1366 			}
1367 
1368 			offset += copy;
1369 			length -= datalen - fraggap;
1370 			transhdrlen = 0;
1371 			exthdrlen = 0;
1372 			dst_exthdrlen = 0;
1373 
1374 			/*
1375 			 * Put the packet on the pending queue
1376 			 */
1377 			__skb_queue_tail(&sk->sk_write_queue, skb);
1378 			continue;
1379 		}
1380 
1381 		if (copy > length)
1382 			copy = length;
1383 
1384 		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1385 			unsigned int off;
1386 
1387 			off = skb->len;
1388 			if (getfrag(from, skb_put(skb, copy),
1389 						offset, copy, off, skb) < 0) {
1390 				__skb_trim(skb, off);
1391 				err = -EFAULT;
1392 				goto error;
1393 			}
1394 		} else {
1395 			int i = skb_shinfo(skb)->nr_frags;
1396 			struct page_frag *pfrag = sk_page_frag(sk);
1397 
1398 			err = -ENOMEM;
1399 			if (!sk_page_frag_refill(sk, pfrag))
1400 				goto error;
1401 
1402 			if (!skb_can_coalesce(skb, i, pfrag->page,
1403 					      pfrag->offset)) {
1404 				err = -EMSGSIZE;
1405 				if (i == MAX_SKB_FRAGS)
1406 					goto error;
1407 
1408 				__skb_fill_page_desc(skb, i, pfrag->page,
1409 						     pfrag->offset, 0);
1410 				skb_shinfo(skb)->nr_frags = ++i;
1411 				get_page(pfrag->page);
1412 			}
1413 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1414 			if (getfrag(from,
1415 				    page_address(pfrag->page) + pfrag->offset,
1416 				    offset, copy, skb->len, skb) < 0)
1417 				goto error_efault;
1418 
1419 			pfrag->offset += copy;
1420 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1421 			skb->len += copy;
1422 			skb->data_len += copy;
1423 			skb->truesize += copy;
1424 			atomic_add(copy, &sk->sk_wmem_alloc);
1425 		}
1426 		offset += copy;
1427 		length -= copy;
1428 	}
1429 
1430 	return 0;
1431 
1432 error_efault:
1433 	err = -EFAULT;
1434 error:
1435 	cork->length -= length;
1436 	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1437 	return err;
1438 }
1439 EXPORT_SYMBOL_GPL(ip6_append_data);
1440 
1441 static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1442 {
1443 	if (np->cork.opt) {
1444 		kfree(np->cork.opt->dst0opt);
1445 		kfree(np->cork.opt->dst1opt);
1446 		kfree(np->cork.opt->hopopt);
1447 		kfree(np->cork.opt->srcrt);
1448 		kfree(np->cork.opt);
1449 		np->cork.opt = NULL;
1450 	}
1451 
1452 	if (inet->cork.base.dst) {
1453 		dst_release(inet->cork.base.dst);
1454 		inet->cork.base.dst = NULL;
1455 		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1456 	}
1457 	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1458 }
1459 
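/*
 * ip6_push_pending_frames() takes the skbs queued by ip6_append_data(),
 * chains the tail skbs onto the head skb's frag_list, prepends any
 * extension headers and the IPv6 header, and sends the result with
 * ip6_local_out().
 */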
1460 int ip6_push_pending_frames(struct sock *sk)
1461 {
1462 	struct sk_buff *skb, *tmp_skb;
1463 	struct sk_buff **tail_skb;
1464 	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1465 	struct inet_sock *inet = inet_sk(sk);
1466 	struct ipv6_pinfo *np = inet6_sk(sk);
1467 	struct net *net = sock_net(sk);
1468 	struct ipv6hdr *hdr;
1469 	struct ipv6_txoptions *opt = np->cork.opt;
1470 	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1471 	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1472 	unsigned char proto = fl6->flowi6_proto;
1473 	int err = 0;
1474 
1475 	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1476 		goto out;
1477 	tail_skb = &(skb_shinfo(skb)->frag_list);
1478 
1479 	/* move skb->data to ip header from ext header */
1480 	if (skb->data < skb_network_header(skb))
1481 		__skb_pull(skb, skb_network_offset(skb));
1482 	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1483 		__skb_pull(tmp_skb, skb_network_header_len(skb));
1484 		*tail_skb = tmp_skb;
1485 		tail_skb = &(tmp_skb->next);
1486 		skb->len += tmp_skb->len;
1487 		skb->data_len += tmp_skb->len;
1488 		skb->truesize += tmp_skb->truesize;
1489 		tmp_skb->destructor = NULL;
1490 		tmp_skb->sk = NULL;
1491 	}
1492 
1493 	/* Allow local fragmentation. */
1494 	if (np->pmtudisc < IPV6_PMTUDISC_DO)
1495 		skb->local_df = 1;
1496 
1497 	*final_dst = fl6->daddr;
1498 	__skb_pull(skb, skb_network_header_len(skb));
1499 	if (opt && opt->opt_flen)
1500 		ipv6_push_frag_opts(skb, opt, &proto);
1501 	if (opt && opt->opt_nflen)
1502 		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1503 
1504 	skb_push(skb, sizeof(struct ipv6hdr));
1505 	skb_reset_network_header(skb);
1506 	hdr = ipv6_hdr(skb);
1507 
1508 	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
1509 	hdr->hop_limit = np->cork.hop_limit;
1510 	hdr->nexthdr = proto;
1511 	hdr->saddr = fl6->saddr;
1512 	hdr->daddr = *final_dst;
1513 
1514 	skb->priority = sk->sk_priority;
1515 	skb->mark = sk->sk_mark;
1516 
1517 	skb_dst_set(skb, dst_clone(&rt->dst));
1518 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1519 	if (proto == IPPROTO_ICMPV6) {
1520 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1521 
1522 		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
1523 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1524 	}
1525 
1526 	err = ip6_local_out(skb);
1527 	if (err) {
1528 		if (err > 0)
1529 			err = net_xmit_errno(err);
1530 		if (err)
1531 			goto error;
1532 	}
1533 
1534 out:
1535 	ip6_cork_release(inet, np);
1536 	return err;
1537 error:
1538 	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1539 	goto out;
1540 }
1541 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1542 
1543 void ip6_flush_pending_frames(struct sock *sk)
1544 {
1545 	struct sk_buff *skb;
1546 
1547 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1548 		if (skb_dst(skb))
1549 			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1550 				      IPSTATS_MIB_OUTDISCARDS);
1551 		kfree_skb(skb);
1552 	}
1553 
1554 	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1555 }
1556 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1557