xref: /linux/net/ipv6/ip6_output.c (revision 80483c3abf8423e6ec4fb63647a8e2e1f5976801)
1 /*
2  *	IPv6 output functions
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on linux/net/ipv4/ip_output.c
9  *
10  *	This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  *
15  *	Changes:
16  *	A.N.Kuznetsov	:	arithmetic in fragmentation.
17  *				extension headers are implemented.
18  *				route changes now work.
19  *				ip6_forward does not confuse sniffers.
20  *				etc.
21  *
22  *      H. von Brand    :       Added missing #include <linux/string.h>
23  *	Imran Patel	:	frag id should be in NBO
24  *      Kazunori MIYAZAWA @USAGI
25  *			:       add ip6_append_data and related functions
26  *				for datagram xmit
27  */
28 
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41 
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
44 
45 #include <net/sock.h>
46 #include <net/snmp.h>
47 
48 #include <net/ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
54 #include <net/icmp.h>
55 #include <net/xfrm.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58 #include <net/l3mdev.h>
59 
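/* Final transmit step: resolve the IPv6 next hop to a neighbour entry and
 * hand the packet to the link layer; multicast packets may additionally be
 * looped back to local listeners.
 */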
60 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
61 {
62 	struct dst_entry *dst = skb_dst(skb);
63 	struct net_device *dev = dst->dev;
64 	struct neighbour *neigh;
65 	struct in6_addr *nexthop;
66 	int ret;
67 
68 	skb->protocol = htons(ETH_P_IPV6);
69 	skb->dev = dev;
70 
71 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
72 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
73 
74 		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
75 		    ((mroute6_socket(net, skb) &&
76 		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
77 		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
78 					 &ipv6_hdr(skb)->saddr))) {
79 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
80 
81 			/* Do not check for IFF_ALLMULTI; multicast routing
82 			   is not supported in any case.
83 			 */
84 			if (newskb)
85 				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
86 					net, sk, newskb, NULL, newskb->dev,
87 					dev_loopback_xmit);
88 
89 			if (ipv6_hdr(skb)->hop_limit == 0) {
90 				IP6_INC_STATS(net, idev,
91 					      IPSTATS_MIB_OUTDISCARDS);
92 				kfree_skb(skb);
93 				return 0;
94 			}
95 		}
96 
97 		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
98 
99 		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 		    IPV6_ADDR_SCOPE_NODELOCAL &&
101 		    !(dev->flags & IFF_LOOPBACK)) {
102 			kfree_skb(skb);
103 			return 0;
104 		}
105 	}
106 
107 	rcu_read_lock_bh();
108 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
109 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 	if (unlikely(!neigh))
111 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 	if (!IS_ERR(neigh)) {
113 		ret = dst_neigh_output(dst, neigh, skb);
114 		rcu_read_unlock_bh();
115 		return ret;
116 	}
117 	rcu_read_unlock_bh();
118 
119 	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
120 	kfree_skb(skb);
121 	return -EINVAL;
122 }
123 
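/* Fragment the packet before the final transmit step if it exceeds the
 * destination MTU and is not GSO, if the route requires fragmentation of
 * every packet (dst_allfrag), or if it exceeds the conntrack-recorded
 * frag_max_size.
 */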
124 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
125 {
126 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
127 	    dst_allfrag(skb_dst(skb)) ||
128 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
129 		return ip6_fragment(net, sk, skb, ip6_finish_output2);
130 	else
131 		return ip6_finish_output2(net, sk, skb);
132 }
133 
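/* dst output routine for IPv6: drop the packet if IPv6 is administratively
 * disabled on the outgoing device, otherwise run the netfilter POST_ROUTING
 * hook (skipped for rerouted packets) and continue in ip6_finish_output().
 */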
134 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
135 {
136 	struct net_device *dev = skb_dst(skb)->dev;
137 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
138 
139 	if (unlikely(idev->cnf.disable_ipv6)) {
140 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
141 		kfree_skb(skb);
142 		return 0;
143 	}
144 
145 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
146 			    net, sk, skb, NULL, dev,
147 			    ip6_finish_output,
148 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
149 }
150 
151 /*
152  * xmit an sk_buff (used by TCP, SCTP and DCCP)
153  * Note: the socket lock is not held for SYNACK packets, but the socket might
154  * still be modified by calls to skb_set_owner_w() and ipv6_local_error(),
155  * which use proper atomic operations or spinlocks.
156  */
157 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
158 	     struct ipv6_txoptions *opt, int tclass)
159 {
160 	struct net *net = sock_net(sk);
161 	const struct ipv6_pinfo *np = inet6_sk(sk);
162 	struct in6_addr *first_hop = &fl6->daddr;
163 	struct dst_entry *dst = skb_dst(skb);
164 	struct ipv6hdr *hdr;
165 	u8  proto = fl6->flowi6_proto;
166 	int seg_len = skb->len;
167 	int hlimit = -1;
168 	u32 mtu;
169 
170 	if (opt) {
171 		unsigned int head_room;
172 
173 		/* First: exthdrs may take lots of space (~8K for now);
174 		   MAX_HEADER is not enough.
175 		 */
176 		head_room = opt->opt_nflen + opt->opt_flen;
177 		seg_len += head_room;
178 		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
179 
180 		if (skb_headroom(skb) < head_room) {
181 			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
182 			if (!skb2) {
183 				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
184 					      IPSTATS_MIB_OUTDISCARDS);
185 				kfree_skb(skb);
186 				return -ENOBUFS;
187 			}
188 			consume_skb(skb);
189 			skb = skb2;
190 			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
191 			 * it is safe to call in our context (socket lock not held)
192 			 */
193 			skb_set_owner_w(skb, (struct sock *)sk);
194 		}
195 		if (opt->opt_flen)
196 			ipv6_push_frag_opts(skb, opt, &proto);
197 		if (opt->opt_nflen)
198 			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
199 	}
200 
201 	skb_push(skb, sizeof(struct ipv6hdr));
202 	skb_reset_network_header(skb);
203 	hdr = ipv6_hdr(skb);
204 
205 	/*
206 	 *	Fill in the IPv6 header
207 	 */
208 	if (np)
209 		hlimit = np->hop_limit;
210 	if (hlimit < 0)
211 		hlimit = ip6_dst_hoplimit(dst);
212 
213 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
214 						     np->autoflowlabel, fl6));
215 
216 	hdr->payload_len = htons(seg_len);
217 	hdr->nexthdr = proto;
218 	hdr->hop_limit = hlimit;
219 
220 	hdr->saddr = fl6->saddr;
221 	hdr->daddr = *first_hop;
222 
223 	skb->protocol = htons(ETH_P_IPV6);
224 	skb->priority = sk->sk_priority;
225 	skb->mark = sk->sk_mark;
226 
227 	mtu = dst_mtu(dst);
228 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
229 		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
230 			      IPSTATS_MIB_OUT, skb->len);
231 		/* hooks should never assume socket lock is held.
232 		 * we promote our socket to non const
233 		 */
234 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
235 			       net, (struct sock *)sk, skb, NULL, dst->dev,
236 			       dst_output);
237 	}
238 
239 	skb->dev = dst->dev;
240 	/* ipv6_local_error() does not require socket lock,
241 	 * we promote our socket to non const
242 	 */
243 	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
244 
245 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
246 	kfree_skb(skb);
247 	return -EMSGSIZE;
248 }
249 EXPORT_SYMBOL(ip6_xmit);
250 
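/* Deliver a Router Alert packet to every raw socket registered for this
 * alert value via IPV6_ROUTER_ALERT.  Returns 1 if the skb was consumed by
 * a listener, 0 otherwise.
 */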
251 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
252 {
253 	struct ip6_ra_chain *ra;
254 	struct sock *last = NULL;
255 
256 	read_lock(&ip6_ra_lock);
257 	for (ra = ip6_ra_chain; ra; ra = ra->next) {
258 		struct sock *sk = ra->sk;
259 		if (sk && ra->sel == sel &&
260 		    (!sk->sk_bound_dev_if ||
261 		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
262 			if (last) {
263 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
264 				if (skb2)
265 					rawv6_rcv(last, skb2);
266 			}
267 			last = sk;
268 		}
269 	}
270 
271 	if (last) {
272 		rawv6_rcv(last, skb);
273 		read_unlock(&ip6_ra_lock);
274 		return 1;
275 	}
276 	read_unlock(&ip6_ra_lock);
277 	return 0;
278 }
279 
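/* When proxying neighbour discovery for the destination, decide how a
 * forwarded packet must be treated: 1 means deliver it locally (NDISC
 * messages), -1 means reject it (link-local destination), 0 means forward
 * it normally.
 */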
280 static int ip6_forward_proxy_check(struct sk_buff *skb)
281 {
282 	struct ipv6hdr *hdr = ipv6_hdr(skb);
283 	u8 nexthdr = hdr->nexthdr;
284 	__be16 frag_off;
285 	int offset;
286 
287 	if (ipv6_ext_hdr(nexthdr)) {
288 		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
289 		if (offset < 0)
290 			return 0;
291 	} else
292 		offset = sizeof(struct ipv6hdr);
293 
294 	if (nexthdr == IPPROTO_ICMPV6) {
295 		struct icmp6hdr *icmp6;
296 
297 		if (!pskb_may_pull(skb, (skb_network_header(skb) +
298 					 offset + 1 - skb->data)))
299 			return 0;
300 
301 		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
302 
303 		switch (icmp6->icmp6_type) {
304 		case NDISC_ROUTER_SOLICITATION:
305 		case NDISC_ROUTER_ADVERTISEMENT:
306 		case NDISC_NEIGHBOUR_SOLICITATION:
307 		case NDISC_NEIGHBOUR_ADVERTISEMENT:
308 		case NDISC_REDIRECT:
309 			/* For unicast neighbour discovery messages
310 			 * destined to the proxied address, pass them to
311 			 * the input function.
312 			 */
313 			return 1;
314 		default:
315 			break;
316 		}
317 	}
318 
319 	/*
320 	 * The proxying router can't forward traffic sent to a link-local
321 	 * address, so signal the sender and discard the packet. This
322 	 * behavior is clarified by the MIPv6 specification.
323 	 */
324 	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
325 		dst_link_failure(skb);
326 		return -1;
327 	}
328 
329 	return 0;
330 }
331 
332 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
333 				     struct sk_buff *skb)
334 {
335 	return dst_output(net, sk, skb);
336 }
337 
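/* MTU to use when forwarding: a locked RTAX_MTU route metric takes
 * precedence, otherwise fall back to the outgoing device's IPv6 MTU.
 */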
338 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
339 {
340 	unsigned int mtu;
341 	struct inet6_dev *idev;
342 
343 	if (dst_metric_locked(dst, RTAX_MTU)) {
344 		mtu = dst_metric_raw(dst, RTAX_MTU);
345 		if (mtu)
346 			return mtu;
347 	}
348 
349 	mtu = IPV6_MIN_MTU;
350 	rcu_read_lock();
351 	idev = __in6_dev_get(dst->dev);
352 	if (idev)
353 		mtu = idev->cnf.mtu6;
354 	rcu_read_unlock();
355 
356 	return mtu;
357 }
358 
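/* A packet is "too big" when it exceeds the forwarding MTU and cannot be
 * passed on as-is: conntrack-defragmented packets are judged by their
 * original fragment size, ignore_df packets are let through, and GSO
 * packets are allowed if they will segment to within the MTU.
 */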
359 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
360 {
361 	if (skb->len <= mtu)
362 		return false;
363 
364 	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
365 	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
366 		return true;
367 
368 	if (skb->ignore_df)
369 		return false;
370 
371 	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
372 		return false;
373 
374 	return true;
375 }
376 
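/* Forwarding path: validate that the packet may be forwarded, handle the
 * Router Alert and proxy-ND special cases, send redirects and ICMP errors
 * where required, decrement the hop limit and pass the packet to the
 * netfilter FORWARD hook.
 */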
377 int ip6_forward(struct sk_buff *skb)
378 {
379 	struct dst_entry *dst = skb_dst(skb);
380 	struct ipv6hdr *hdr = ipv6_hdr(skb);
381 	struct inet6_skb_parm *opt = IP6CB(skb);
382 	struct net *net = dev_net(dst->dev);
383 	u32 mtu;
384 
385 	if (net->ipv6.devconf_all->forwarding == 0)
386 		goto error;
387 
388 	if (skb->pkt_type != PACKET_HOST)
389 		goto drop;
390 
391 	if (unlikely(skb->sk))
392 		goto drop;
393 
394 	if (skb_warn_if_lro(skb))
395 		goto drop;
396 
397 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
398 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
399 				IPSTATS_MIB_INDISCARDS);
400 		goto drop;
401 	}
402 
403 	skb_forward_csum(skb);
404 
405 	/*
406 	 *	We do NOT do any processing on
407 	 *	RA packets, pushing them to user level AS IS
408 	 *	without any guarantee that the application will be
409 	 *	able to interpret them. The reason is that we
410 	 *	cannot do anything clever here.
411 	 *
412 	 *	We are not the end node, so if the packet contains
413 	 *	AH/ESP we cannot do anything with it.
414 	 *	Defragmentation would also be a mistake; RA packets
415 	 *	cannot be fragmented, because there is no guarantee
416 	 *	that different fragments will follow the same path. --ANK
417 	 */
418 	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
419 		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
420 			return 0;
421 	}
422 
423 	/*
424 	 *	check and decrement ttl
425 	 */
426 	if (hdr->hop_limit <= 1) {
427 		/* Force OUTPUT device used as source address */
428 		skb->dev = dst->dev;
429 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
430 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
431 				IPSTATS_MIB_INHDRERRORS);
432 
433 		kfree_skb(skb);
434 		return -ETIMEDOUT;
435 	}
436 
437 	/* XXX: idev->cnf.proxy_ndp? */
438 	if (net->ipv6.devconf_all->proxy_ndp &&
439 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
440 		int proxied = ip6_forward_proxy_check(skb);
441 		if (proxied > 0)
442 			return ip6_input(skb);
443 		else if (proxied < 0) {
444 			__IP6_INC_STATS(net, ip6_dst_idev(dst),
445 					IPSTATS_MIB_INDISCARDS);
446 			goto drop;
447 		}
448 	}
449 
450 	if (!xfrm6_route_forward(skb)) {
451 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
452 				IPSTATS_MIB_INDISCARDS);
453 		goto drop;
454 	}
455 	dst = skb_dst(skb);
456 
457 	/* IPv6 specs say nothing about it, but it is clear that we cannot
458 	   send redirects to source routed frames.
459 	   We don't send redirects to frames decapsulated from IPsec.
460 	 */
461 	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
462 		struct in6_addr *target = NULL;
463 		struct inet_peer *peer;
464 		struct rt6_info *rt;
465 
466 		/*
467 		 *	incoming and outgoing devices are the same;
468 		 *	send a redirect.
469 		 */
470 
471 		rt = (struct rt6_info *) dst;
472 		if (rt->rt6i_flags & RTF_GATEWAY)
473 			target = &rt->rt6i_gateway;
474 		else
475 			target = &hdr->daddr;
476 
477 		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
478 
479 		/* Limit redirects both by destination (here)
480 		   and by source (inside ndisc_send_redirect)
481 		 */
482 		if (inet_peer_xrlim_allow(peer, 1*HZ))
483 			ndisc_send_redirect(skb, target);
484 		if (peer)
485 			inet_putpeer(peer);
486 	} else {
487 		int addrtype = ipv6_addr_type(&hdr->saddr);
488 
489 		/* This check is security critical. */
490 		if (addrtype == IPV6_ADDR_ANY ||
491 		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
492 			goto error;
493 		if (addrtype & IPV6_ADDR_LINKLOCAL) {
494 			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
495 				    ICMPV6_NOT_NEIGHBOUR, 0);
496 			goto error;
497 		}
498 	}
499 
500 	mtu = ip6_dst_mtu_forward(dst);
501 	if (mtu < IPV6_MIN_MTU)
502 		mtu = IPV6_MIN_MTU;
503 
504 	if (ip6_pkt_too_big(skb, mtu)) {
505 		/* Again, force OUTPUT device used as source address */
506 		skb->dev = dst->dev;
507 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
508 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
509 				IPSTATS_MIB_INTOOBIGERRORS);
510 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
511 				IPSTATS_MIB_FRAGFAILS);
512 		kfree_skb(skb);
513 		return -EMSGSIZE;
514 	}
515 
516 	if (skb_cow(skb, dst->dev->hard_header_len)) {
517 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
518 				IPSTATS_MIB_OUTDISCARDS);
519 		goto drop;
520 	}
521 
522 	hdr = ipv6_hdr(skb);
523 
524 	/* Decrementing the hop limit is delayed until after the skb COW */
525 
526 	hdr->hop_limit--;
527 
528 	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
529 	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
530 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
531 		       net, NULL, skb, skb->dev, dst->dev,
532 		       ip6_forward_finish);
533 
534 error:
535 	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
536 drop:
537 	kfree_skb(skb);
538 	return -EINVAL;
539 }
540 
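/* Copy the fields each fragment must inherit from the original skb: packet
 * type, priority, protocol, dst, device, mark, traffic-control index and
 * netfilter/security state.
 */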
541 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
542 {
543 	to->pkt_type = from->pkt_type;
544 	to->priority = from->priority;
545 	to->protocol = from->protocol;
546 	skb_dst_drop(to);
547 	skb_dst_set(to, dst_clone(skb_dst(from)));
548 	to->dev = from->dev;
549 	to->mark = from->mark;
550 
551 #ifdef CONFIG_NET_SCHED
552 	to->tc_index = from->tc_index;
553 #endif
554 	nf_copy(to, from);
555 	skb_copy_secmark(to, from);
556 }
557 
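/* Split an oversized packet into fragments and feed them to the supplied
 * output callback.  The fast path reuses an existing frag_list when its
 * geometry already matches the fragment size; otherwise the slow path
 * allocates new skbs and copies the payload into them.
 */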
558 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
559 		 int (*output)(struct net *, struct sock *, struct sk_buff *))
560 {
561 	struct sk_buff *frag;
562 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
563 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
564 				inet6_sk(skb->sk) : NULL;
565 	struct ipv6hdr *tmp_hdr;
566 	struct frag_hdr *fh;
567 	unsigned int mtu, hlen, left, len;
568 	int hroom, troom;
569 	__be32 frag_id;
570 	int ptr, offset = 0, err = 0;
571 	u8 *prevhdr, nexthdr = 0;
572 
573 	hlen = ip6_find_1stfragopt(skb, &prevhdr);
574 	nexthdr = *prevhdr;
575 
576 	mtu = ip6_skb_dst_mtu(skb);
577 
578 	/* We must not fragment if the socket is set to force MTU discovery
579 	 * or if the skb is not generated by a local socket.
580 	 */
581 	if (unlikely(!skb->ignore_df && skb->len > mtu))
582 		goto fail_toobig;
583 
584 	if (IP6CB(skb)->frag_max_size) {
585 		if (IP6CB(skb)->frag_max_size > mtu)
586 			goto fail_toobig;
587 
588 		/* don't send fragments larger than what we received */
589 		mtu = IP6CB(skb)->frag_max_size;
590 		if (mtu < IPV6_MIN_MTU)
591 			mtu = IPV6_MIN_MTU;
592 	}
593 
594 	if (np && np->frag_size < mtu) {
595 		if (np->frag_size)
596 			mtu = np->frag_size;
597 	}
598 	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
599 		goto fail_toobig;
600 	mtu -= hlen + sizeof(struct frag_hdr);
601 
602 	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
603 				    &ipv6_hdr(skb)->saddr);
604 
605 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
606 	    (err = skb_checksum_help(skb)))
607 		goto fail;
608 
609 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
610 	if (skb_has_frag_list(skb)) {
611 		int first_len = skb_pagelen(skb);
612 		struct sk_buff *frag2;
613 
614 		if (first_len - hlen > mtu ||
615 		    ((first_len - hlen) & 7) ||
616 		    skb_cloned(skb) ||
617 		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
618 			goto slow_path;
619 
620 		skb_walk_frags(skb, frag) {
621 			/* Correct geometry. */
622 			if (frag->len > mtu ||
623 			    ((frag->len & 7) && frag->next) ||
624 			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
625 				goto slow_path_clean;
626 
627 			/* Partially cloned skb? */
628 			if (skb_shared(frag))
629 				goto slow_path_clean;
630 
631 			BUG_ON(frag->sk);
632 			if (skb->sk) {
633 				frag->sk = skb->sk;
634 				frag->destructor = sock_wfree;
635 			}
636 			skb->truesize -= frag->truesize;
637 		}
638 
639 		err = 0;
640 		offset = 0;
641 		/* BUILD HEADER */
642 
643 		*prevhdr = NEXTHDR_FRAGMENT;
644 		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
645 		if (!tmp_hdr) {
646 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
647 				      IPSTATS_MIB_FRAGFAILS);
648 			err = -ENOMEM;
649 			goto fail;
650 		}
651 		frag = skb_shinfo(skb)->frag_list;
652 		skb_frag_list_init(skb);
653 
654 		__skb_pull(skb, hlen);
655 		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
656 		__skb_push(skb, hlen);
657 		skb_reset_network_header(skb);
658 		memcpy(skb_network_header(skb), tmp_hdr, hlen);
659 
660 		fh->nexthdr = nexthdr;
661 		fh->reserved = 0;
662 		fh->frag_off = htons(IP6_MF);
663 		fh->identification = frag_id;
664 
665 		first_len = skb_pagelen(skb);
666 		skb->data_len = first_len - skb_headlen(skb);
667 		skb->len = first_len;
668 		ipv6_hdr(skb)->payload_len = htons(first_len -
669 						   sizeof(struct ipv6hdr));
670 
671 		dst_hold(&rt->dst);
672 
673 		for (;;) {
674 			/* Prepare the header of the next frame
675 			 * before the previous one goes down. */
676 			if (frag) {
677 				frag->ip_summed = CHECKSUM_NONE;
678 				skb_reset_transport_header(frag);
679 				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
680 				__skb_push(frag, hlen);
681 				skb_reset_network_header(frag);
682 				memcpy(skb_network_header(frag), tmp_hdr,
683 				       hlen);
684 				offset += skb->len - hlen - sizeof(struct frag_hdr);
685 				fh->nexthdr = nexthdr;
686 				fh->reserved = 0;
687 				fh->frag_off = htons(offset);
688 				if (frag->next)
689 					fh->frag_off |= htons(IP6_MF);
690 				fh->identification = frag_id;
691 				ipv6_hdr(frag)->payload_len =
692 						htons(frag->len -
693 						      sizeof(struct ipv6hdr));
694 				ip6_copy_metadata(frag, skb);
695 			}
696 
697 			err = output(net, sk, skb);
698 			if (!err)
699 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
700 					      IPSTATS_MIB_FRAGCREATES);
701 
702 			if (err || !frag)
703 				break;
704 
705 			skb = frag;
706 			frag = skb->next;
707 			skb->next = NULL;
708 		}
709 
710 		kfree(tmp_hdr);
711 
712 		if (err == 0) {
713 			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
714 				      IPSTATS_MIB_FRAGOKS);
715 			ip6_rt_put(rt);
716 			return 0;
717 		}
718 
719 		kfree_skb_list(frag);
720 
721 		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
722 			      IPSTATS_MIB_FRAGFAILS);
723 		ip6_rt_put(rt);
724 		return err;
725 
726 slow_path_clean:
727 		skb_walk_frags(skb, frag2) {
728 			if (frag2 == frag)
729 				break;
730 			frag2->sk = NULL;
731 			frag2->destructor = NULL;
732 			skb->truesize += frag2->truesize;
733 		}
734 	}
735 
736 slow_path:
737 	left = skb->len - hlen;		/* Space per frame */
738 	ptr = hlen;			/* Where to start from */
739 
740 	/*
741 	 *	Fragment the datagram.
742 	 */
743 
744 	*prevhdr = NEXTHDR_FRAGMENT;
745 	troom = rt->dst.dev->needed_tailroom;
746 
747 	/*
748 	 *	Keep copying data until we run out.
749 	 */
750 	while (left > 0)	{
751 		len = left;
752 		/* IF: it doesn't fit, use 'mtu' - the data space left */
753 		if (len > mtu)
754 			len = mtu;
755 		/* IF: we are not sending up to and including the packet end
756 		   then align the next start on an eight byte boundary */
757 		if (len < left)	{
758 			len &= ~7;
759 		}
760 
761 		/* Allocate buffer */
762 		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
763 				 hroom + troom, GFP_ATOMIC);
764 		if (!frag) {
765 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
766 				      IPSTATS_MIB_FRAGFAILS);
767 			err = -ENOMEM;
768 			goto fail;
769 		}
770 
771 		/*
772 		 *	Set up data on packet
773 		 */
774 
775 		ip6_copy_metadata(frag, skb);
776 		skb_reserve(frag, hroom);
777 		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
778 		skb_reset_network_header(frag);
779 		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
780 		frag->transport_header = (frag->network_header + hlen +
781 					  sizeof(struct frag_hdr));
782 
783 		/*
784 		 *	Charge the memory for the fragment to any owner
785 		 *	it might possess
786 		 */
787 		if (skb->sk)
788 			skb_set_owner_w(frag, skb->sk);
789 
790 		/*
791 		 *	Copy the packet header into the new buffer.
792 		 */
793 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
794 
795 		/*
796 		 *	Build fragment header.
797 		 */
798 		fh->nexthdr = nexthdr;
799 		fh->reserved = 0;
800 		fh->identification = frag_id;
801 
802 		/*
803 		 *	Copy a block of the IP datagram.
804 		 */
805 		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
806 				     len));
807 		left -= len;
808 
809 		fh->frag_off = htons(offset);
810 		if (left > 0)
811 			fh->frag_off |= htons(IP6_MF);
812 		ipv6_hdr(frag)->payload_len = htons(frag->len -
813 						    sizeof(struct ipv6hdr));
814 
815 		ptr += len;
816 		offset += len;
817 
818 		/*
819 		 *	Put this fragment into the sending queue.
820 		 */
821 		err = output(net, sk, frag);
822 		if (err)
823 			goto fail;
824 
825 		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
826 			      IPSTATS_MIB_FRAGCREATES);
827 	}
828 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
829 		      IPSTATS_MIB_FRAGOKS);
830 	consume_skb(skb);
831 	return err;
832 
833 fail_toobig:
834 	if (skb->sk && dst_allfrag(skb_dst(skb)))
835 		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
836 
837 	skb->dev = skb_dst(skb)->dev;
838 	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
839 	err = -EMSGSIZE;
840 
841 fail:
842 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
843 		      IPSTATS_MIB_FRAGFAILS);
844 	kfree_skb(skb);
845 	return err;
846 }
847 
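/* Helper for ip6_sk_dst_check(): returns nonzero when neither the host-route
 * key nor the socket's cached address matches the flow address, i.e. the
 * cached route can no longer be trusted for this destination.
 */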
848 static inline int ip6_rt_check(const struct rt6key *rt_key,
849 			       const struct in6_addr *fl_addr,
850 			       const struct in6_addr *addr_cache)
851 {
852 	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
853 		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
854 }
855 
856 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
857 					  struct dst_entry *dst,
858 					  const struct flowi6 *fl6)
859 {
860 	struct ipv6_pinfo *np = inet6_sk(sk);
861 	struct rt6_info *rt;
862 
863 	if (!dst)
864 		goto out;
865 
866 	if (dst->ops->family != AF_INET6) {
867 		dst_release(dst);
868 		return NULL;
869 	}
870 
871 	rt = (struct rt6_info *)dst;
872 	/* Yes, checking route validity in the not-connected
873 	 * case is not very simple. Take into account
874 	 * that we do not support routing by source, TOS,
875 	 * or MSG_DONTROUTE		--ANK (980726)
876 	 *
877 	 * 1. ip6_rt_check(): If the route was a host route,
878 	 *    check that the cached destination is current.
879 	 *    If it is a network route, we may still
880 	 *    check its validity using a saved pointer
881 	 *    to the last used address: daddr_cache.
882 	 *    We do not want to save the whole address now
883 	 *    (because the main consumer of this service
884 	 *    is TCP, which does not have this problem),
885 	 *    so this last trick works only on connected
886 	 *    sockets.
887 	 * 2. oif should also be the same.
888 	 */
889 	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
890 #ifdef CONFIG_IPV6_SUBTREES
891 	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
892 #endif
893 	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
894 	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
895 		dst_release(dst);
896 		dst = NULL;
897 	}
898 
899 out:
900 	return dst;
901 }
902 
903 static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
904 			       struct dst_entry **dst, struct flowi6 *fl6)
905 {
906 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
907 	struct neighbour *n;
908 	struct rt6_info *rt;
909 #endif
910 	int err;
911 	int flags = 0;
912 
913 	if (ipv6_addr_any(&fl6->saddr) && fl6->flowi6_oif &&
914 	    (!*dst || !(*dst)->error)) {
915 		err = l3mdev_get_saddr6(net, sk, fl6);
916 		if (err)
917 			goto out_err;
918 	}
919 
920 	/* The correct way to handle this would be to do
921 	 * ip6_route_get_saddr, and then ip6_route_output; however,
922 	 * the route-specific preferred source forces the
923 	 * ip6_route_output call _before_ ip6_route_get_saddr.
924 	 *
925 	 * In source specific routing (no src=any default route),
926 	 * ip6_route_output will fail given src=any saddr, though, so
927 	 * that's why we try it again later.
928 	 */
929 	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
930 		struct rt6_info *rt;
931 		bool had_dst = *dst != NULL;
932 
933 		if (!had_dst)
934 			*dst = ip6_route_output(net, sk, fl6);
935 		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
936 		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
937 					  sk ? inet6_sk(sk)->srcprefs : 0,
938 					  &fl6->saddr);
939 		if (err)
940 			goto out_err_release;
941 
942 		/* If we had an erroneous initial result, pretend it
943 		 * never existed and let the SA-enabled version take
944 		 * over.
945 		 */
946 		if (!had_dst && (*dst)->error) {
947 			dst_release(*dst);
948 			*dst = NULL;
949 		}
950 
951 		if (fl6->flowi6_oif)
952 			flags |= RT6_LOOKUP_F_IFACE;
953 	}
954 
955 	if (!*dst)
956 		*dst = ip6_route_output_flags(net, sk, fl6, flags);
957 
958 	err = (*dst)->error;
959 	if (err)
960 		goto out_err_release;
961 
962 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
963 	/*
964 	 * Here, if the dst entry we've looked up
965 	 * has a neighbour entry that is in the INCOMPLETE
966 	 * state and the source address from the flow is
967 	 * marked as OPTIMISTIC, we release the found
968 	 * dst entry and replace it with the
969 	 * dst entry of the nexthop router.
970 	 */
971 	rt = (struct rt6_info *) *dst;
972 	rcu_read_lock_bh();
973 	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
974 				      rt6_nexthop(rt, &fl6->daddr));
975 	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
976 	rcu_read_unlock_bh();
977 
978 	if (err) {
979 		struct inet6_ifaddr *ifp;
980 		struct flowi6 fl_gw6;
981 		int redirect;
982 
983 		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
984 				      (*dst)->dev, 1);
985 
986 		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
987 		if (ifp)
988 			in6_ifa_put(ifp);
989 
990 		if (redirect) {
991 			/*
992 			 * We need to get the dst entry for the
993 			 * default router instead
994 			 */
995 			dst_release(*dst);
996 			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
997 			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
998 			*dst = ip6_route_output(net, sk, &fl_gw6);
999 			err = (*dst)->error;
1000 			if (err)
1001 				goto out_err_release;
1002 		}
1003 	}
1004 #endif
1005 
1006 	return 0;
1007 
1008 out_err_release:
1009 	dst_release(*dst);
1010 	*dst = NULL;
1011 out_err:
1012 	if (err == -ENETUNREACH)
1013 		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1014 	return err;
1015 }
1016 
1017 /**
1018  *	ip6_dst_lookup - perform route lookup on flow
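 *	@net: network namespace to perform the lookup in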
1019  *	@sk: socket which provides route info
1020  *	@dst: pointer to dst_entry * for result
1021  *	@fl6: flow to lookup
1022  *
1023  *	This function performs a route lookup on the given flow.
1024  *
1025  *	It returns zero on success, or a standard errno code on error.
1026  */
1027 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1028 		   struct flowi6 *fl6)
1029 {
1030 	*dst = NULL;
1031 	return ip6_dst_lookup_tail(net, sk, dst, fl6);
1032 }
1033 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1034 
1035 /**
1036  *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1037  *	@sk: socket which provides route info
1038  *	@fl6: flow to lookup
1039  *	@final_dst: final destination address for ipsec lookup
1040  *
1041  *	This function performs a route lookup on the given flow.
1042  *
1043  *	It returns a valid dst pointer on success, or a pointer encoded
1044  *	error code.
1045  */
1046 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
1047 				      const struct in6_addr *final_dst)
1048 {
1049 	struct dst_entry *dst = NULL;
1050 	int err;
1051 
1052 	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1053 	if (err)
1054 		return ERR_PTR(err);
1055 	if (final_dst)
1056 		fl6->daddr = *final_dst;
1057 	if (!fl6->flowi6_oif)
1058 		fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
1059 
1060 	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1061 }
1062 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1063 
1064 /**
1065  *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1066  *	@sk: socket which provides the dst cache and route info
1067  *	@fl6: flow to lookup
1068  *	@final_dst: final destination address for ipsec lookup
1069  *
1070  *	This function performs a route lookup on the given flow with the
1071  *	possibility of using the cached route in the socket if it is valid.
1072  *	It will take the socket dst lock when operating on the dst cache.
1073  *	As a result, this function can only be used in process context.
1074  *
1075  *	It returns a valid dst pointer on success, or a pointer encoded
1076  *	error code.
1077  */
1078 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1079 					 const struct in6_addr *final_dst)
1080 {
1081 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1082 
1083 	dst = ip6_sk_dst_check(sk, dst, fl6);
1084 	if (!dst)
1085 		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
1086 
1087 	return dst;
1088 }
1089 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1090 
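/* UFO path of ip6_append_data(): build (or extend) one large skb that the
 * device will segment into UDP fragments itself, instead of splitting the
 * data into MTU-sized packets here.
 */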
1091 static inline int ip6_ufo_append_data(struct sock *sk,
1092 			struct sk_buff_head *queue,
1093 			int getfrag(void *from, char *to, int offset, int len,
1094 			int odd, struct sk_buff *skb),
1095 			void *from, int length, int hh_len, int fragheaderlen,
1096 			int exthdrlen, int transhdrlen, int mtu,
1097 			unsigned int flags, const struct flowi6 *fl6)
1098 
1099 {
1100 	struct sk_buff *skb;
1101 	int err;
1102 
1103 	/* The network device supports UDP large send offload,
1104 	 * so create one single skb packet containing the complete
1105 	 * UDP datagram.
1106 	 */
1107 	skb = skb_peek_tail(queue);
1108 	if (!skb) {
1109 		skb = sock_alloc_send_skb(sk,
1110 			hh_len + fragheaderlen + transhdrlen + 20,
1111 			(flags & MSG_DONTWAIT), &err);
1112 		if (!skb)
1113 			return err;
1114 
1115 		/* reserve space for Hardware header */
1116 		skb_reserve(skb, hh_len);
1117 
1118 		/* create space for UDP/IP header */
1119 		skb_put(skb, fragheaderlen + transhdrlen);
1120 
1121 		/* initialize network header pointer */
1122 		skb_set_network_header(skb, exthdrlen);
1123 
1124 		/* initialize protocol header pointer */
1125 		skb->transport_header = skb->network_header + fragheaderlen;
1126 
1127 		skb->protocol = htons(ETH_P_IPV6);
1128 		skb->csum = 0;
1129 
1130 		__skb_queue_tail(queue, skb);
1131 	} else if (skb_is_gso(skb)) {
1132 		goto append;
1133 	}
1134 
1135 	skb->ip_summed = CHECKSUM_PARTIAL;
1136 	/* Specify the length of each IPv6 datagram fragment.
1137 	 * It has to be a multiple of 8.
1138 	 */
1139 	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1140 				     sizeof(struct frag_hdr)) & ~7;
1141 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1142 	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
1143 							 &fl6->daddr,
1144 							 &fl6->saddr);
1145 
1146 append:
1147 	return skb_append_datato_frags(sk, skb, getfrag, from,
1148 				       (length - transhdrlen));
1149 }
1150 
1151 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1152 					       gfp_t gfp)
1153 {
1154 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1155 }
1156 
1157 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1158 						gfp_t gfp)
1159 {
1160 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1161 }
1162 
1163 static void ip6_append_data_mtu(unsigned int *mtu,
1164 				int *maxfraglen,
1165 				unsigned int fragheaderlen,
1166 				struct sk_buff *skb,
1167 				struct rt6_info *rt,
1168 				unsigned int orig_mtu)
1169 {
1170 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1171 		if (!skb) {
1172 			/* first fragment, reserve header_len */
1173 			*mtu = orig_mtu - rt->dst.header_len;
1174 
1175 		} else {
1176 			/*
1177 			 * this fragment is not the first; the header
1178 			 * space is regarded as data space.
1179 			 */
1180 			*mtu = orig_mtu;
1181 		}
1182 		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
1183 			      + fragheaderlen - sizeof(struct frag_hdr);
1184 	}
1185 }
1186 
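/* Initialise the cork state for a corked send: duplicate the transmit
 * options, take a reference on the route and record the MTU, hop limit and
 * traffic class to use for the queued data.
 */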
1187 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1188 			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
1189 			  struct rt6_info *rt, struct flowi6 *fl6)
1190 {
1191 	struct ipv6_pinfo *np = inet6_sk(sk);
1192 	unsigned int mtu;
1193 	struct ipv6_txoptions *opt = ipc6->opt;
1194 
1195 	/*
1196 	 * setup for corking
1197 	 */
1198 	if (opt) {
1199 		if (WARN_ON(v6_cork->opt))
1200 			return -EINVAL;
1201 
1202 		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1203 		if (unlikely(!v6_cork->opt))
1204 			return -ENOBUFS;
1205 
1206 		v6_cork->opt->tot_len = opt->tot_len;
1207 		v6_cork->opt->opt_flen = opt->opt_flen;
1208 		v6_cork->opt->opt_nflen = opt->opt_nflen;
1209 
1210 		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1211 						    sk->sk_allocation);
1212 		if (opt->dst0opt && !v6_cork->opt->dst0opt)
1213 			return -ENOBUFS;
1214 
1215 		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1216 						    sk->sk_allocation);
1217 		if (opt->dst1opt && !v6_cork->opt->dst1opt)
1218 			return -ENOBUFS;
1219 
1220 		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1221 						   sk->sk_allocation);
1222 		if (opt->hopopt && !v6_cork->opt->hopopt)
1223 			return -ENOBUFS;
1224 
1225 		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1226 						    sk->sk_allocation);
1227 		if (opt->srcrt && !v6_cork->opt->srcrt)
1228 			return -ENOBUFS;
1229 
1230 		/* need source address above --miyazawa */
1231 	}
1232 	dst_hold(&rt->dst);
1233 	cork->base.dst = &rt->dst;
1234 	cork->fl.u.ip6 = *fl6;
1235 	v6_cork->hop_limit = ipc6->hlimit;
1236 	v6_cork->tclass = ipc6->tclass;
1237 	if (rt->dst.flags & DST_XFRM_TUNNEL)
1238 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1239 		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
1240 	else
1241 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1242 		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1243 	if (np->frag_size < mtu) {
1244 		if (np->frag_size)
1245 			mtu = np->frag_size;
1246 	}
1247 	cork->base.fragsize = mtu;
1248 	if (dst_allfrag(rt->dst.path))
1249 		cork->base.flags |= IPCORK_ALLFRAG;
1250 	cork->base.length = 0;
1251 
1252 	return 0;
1253 }
1254 
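/* Core of ip6_append_data()/ip6_make_skb(): append user data to the queue
 * of pending skbs, growing the tail skb or starting new fragments so that
 * each queued packet stays within the fragment-aligned MTU recorded in the
 * cork.
 */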
1255 static int __ip6_append_data(struct sock *sk,
1256 			     struct flowi6 *fl6,
1257 			     struct sk_buff_head *queue,
1258 			     struct inet_cork *cork,
1259 			     struct inet6_cork *v6_cork,
1260 			     struct page_frag *pfrag,
1261 			     int getfrag(void *from, char *to, int offset,
1262 					 int len, int odd, struct sk_buff *skb),
1263 			     void *from, int length, int transhdrlen,
1264 			     unsigned int flags, struct ipcm6_cookie *ipc6,
1265 			     const struct sockcm_cookie *sockc)
1266 {
1267 	struct sk_buff *skb, *skb_prev = NULL;
1268 	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1269 	int exthdrlen = 0;
1270 	int dst_exthdrlen = 0;
1271 	int hh_len;
1272 	int copy;
1273 	int err;
1274 	int offset = 0;
1275 	__u8 tx_flags = 0;
1276 	u32 tskey = 0;
1277 	struct rt6_info *rt = (struct rt6_info *)cork->dst;
1278 	struct ipv6_txoptions *opt = v6_cork->opt;
1279 	int csummode = CHECKSUM_NONE;
1280 	unsigned int maxnonfragsize, headersize;
1281 
1282 	skb = skb_peek_tail(queue);
1283 	if (!skb) {
1284 		exthdrlen = opt ? opt->opt_flen : 0;
1285 		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1286 	}
1287 
1288 	mtu = cork->fragsize;
1289 	orig_mtu = mtu;
1290 
1291 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1292 
1293 	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1294 			(opt ? opt->opt_nflen : 0);
1295 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1296 		     sizeof(struct frag_hdr);
1297 
1298 	headersize = sizeof(struct ipv6hdr) +
1299 		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1300 		     (dst_allfrag(&rt->dst) ?
1301 		      sizeof(struct frag_hdr) : 0) +
1302 		     rt->rt6i_nfheader_len;
1303 
1304 	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
1305 	    (sk->sk_protocol == IPPROTO_UDP ||
1306 	     sk->sk_protocol == IPPROTO_RAW)) {
1307 		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1308 				sizeof(struct ipv6hdr));
1309 		goto emsgsize;
1310 	}
1311 
1312 	if (ip6_sk_ignore_df(sk))
1313 		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1314 	else
1315 		maxnonfragsize = mtu;
1316 
1317 	if (cork->length + length > maxnonfragsize - headersize) {
1318 emsgsize:
1319 		ipv6_local_error(sk, EMSGSIZE, fl6,
1320 				 mtu - headersize +
1321 				 sizeof(struct ipv6hdr));
1322 		return -EMSGSIZE;
1323 	}
1324 
1325 	/* CHECKSUM_PARTIAL only with no extension headers and when
1326 	 * we are not going to fragment
1327 	 */
1328 	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1329 	    headersize == sizeof(struct ipv6hdr) &&
1330 	    length < mtu - headersize &&
1331 	    !(flags & MSG_MORE) &&
1332 	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1333 		csummode = CHECKSUM_PARTIAL;
1334 
1335 	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1336 		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
1337 		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1338 		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1339 			tskey = sk->sk_tskey++;
1340 	}
1341 
1342 	/*
1343 	 * Let's try using as much space as possible.
1344 	 * Use MTU if total length of the message fits into the MTU.
1345 	 * Otherwise, we need to reserve fragment header and
1346 	 * fragment alignment (= 8-15 octets, in total).
1347 	 *
1348 	 * Note that we may need to "move" the data from the tail
1349 	 * of the buffer to the new fragment when we split
1350 	 * the message.
1351 	 *
1352 	 * FIXME: It may be fragmented into multiple chunks
1353 	 *        at once if non-fragmentable extension headers
1354 	 *        are too large.
1355 	 * --yoshfuji
1356 	 */
1357 
1358 	cork->length += length;
1359 	if (((length > mtu) ||
1360 	     (skb && skb_is_gso(skb))) &&
1361 	    (sk->sk_protocol == IPPROTO_UDP) &&
1362 	    (rt->dst.dev->features & NETIF_F_UFO) &&
1363 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1364 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1365 					  hh_len, fragheaderlen, exthdrlen,
1366 					  transhdrlen, mtu, flags, fl6);
1367 		if (err)
1368 			goto error;
1369 		return 0;
1370 	}
1371 
1372 	if (!skb)
1373 		goto alloc_new_skb;
1374 
1375 	while (length > 0) {
1376 		/* Check if the remaining data fits into current packet. */
1377 		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1378 		if (copy < length)
1379 			copy = maxfraglen - skb->len;
1380 
1381 		if (copy <= 0) {
1382 			char *data;
1383 			unsigned int datalen;
1384 			unsigned int fraglen;
1385 			unsigned int fraggap;
1386 			unsigned int alloclen;
1387 alloc_new_skb:
1388 			/* There's no room in the current skb */
1389 			if (skb)
1390 				fraggap = skb->len - maxfraglen;
1391 			else
1392 				fraggap = 0;
1393 			/* update mtu and maxfraglen if necessary */
1394 			if (!skb || !skb_prev)
1395 				ip6_append_data_mtu(&mtu, &maxfraglen,
1396 						    fragheaderlen, skb, rt,
1397 						    orig_mtu);
1398 
1399 			skb_prev = skb;
1400 
1401 			/*
1402 			 * If remaining data exceeds the mtu,
1403 			 * we know we need more fragment(s).
1404 			 */
1405 			datalen = length + fraggap;
1406 
1407 			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1408 				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1409 			if ((flags & MSG_MORE) &&
1410 			    !(rt->dst.dev->features&NETIF_F_SG))
1411 				alloclen = mtu;
1412 			else
1413 				alloclen = datalen + fragheaderlen;
1414 
1415 			alloclen += dst_exthdrlen;
1416 
1417 			if (datalen != length + fraggap) {
1418 				/*
1419 				 * this is not the last fragment; the trailer
1420 				 * space is regarded as data space.
1421 				 */
1422 				datalen += rt->dst.trailer_len;
1423 			}
1424 
1425 			alloclen += rt->dst.trailer_len;
1426 			fraglen = datalen + fragheaderlen;
1427 
1428 			/*
1429 			 * We just reserve space for fragment header.
1430 			 * Note: this may be overallocation if the message
1431 			 * (without MSG_MORE) fits into the MTU.
1432 			 */
1433 			alloclen += sizeof(struct frag_hdr);
1434 
1435 			if (transhdrlen) {
1436 				skb = sock_alloc_send_skb(sk,
1437 						alloclen + hh_len,
1438 						(flags & MSG_DONTWAIT), &err);
1439 			} else {
1440 				skb = NULL;
1441 				if (atomic_read(&sk->sk_wmem_alloc) <=
1442 				    2 * sk->sk_sndbuf)
1443 					skb = sock_wmalloc(sk,
1444 							   alloclen + hh_len, 1,
1445 							   sk->sk_allocation);
1446 				if (unlikely(!skb))
1447 					err = -ENOBUFS;
1448 			}
1449 			if (!skb)
1450 				goto error;
1451 			/*
1452 			 *	Fill in the control structures
1453 			 */
1454 			skb->protocol = htons(ETH_P_IPV6);
1455 			skb->ip_summed = csummode;
1456 			skb->csum = 0;
1457 			/* reserve for fragmentation and ipsec header */
1458 			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1459 				    dst_exthdrlen);
1460 
1461 			/* Only the initial fragment is time stamped */
1462 			skb_shinfo(skb)->tx_flags = tx_flags;
1463 			tx_flags = 0;
1464 			skb_shinfo(skb)->tskey = tskey;
1465 			tskey = 0;
1466 
1467 			/*
1468 			 *	Find where to start putting bytes
1469 			 */
1470 			data = skb_put(skb, fraglen);
1471 			skb_set_network_header(skb, exthdrlen);
1472 			data += fragheaderlen;
1473 			skb->transport_header = (skb->network_header +
1474 						 fragheaderlen);
1475 			if (fraggap) {
1476 				skb->csum = skb_copy_and_csum_bits(
1477 					skb_prev, maxfraglen,
1478 					data + transhdrlen, fraggap, 0);
1479 				skb_prev->csum = csum_sub(skb_prev->csum,
1480 							  skb->csum);
1481 				data += fraggap;
1482 				pskb_trim_unique(skb_prev, maxfraglen);
1483 			}
1484 			copy = datalen - transhdrlen - fraggap;
1485 
1486 			if (copy < 0) {
1487 				err = -EINVAL;
1488 				kfree_skb(skb);
1489 				goto error;
1490 			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1491 				err = -EFAULT;
1492 				kfree_skb(skb);
1493 				goto error;
1494 			}
1495 
1496 			offset += copy;
1497 			length -= datalen - fraggap;
1498 			transhdrlen = 0;
1499 			exthdrlen = 0;
1500 			dst_exthdrlen = 0;
1501 
1502 			/*
1503 			 * Put the packet on the pending queue
1504 			 */
1505 			__skb_queue_tail(queue, skb);
1506 			continue;
1507 		}
1508 
1509 		if (copy > length)
1510 			copy = length;
1511 
1512 		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1513 			unsigned int off;
1514 
1515 			off = skb->len;
1516 			if (getfrag(from, skb_put(skb, copy),
1517 						offset, copy, off, skb) < 0) {
1518 				__skb_trim(skb, off);
1519 				err = -EFAULT;
1520 				goto error;
1521 			}
1522 		} else {
1523 			int i = skb_shinfo(skb)->nr_frags;
1524 
1525 			err = -ENOMEM;
1526 			if (!sk_page_frag_refill(sk, pfrag))
1527 				goto error;
1528 
1529 			if (!skb_can_coalesce(skb, i, pfrag->page,
1530 					      pfrag->offset)) {
1531 				err = -EMSGSIZE;
1532 				if (i == MAX_SKB_FRAGS)
1533 					goto error;
1534 
1535 				__skb_fill_page_desc(skb, i, pfrag->page,
1536 						     pfrag->offset, 0);
1537 				skb_shinfo(skb)->nr_frags = ++i;
1538 				get_page(pfrag->page);
1539 			}
1540 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1541 			if (getfrag(from,
1542 				    page_address(pfrag->page) + pfrag->offset,
1543 				    offset, copy, skb->len, skb) < 0)
1544 				goto error_efault;
1545 
1546 			pfrag->offset += copy;
1547 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1548 			skb->len += copy;
1549 			skb->data_len += copy;
1550 			skb->truesize += copy;
1551 			atomic_add(copy, &sk->sk_wmem_alloc);
1552 		}
1553 		offset += copy;
1554 		length -= copy;
1555 	}
1556 
1557 	return 0;
1558 
1559 error_efault:
1560 	err = -EFAULT;
1561 error:
1562 	cork->length -= length;
1563 	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1564 	return err;
1565 }
1566 
1567 int ip6_append_data(struct sock *sk,
1568 		    int getfrag(void *from, char *to, int offset, int len,
1569 				int odd, struct sk_buff *skb),
1570 		    void *from, int length, int transhdrlen,
1571 		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1572 		    struct rt6_info *rt, unsigned int flags,
1573 		    const struct sockcm_cookie *sockc)
1574 {
1575 	struct inet_sock *inet = inet_sk(sk);
1576 	struct ipv6_pinfo *np = inet6_sk(sk);
1577 	int exthdrlen;
1578 	int err;
1579 
1580 	if (flags&MSG_PROBE)
1581 		return 0;
1582 	if (skb_queue_empty(&sk->sk_write_queue)) {
1583 		/*
1584 		 * setup for corking
1585 		 */
1586 		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
1587 				     ipc6, rt, fl6);
1588 		if (err)
1589 			return err;
1590 
1591 		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1592 		length += exthdrlen;
1593 		transhdrlen += exthdrlen;
1594 	} else {
1595 		fl6 = &inet->cork.fl.u.ip6;
1596 		transhdrlen = 0;
1597 	}
1598 
1599 	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1600 				 &np->cork, sk_page_frag(sk), getfrag,
1601 				 from, length, transhdrlen, flags, ipc6, sockc);
1602 }
1603 EXPORT_SYMBOL_GPL(ip6_append_data);
1604 
1605 static void ip6_cork_release(struct inet_cork_full *cork,
1606 			     struct inet6_cork *v6_cork)
1607 {
1608 	if (v6_cork->opt) {
1609 		kfree(v6_cork->opt->dst0opt);
1610 		kfree(v6_cork->opt->dst1opt);
1611 		kfree(v6_cork->opt->hopopt);
1612 		kfree(v6_cork->opt->srcrt);
1613 		kfree(v6_cork->opt);
1614 		v6_cork->opt = NULL;
1615 	}
1616 
1617 	if (cork->base.dst) {
1618 		dst_release(cork->base.dst);
1619 		cork->base.dst = NULL;
1620 		cork->base.flags &= ~IPCORK_ALLFRAG;
1621 	}
1622 	memset(&cork->fl, 0, sizeof(cork->fl));
1623 }
1624 
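/* Collapse the queue of pending skbs into a single packet: chain the later
 * skbs onto the first one's frag_list, push the extension headers and the
 * IPv6 header, then release the cork state.
 */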
1625 struct sk_buff *__ip6_make_skb(struct sock *sk,
1626 			       struct sk_buff_head *queue,
1627 			       struct inet_cork_full *cork,
1628 			       struct inet6_cork *v6_cork)
1629 {
1630 	struct sk_buff *skb, *tmp_skb;
1631 	struct sk_buff **tail_skb;
1632 	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1633 	struct ipv6_pinfo *np = inet6_sk(sk);
1634 	struct net *net = sock_net(sk);
1635 	struct ipv6hdr *hdr;
1636 	struct ipv6_txoptions *opt = v6_cork->opt;
1637 	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1638 	struct flowi6 *fl6 = &cork->fl.u.ip6;
1639 	unsigned char proto = fl6->flowi6_proto;
1640 
1641 	skb = __skb_dequeue(queue);
1642 	if (!skb)
1643 		goto out;
1644 	tail_skb = &(skb_shinfo(skb)->frag_list);
1645 
1646 	/* move skb->data to ip header from ext header */
1647 	if (skb->data < skb_network_header(skb))
1648 		__skb_pull(skb, skb_network_offset(skb));
1649 	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1650 		__skb_pull(tmp_skb, skb_network_header_len(skb));
1651 		*tail_skb = tmp_skb;
1652 		tail_skb = &(tmp_skb->next);
1653 		skb->len += tmp_skb->len;
1654 		skb->data_len += tmp_skb->len;
1655 		skb->truesize += tmp_skb->truesize;
1656 		tmp_skb->destructor = NULL;
1657 		tmp_skb->sk = NULL;
1658 	}
1659 
1660 	/* Allow local fragmentation. */
1661 	skb->ignore_df = ip6_sk_ignore_df(sk);
1662 
1663 	*final_dst = fl6->daddr;
1664 	__skb_pull(skb, skb_network_header_len(skb));
1665 	if (opt && opt->opt_flen)
1666 		ipv6_push_frag_opts(skb, opt, &proto);
1667 	if (opt && opt->opt_nflen)
1668 		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1669 
1670 	skb_push(skb, sizeof(struct ipv6hdr));
1671 	skb_reset_network_header(skb);
1672 	hdr = ipv6_hdr(skb);
1673 
1674 	ip6_flow_hdr(hdr, v6_cork->tclass,
1675 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
1676 					np->autoflowlabel, fl6));
1677 	hdr->hop_limit = v6_cork->hop_limit;
1678 	hdr->nexthdr = proto;
1679 	hdr->saddr = fl6->saddr;
1680 	hdr->daddr = *final_dst;
1681 
1682 	skb->priority = sk->sk_priority;
1683 	skb->mark = sk->sk_mark;
1684 
1685 	skb_dst_set(skb, dst_clone(&rt->dst));
1686 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1687 	if (proto == IPPROTO_ICMPV6) {
1688 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1689 
1690 		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1691 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1692 	}
1693 
1694 	ip6_cork_release(cork, v6_cork);
1695 out:
1696 	return skb;
1697 }
1698 
1699 int ip6_send_skb(struct sk_buff *skb)
1700 {
1701 	struct net *net = sock_net(skb->sk);
1702 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1703 	int err;
1704 
1705 	err = ip6_local_out(net, skb->sk, skb);
1706 	if (err) {
1707 		if (err > 0)
1708 			err = net_xmit_errno(err);
1709 		if (err)
1710 			IP6_INC_STATS(net, rt->rt6i_idev,
1711 				      IPSTATS_MIB_OUTDISCARDS);
1712 	}
1713 
1714 	return err;
1715 }
1716 
1717 int ip6_push_pending_frames(struct sock *sk)
1718 {
1719 	struct sk_buff *skb;
1720 
1721 	skb = ip6_finish_skb(sk);
1722 	if (!skb)
1723 		return 0;
1724 
1725 	return ip6_send_skb(skb);
1726 }
1727 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1728 
1729 static void __ip6_flush_pending_frames(struct sock *sk,
1730 				       struct sk_buff_head *queue,
1731 				       struct inet_cork_full *cork,
1732 				       struct inet6_cork *v6_cork)
1733 {
1734 	struct sk_buff *skb;
1735 
1736 	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1737 		if (skb_dst(skb))
1738 			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1739 				      IPSTATS_MIB_OUTDISCARDS);
1740 		kfree_skb(skb);
1741 	}
1742 
1743 	ip6_cork_release(cork, v6_cork);
1744 }
1745 
1746 void ip6_flush_pending_frames(struct sock *sk)
1747 {
1748 	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1749 				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1750 }
1751 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1752 
1753 struct sk_buff *ip6_make_skb(struct sock *sk,
1754 			     int getfrag(void *from, char *to, int offset,
1755 					 int len, int odd, struct sk_buff *skb),
1756 			     void *from, int length, int transhdrlen,
1757 			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1758 			     struct rt6_info *rt, unsigned int flags,
1759 			     const struct sockcm_cookie *sockc)
1760 {
1761 	struct inet_cork_full cork;
1762 	struct inet6_cork v6_cork;
1763 	struct sk_buff_head queue;
1764 	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1765 	int err;
1766 
1767 	if (flags & MSG_PROBE)
1768 		return NULL;
1769 
1770 	__skb_queue_head_init(&queue);
1771 
1772 	cork.base.flags = 0;
1773 	cork.base.addr = 0;
1774 	cork.base.opt = NULL;
1775 	v6_cork.opt = NULL;
1776 	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1777 	if (err)
1778 		return ERR_PTR(err);
1779 
1780 	if (ipc6->dontfrag < 0)
1781 		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1782 
1783 	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
1784 				&current->task_frag, getfrag, from,
1785 				length + exthdrlen, transhdrlen + exthdrlen,
1786 				flags, ipc6, sockc);
1787 	if (err) {
1788 		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1789 		return ERR_PTR(err);
1790 	}
1791 
1792 	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
1793 }
1794