// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/gso.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>

static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct inet6_dev *idev = ip6_dst_idev(dst);
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	const struct in6_addr *daddr, *nexthop;
	struct ipv6hdr *hdr;
	struct neighbour *neigh;
	int ret;

	/* Be paranoid, rather than too clever. */
	if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
		/* Make sure idev stays alive */
		rcu_read_lock();
		skb = skb_expand_head(skb, hh_len);
		if (!skb) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
			rcu_read_unlock();
			return -ENOMEM;
		}
		rcu_read_unlock();
	}

	hdr = ipv6_hdr(skb);
	daddr = &hdr->daddr;
	if (ipv6_addr_is_multicast(daddr)) {
		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_is_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, daddr, &hdr->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (hdr->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
		if (IPV6_ADDR_MC_SCOPE(daddr) <= IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res != LWTUNNEL_XMIT_CONTINUE)
			return res;
	}

	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	rcu_read_lock();
	nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);

	if (unlikely(IS_ERR_OR_NULL(neigh))) {
		if (unlikely(!neigh))
			neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
		if (IS_ERR(neigh)) {
			rcu_read_unlock();
			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
			return -EINVAL;
		}
	}
	sock_confirm_neigh(skb, neigh);
	ret = neigh_output(neigh, skb, false);
	rcu_read_unlock();
	return ret;
}

static int
ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
				    struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* Please see corresponding comment in ip_finish_output_gso
	 * describing the cases where GSO segment length exceeds the
	 * egress MTU.
	 */
	features = netif_skb_features(skb);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		/* Last GSO segment can be smaller than gso_size (and MTU).
		 * Adding a fragment header would produce an "atomic fragment",
		 * which is considered harmful (RFC 8021). Avoid that.
		 */
		err = segs->len > mtu ?
			ip6_fragment(net, sk, segs, ip6_finish_output2) :
			ip6_finish_output2(net, sk, segs);
		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

static int ip6_finish_output_gso(struct net *net, struct sock *sk,
				 struct sk_buff *skb, unsigned int mtu)
{
	if (!(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) &&
	    !skb_gso_validate_network_len(skb, mtu))
		return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);

	return ip6_finish_output2(net, sk, skb);
}

static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IP6CB(skb)->flags |= IP6SKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif

	mtu = ip6_skb_dst_mtu(skb);
	if (skb_is_gso(skb))
		return ip6_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);

	return ip6_finish_output2(net, sk, skb);
}

static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
	case NET_XMIT_CN:
		return __ip6_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}
}

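/**
 *	ip6_output - common IPv6 output routine
 *	@net: network namespace the packet belongs to
 *	@sk: originating socket (may be %NULL)
 *	@skb: packet to transmit, with its dst already set
 *
 *	dst_output() entry point for locally generated and forwarded
 *	IPv6 packets.  Drops the packet when IPv6 is disabled on the
 *	egress device, then runs the NF_INET_POST_ROUTING hook (unless
 *	the packet was rerouted) before ip6_finish_output() hands it
 *	to the neighbour layer.
 */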
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
EXPORT_SYMBOL(ip6_output);

bool ip6_autoflowlabel(struct net *net, const struct sock *sk)
{
	if (!inet6_test_bit(AUTOFLOWLABEL_SET, sk))
		return ip6_default_np_autolabel(net);
	return inet6_test_bit(AUTOFLOWLABEL, sk);
}

/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: the socket lock is not held for SYNACK packets, but the socket
 * might be modified by calls to skb_set_owner_w() and ipv6_local_error(),
 * which use proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct inet6_dev *idev = ip6_dst_idev(dst);
	struct hop_jumbo_hdr *hop_jumbo;
	int hoplen = sizeof(*hop_jumbo);
	unsigned int head_room;
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
	if (opt)
		head_room += opt->opt_nflen + opt->opt_flen;

	if (unlikely(head_room > skb_headroom(skb))) {
		/* Make sure idev stays alive */
		rcu_read_lock();
		skb = skb_expand_head(skb, head_room);
		if (!skb) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
			rcu_read_unlock();
			return -ENOBUFS;
		}
		rcu_read_unlock();
	}

	if (opt) {
		seg_len += opt->opt_nflen + opt->opt_flen;

		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);

		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
					     &fl6->saddr);
	}

	if (unlikely(seg_len > IPV6_MAXPLEN)) {
		hop_jumbo = skb_push(skb, hoplen);

		hop_jumbo->nexthdr = proto;
		hop_jumbo->hdrlen = 0;
		hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
		hop_jumbo->tlv_len = 4;
		hop_jumbo->jumbo_payload_len = htonl(seg_len + hoplen);

		proto = IPPROTO_HOPOPTS;
		seg_len = 0;
		IP6CB(skb)->flags |= IP6SKB_FAKEJUMBO;
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = READ_ONCE(np->hop_limit);
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
				ip6_autoflowlabel(net, sk), fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = priority;
	skb->mark = mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);

		/* if egress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dev,
			       dst_output);
	}

	skb->dev = dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, idev, IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

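/*
 * Example (illustrative sketch, not part of this file's API): a
 * connection-oriented caller, loosely mirroring inet6_csk_xmit().
 * Error handling and the RCU read lock around np->opt are elided.
 *
 *	struct ipv6_pinfo *np = inet6_sk(sk);
 *	struct flowi6 fl6;
 *
 *	memset(&fl6, 0, sizeof(fl6));
 *	fl6.flowi6_proto = sk->sk_protocol;
 *	fl6.daddr = sk->sk_v6_daddr;
 *	fl6.saddr = np->saddr;
 *	fl6.flowi6_oif = sk->sk_bound_dev_if;
 *
 *	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark,
 *		       rcu_dereference(np->opt), np->tclass,
 *		       READ_ONCE(sk->sk_priority));
 */
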
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {

			if (inet6_test_bit(RTALERT_ISOLATE, sk) &&
			    !net_eq(sock_net(sk), dev_net(skb->dev))) {
				continue;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* Unicast neighbour discovery messages destined
			 * to the proxied address are passed to the
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
#ifdef CONFIG_NET_SWITCHDEV
	if (skb->offload_l3_fwd_mark) {
		consume_skb(skb);
		return 0;
	}
#endif

	skb_clear_tstamp(skb);
	return dst_output(net, sk, skb);
}

static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

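/**
 *	ip6_forward - forward an IPv6 packet towards its next hop
 *	@skb: packet to forward, with a valid dst attached
 *
 *	Applies the forwarding checks (forwarding enabled, hop limit,
 *	XFRM policy, proxy NDP, redirects, MTU), emits the relevant
 *	ICMPv6 errors, decrements the hop limit after the skb has been
 *	COWed and runs the NF_INET_FORWARD hook.  Returns 0 when the
 *	packet has been passed on (or consumed) and a negative errno
 *	when it has been dropped.
 */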
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	struct inet6_dev *idev;
	SKB_DR(reason);
	u32 mtu;

	idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
	if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) &&
	    (!idev || !READ_ONCE(idev->cnf.disable_policy)) &&
	    !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT do any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot do anything clever here.
	 *
	 *	We are not an end-node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);

		kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0) {
			/* It's tempting to decrease the hop limit
			 * here by 1, as we do at the end of the
			 * function too.
			 *
			 * But that would be incorrect, as proxying is
			 * not forwarding.  The ip6_input function
			 * will handle this packet locally, and it
			 * depends on the hop limit being unchanged.
			 *
			 * One example is the NDP hop limit, which
			 * always has to stay 255; similar checks
			 * exist around RA packets, where the user can
			 * even change the desired limit.
			 */
			return ip6_input(skb);
		} else if (proxied < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		SKB_DR_SET(reason, XFRM_POLICY);
		goto drop;
	}
	dst = skb_dst(skb);

	/* The IPv6 specs say nothing about it, but it is clear that we
	   cannot send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (IP6CB(skb)->iif == dst->dev->ifindex &&
	    opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = dst_rt6_info(dst);
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);

	mtu = ip6_dst_mtu_maybe_forward(dst, true);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force the OUTPUT device to be used as the
		 * source address of the ICMP error. */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
	SKB_DR_SET(reason, IP_INADDRERRORS);
drop:
	kfree_skb_reason(skb, reason);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
	skb_copy_secmark(to, from);
}

int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
		      u8 nexthdr, __be32 frag_id,
		      struct ip6_fraglist_iter *iter)
{
	unsigned int first_len;
	struct frag_hdr *fh;

	/* BUILD HEADER */
	*prevhdr = NEXTHDR_FRAGMENT;
	iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
	if (!iter->tmp_hdr)
		return -ENOMEM;

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->hlen = hlen;
	iter->frag_id = frag_id;
	iter->nexthdr = nexthdr;

	__skb_pull(skb, hlen);
	fh = __skb_push(skb, sizeof(struct frag_hdr));
	__skb_push(skb, hlen);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);

	fh->nexthdr = nexthdr;
	fh->reserved = 0;
	fh->frag_off = htons(IP6_MF);
	fh->identification = frag_id;

	first_len = skb_pagelen(skb);
	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr));

	return 0;
}
EXPORT_SYMBOL(ip6_fraglist_init);

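/*
 * Sketch of the fraglist fast-path pattern (for illustration; the
 * real loop lives in ip6_fragment() below): initialise the iterator,
 * then alternately prepare and emit fragments until the frag list is
 * exhausted.
 *
 *	struct ip6_fraglist_iter iter;
 *
 *	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
 *	...
 *	for (;;) {
 *		if (iter.frag)
 *			ip6_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip6_fraglist_next(&iter);
 *	}
 *	kfree(iter.tmp_hdr);
 */
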
void ip6_fraglist_prepare(struct sk_buff *skb,
			  struct ip6_fraglist_iter *iter)
{
	struct sk_buff *frag = iter->frag;
	unsigned int hlen = iter->hlen;
	struct frag_hdr *fh;

	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	fh = __skb_push(frag, sizeof(struct frag_hdr));
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
	iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
	fh->nexthdr = iter->nexthdr;
	fh->reserved = 0;
	fh->frag_off = htons(iter->offset);
	if (frag->next)
		fh->frag_off |= htons(IP6_MF);
	fh->identification = iter->frag_id;
	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
	ip6_copy_metadata(frag, skb);
}
EXPORT_SYMBOL(ip6_fraglist_prepare);

void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
		   unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
		   u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state)
{
	state->prevhdr = prevhdr;
	state->nexthdr = nexthdr;
	state->frag_id = frag_id;

	state->hlen = hlen;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->hroom = hdr_room;
	state->troom = needed_tailroom;

	state->offset = 0;
}
EXPORT_SYMBOL(ip6_frag_init);

struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state)
{
	u8 *prevhdr = state->prevhdr, *fragnexthdr_offset;
	struct sk_buff *frag;
	struct frag_hdr *fh;
	unsigned int len;

	len = state->left;
	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left)
		len &= ~7;

	/* Allocate buffer */
	frag = alloc_skb(len + state->hlen + sizeof(struct frag_hdr) +
			 state->hroom + state->troom, GFP_ATOMIC);
	if (!frag)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip6_copy_metadata(frag, skb);
	skb_reserve(frag, state->hroom);
	skb_put(frag, len + state->hlen + sizeof(struct frag_hdr));
	skb_reset_network_header(frag);
	fh = (struct frag_hdr *)(skb_network_header(frag) + state->hlen);
	frag->transport_header = (frag->network_header + state->hlen +
				  sizeof(struct frag_hdr));

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */
	if (skb->sk)
		skb_set_owner_w(frag, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */
	skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen);

	fragnexthdr_offset = skb_network_header(frag);
	fragnexthdr_offset += prevhdr - skb_network_header(skb);
	*fragnexthdr_offset = NEXTHDR_FRAGMENT;

	/*
	 *	Build fragment header.
	 */
	fh->nexthdr = state->nexthdr;
	fh->reserved = 0;
	fh->identification = state->frag_id;

	/*
	 *	Copy a block of the IP datagram.
	 */
	BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag),
			     len));
	state->left -= len;

	fh->frag_off = htons(state->offset);
	if (state->left > 0)
		fh->frag_off |= htons(IP6_MF);
	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

	state->ptr += len;
	state->offset += len;

	return frag;
}
EXPORT_SYMBOL(ip6_frag_next);

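/**
 *	ip6_fragment - fragment an IPv6 datagram to fit the path MTU
 *	@net: network namespace
 *	@sk: originating socket (may be %NULL)
 *	@skb: packet to fragment; consumed by this function
 *	@output: callback used to transmit each fragment
 *
 *	Uses the frag-list fast path (ip6_fraglist_init()/_prepare())
 *	when the skb geometry allows it, and otherwise allocates and
 *	copies each fragment via the ip6_frag_next() slow path.  Sends
 *	an ICMPV6_PKT_TOOBIG error when fragmentation is not allowed.
 *	Returns 0 on success or a negative errno.
 */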
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	u8 tstamp_type = skb->tstamp_type;
	struct ip6_frag_state state;
	unsigned int mtu, hlen, nexthdr_offset;
	ktime_t tstamp = skb->tstamp;
	int hroom, err = 0;
	__be32 frag_id;
	u8 *prevhdr, nexthdr = 0;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;
	nexthdr_offset = prevhdr - skb_network_header(skb);

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np) {
		u32 frag_size = READ_ONCE(np->frag_size);

		if (frag_size && frag_size < mtu)
			mtu = frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	prevhdr = skb_network_header(skb) + nexthdr_offset;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto fail;

		/* We prevent @rt from being freed. */
		rcu_read_lock();

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one goes down. */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb_set_delivery_time(skb, tstamp, tstamp_type);
			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			rcu_read_unlock();
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		rcu_read_unlock();
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
		      LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id,
		      &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		frag = ip6_frag_next(skb, &state);
		if (IS_ERR(frag)) {
			err = PTR_ERR(frag);
			goto fail;
		}

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb_set_delivery_time(frag, tstamp, tstamp_type);
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = dst_rt6_info(dst);
	/* Yes, checking route validity in the not-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now,
	 *    (because the main consumer of this service
	 *    is TCP, which does not have this problem),
	 *    so the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	   (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr)) {
		struct fib6_info *from;
		struct rt6_info *rt;

		*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : dst_rt6_info(*dst);

		rcu_read_lock();
		from = rt ? rcu_dereference(rt->from) : NULL;
		err = ip6_route_get_saddr(net, from, &fl6->daddr,
					  sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0,
					  fl6->flowi6_l3mdev,
					  &fl6->saddr);
		rcu_read_unlock();

		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if ((*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it with the
	 * dst entry of the nexthop router
	 */
	rt = dst_rt6_info(*dst);
	rcu_read_lock();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(READ_ONCE(n->nud_state) & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif
	if (ipv6_addr_v4mapped(&fl6->saddr) &&
	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
		err = -EAFNOSUPPORT;
		goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: Network namespace to perform lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@net: Network namespace to perform lookup in
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer-encoded
 *	error code (see ERR_PTR()).
 */
struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

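/*
 * Example (illustrative sketch): a minimal datagram-style lookup.
 * "daddr", "dport" and "final_dst" are placeholders; real callers
 * fill the flowi6 from the socket and the msghdr control data.
 *
 *	struct flowi6 fl6;
 *	struct dst_entry *dst;
 *
 *	memset(&fl6, 0, sizeof(fl6));
 *	fl6.flowi6_proto = IPPROTO_UDP;
 *	fl6.flowi6_oif = sk->sk_bound_dev_if;
 *	fl6.daddr = *daddr;
 *	fl6.fl6_dport = dport;
 *
 *	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_dst);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */
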
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@connected: whether @sk is connected or not
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	In addition, for a connected socket, cache the dst in the socket
 *	if the current cache is not valid.
 *
 *	It returns a valid dst pointer on success, or a pointer-encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool connected)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (dst)
		return dst;

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
	if (connected && !IS_ERR(dst))
		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not the first, so the header
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu, frag_size;
	struct ipv6_txoptions *nopt, *opt = ipc6->opt;

	/* callers pass dst together with a reference, set it first so
	 * ip6_cork_release() can put it down even in case of an error.
	 */
	cork->base.dst = &rt->dst;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		nopt = v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
		if (unlikely(!nopt))
			return -ENOBUFS;

		nopt->tot_len = sizeof(*opt);
		nopt->opt_flen = opt->opt_flen;
		nopt->opt_nflen = opt->opt_nflen;

		nopt->dst0opt = ip6_opt_dup(opt->dst0opt, sk->sk_allocation);
		if (opt->dst0opt && !nopt->dst0opt)
			return -ENOBUFS;

		nopt->dst1opt = ip6_opt_dup(opt->dst1opt, sk->sk_allocation);
		if (opt->dst1opt && !nopt->dst1opt)
			return -ENOBUFS;

		nopt->hopopt = ip6_opt_dup(opt->hopopt, sk->sk_allocation);
		if (opt->hopopt && !nopt->hopopt)
			return -ENOBUFS;

		nopt->srcrt = ip6_rthdr_dup(opt->srcrt, sk->sk_allocation);
		if (opt->srcrt && !nopt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ?
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
	else
		mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ?
			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));

	frag_size = READ_ONCE(np->frag_size);
	if (frag_size && frag_size < mtu)
		mtu = frag_size;

	cork->base.fragsize = mtu;
	cork->base.gso_size = ipc6->gso_size;
	cork->base.tx_flags = 0;
	cork->base.mark = ipc6->sockc.mark;
	sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);

	cork->base.length = 0;
	cork->base.transmit_time = ipc6->sockc.transmit_time;

	return 0;
}

static int __ip6_append_data(struct sock *sk,
			     struct sk_buff_head *queue,
			     struct inet_cork_full *cork_full,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, size_t length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6)
{
	struct sk_buff *skb, *skb_prev = NULL;
	struct inet_cork *cork = &cork_full->base;
	struct flowi6 *fl6 = &cork_full->fl.u.ip6;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
	struct ubuf_info *uarg = NULL;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	bool zc = false;
	u32 tskey = 0;
	struct rt6_info *rt = dst_rt6_info(cork->dst);
	bool paged, hold_tskey, extra_uref = false;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;
	unsigned int wmem_alloc_delta = 0;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	paged = !!cork->gso_size;
	mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     rt->rt6i_nfheader_len;

	if (mtu <= fragheaderlen ||
	    ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr))
		goto emsgsize;

	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
	 * the first fragment
	 */
	if (headersize + transhdrlen > mtu)
		goto emsgsize;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_ICMPV6 ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length <= mtu - headersize &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if ((flags & MSG_ZEROCOPY) && length) {
		struct msghdr *msg = from;

		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
				return -EINVAL;

			/* Leave uarg NULL if we can't zerocopy, callers should
			 * be able to handle it.
			 */
			if ((rt->dst.dev->features & NETIF_F_SG) &&
			    csummode == CHECKSUM_PARTIAL) {
				paged = true;
				zc = true;
				uarg = msg->msg_ubuf;
			}
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
			if (!uarg)
				return -ENOBUFS;
			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
			if (rt->dst.dev->features & NETIF_F_SG &&
			    csummode == CHECKSUM_PARTIAL) {
				paged = true;
				zc = true;
			} else {
				uarg_to_msgzc(uarg)->zerocopy = 0;
				skb_zcopy_set(skb, uarg, &extra_uref);
			}
		}
	} else if ((flags & MSG_SPLICE_PAGES) && length) {
		if (inet_test_bit(HDRINCL, sk))
			return -EPERM;
		if (rt->dst.dev->features & NETIF_F_SG &&
		    getfrag == ip_generic_getfrag)
			/* We need an empty buffer to attach stuff to */
			paged = true;
		else
			flags &= ~MSG_SPLICE_PAGES;
	}

	hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
		     READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
	if (hold_tskey)
		tskey = atomic_inc_return(&sk->sk_tskey) - 1;

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen, alloc_extra;
			unsigned int pagedlen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			alloc_extra = hh_len;
			alloc_extra += dst_exthdrlen;
			alloc_extra += rt->dst.trailer_len;

			/* We just reserve space for fragment header.
			 * Note: this may be an overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloc_extra += sizeof(struct frag_hdr);

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged &&
				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
				  !(rt->dst.dev->features & NETIF_F_SG)))
				alloclen = fraglen;
			else {
				alloclen = fragheaderlen + transhdrlen;
				pagedlen = datalen - transhdrlen;
			}
			alloclen += alloc_extra;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			fraglen = datalen + fragheaderlen;

			copy = datalen - transhdrlen - fraggap - pagedlen;
			/* [!] NOTE: copy may be negative if pagedlen>0
			 * because then the equation may reduce to -fraggap.
			 */
			if (copy < 0 && !(flags & MSG_SPLICE_PAGES)) {
				err = -EINVAL;
				goto error;
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk, alloclen,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			if (copy > 0 &&
			    getfrag(from, data + transhdrlen, offset,
				    copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			} else if (flags & MSG_SPLICE_PAGES) {
				copy = 0;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (flags & MSG_SPLICE_PAGES) {
			struct msghdr *msg = from;

			err = -EIO;
			if (WARN_ON_ONCE(copy > msg->msg_iter.count))
				goto error;

			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
						   sk->sk_allocation);
			if (err < 0)
				goto error;
			copy = err;
			wmem_alloc_delta += copy;
		} else if (!zc) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			skb_zcopy_downgrade_managed(skb);
			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	net_zcopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	if (hold_tskey)
		atomic_dec(&sk->sk_tskey);
	return err;
}

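/**
 *	ip6_append_data - append data to the pending IPv6 output queue
 *	@sk: socket to send on
 *	@getfrag: callback that copies user data into the skb
 *	@from: opaque cookie passed to @getfrag (usually a msghdr)
 *	@length: number of bytes to append
 *	@transhdrlen: transport header length for the first fragment
 *	@ipc6: ancillary data (hop limit, traffic class, options, ...)
 *	@fl6: flow describing the destination
 *	@rt: route to use; a reference is taken when corking starts
 *	@flags: MSG_* flags (MSG_MORE keeps the cork in place)
 *
 *	Appends data to the socket's write queue, setting up the cork
 *	on the first call.  The datagram is built and transmitted
 *	later by ip6_push_pending_frames().  Returns 0 on success or a
 *	negative errno.
 */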
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, size_t length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		dst_hold(&rt->dst);
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt);
		if (err)
			return err;

		inet->cork.fl.u.ip6 = *fl6;
		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6);
}
EXPORT_SYMBOL_GPL(ip6_append_data);

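/*
 * Sketch of the corked transmit pattern built on ip6_append_data()
 * (illustrative; loosely following the raw and UDPv6 send paths).
 * The getfrag callback, flow and route setup are elided.
 *
 *	lock_sock(sk);
 *	err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
 *			      &ipc6, &fl6, rt, msg->msg_flags);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 *	release_sock(sk);
 */
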
static void ip6_cork_steal_dst(struct sk_buff *skb, struct inet_cork_full *cork)
{
	struct dst_entry *dst = cork->base.dst;

	cork->base.dst = NULL;
	skb_dst_set(skb, dst);
}

static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		struct ipv6_txoptions *opt = v6_cork->opt;

		kfree(opt->dst0opt);
		kfree(opt->dst1opt);
		kfree(opt->hopopt);
		kfree(opt->srcrt);
		kfree(opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
	}
}

struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr *final_dst;
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = dst_rt6_info(cork->base.dst);
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);
	__skb_pull(skb, skb_network_header_len(skb));

	final_dst = &fl6->daddr;
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					ip6_autoflowlabel(net, sk), fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = cork->base.mark;
	if (sk_is_tcp(sk))
		skb_set_delivery_time(skb, cork->base.transmit_time, SKB_CLOCK_MONOTONIC);
	else
		skb_set_delivery_type_by_clockid(skb, cork->base.transmit_time, sk->sk_clockid);

	ip6_cork_steal_dst(skb, cork);
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
		u8 icmp6_type;

		if (sk->sk_socket->type == SOCK_RAW &&
		   !(fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH))
			icmp6_type = fl6->fl6_icmp_type;
		else
			icmp6_type = icmp6_hdr(skb)->icmp6_type;
		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}

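/**
 *	ip6_send_skb - transmit a packet built by __ip6_make_skb()
 *	@skb: packet to transmit; skb->sk and the attached dst are used
 *
 *	Passes the packet to ip6_local_out() and accounts a discard on
 *	error.  Returns 0 on success or a negative errno.
 */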
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
	int err;

	rcu_read_lock();
	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return err;
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);

struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, size_t length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct rt6_info *rt,
			     unsigned int flags, struct inet_cork_full *cork)
{
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE) {
		dst_release(&rt->dst);
		return NULL;
	}

	__skb_queue_head_init(&queue);

	cork->base.flags = 0;
	cork->base.addr = 0;
	cork->base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt);
	if (err) {
		ip6_cork_release(cork, &v6_cork);
		return ERR_PTR(err);
	}
	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_test_bit(DONTFRAG, sk);

	err = __ip6_append_data(sk, &queue, cork, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
}
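
/*
 * Sketch of the single-shot (uncorked) transmit pattern (illustrative;
 * loosely mirroring the UDPv6 fast path, which uses its own send
 * helper in place of ip6_send_skb()):
 *
 *	skb = ip6_make_skb(sk, getfrag, msg, len, transhdrlen,
 *			   &ipc6, rt, msg->msg_flags, &cork);
 *	err = PTR_ERR_OR_ZERO(skb);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = ip6_send_skb(skb);
 */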
2059