xref: /linux/net/ipv6/ip6_output.c (revision d58ff35122847a83ba55394e2ae3a1527b6febf5)
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>

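/* Final step of the output path before the packet reaches the device:
 * loop multicast packets back to local listeners when required, hand the
 * skb to a lightweight tunnel if one is attached to the dst, resolve (or
 * create) the neighbour entry for the next hop, and transmit via
 * neigh_output().
 */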
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

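/* Run the cgroup BPF egress hook, then choose between the fragmenting and
 * the non-fragmenting path: a packet is fragmented when it exceeds the
 * path MTU and is not GSO, when the dst requires fragmentation of all
 * packets (dst_allfrag), or when conntrack defrag recorded a smaller
 * frag_max_size for the original packet.
 */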
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(net, sk, skb);
}

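/* Output entry point for locally generated and forwarded packets: drop
 * the packet if IPv6 is administratively disabled on the egress device,
 * otherwise pass it through the NF_INET_POST_ROUTING hook (skipped for
 * packets flagged IP6SKB_REROUTED) on its way to ip6_finish_output().
 */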
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: the socket lock is not held for SYNACK packets, but the socket
 * might still be modified by calls to skb_set_owner_w() and
 * ipv6_local_error(), which use proper atomic operations or spinlocks.
 */
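/* A minimal usage sketch (illustrative only, not taken from this file):
 * a connection-oriented caller typically attaches a routed dst to the skb
 * and transmits with its cached options and traffic class, roughly:
 *
 *	skb_dst_set_noref(skb, dst);
 *	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
 *
 * The skb must already contain the transport header; ip6_xmit() prepends
 * the extension headers (if any) and the IPv6 header itself.
 */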
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (!skb2) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
			 * it is safe to call in our context (socket lock not held)
			 */
			skb_set_owner_w(skb, (struct sock *)sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
					     &fl6->saddr);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel, fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);

		/* if egress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

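/* Deliver a packet carrying a Router Alert option to every raw socket on
 * ip6_ra_chain whose selector matches (and whose device binding, if any,
 * matches the ingress device); each matching socket except the last gets
 * its own clone. Returns 1 if the packet was consumed, 0 otherwise.
 */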
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

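/* Decide what to do with a packet addressed to a proxied address:
 * returns 1 when it is an NDISC message that should be handed to the
 * local input path, -1 when it must be discarded (link-local
 * destination), and 0 when normal forwarding may proceed.
 */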
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For a reaction involving a unicast neighbour
			 * discovery message destined to the proxied address,
			 * pass it to the input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	return dst_output(net, sk, skb);
}

static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}

static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}

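/* Forwarding path proper. In order: check that forwarding is enabled and
 * the packet is really ours to forward, run the XFRM forward policy,
 * divert Router Alert packets to interested sockets, enforce the hop
 * limit (sending TIME_EXCEED when it would drop to zero), handle NDISC
 * proxying, possibly emit a redirect when the packet leaves through the
 * interface it arrived on, enforce the path MTU (sending PKT_TOOBIG),
 * then decrement hop_limit and hand the skb to the NF_INET_FORWARD hook.
 */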
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We do NOT do any processing on RA packets; we push them
	 *	to user level AS IS, without any warranty that the
	 *	application will be able to interpret them. The reason
	 *	is that we cannot do anything clever here.
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, ip6_dst_idev(dst),
					IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

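/* Fragment an IPv6 packet locally. The usable per-fragment payload is
 * derived from the dst MTU minus the unfragmentable part (hlen) and the
 * fragment header, rounded down to a multiple of 8 as required by the
 * fragment offset encoding. For example (illustrative numbers): with a
 * 1500-byte MTU and a bare 40-byte IPv6 header, each non-final fragment
 * carries 1500 - 40 - 8 = 1452 -> 1448 bytes of payload after rounding.
 * A fast path re-uses an existing frag_list when its geometry already
 * fits; otherwise the slow path copies the data into freshly allocated
 * fragments.
 */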
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = __skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = __skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		u8 *fragnexthdr_offset;

		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		fragnexthdr_offset = skb_network_header(frag);
		fragnexthdr_offset += prevhdr - skb_network_header(skb);
		*fragnexthdr_offset = NEXTHDR_FRAGMENT;

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

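/* Helpers for validating a socket's cached dst. ip6_rt_check() reports a
 * mismatch between a route key and a flow address: it is false (route
 * still usable) when the key is a matching host route, or when the
 * cached address saved at the last use equals the flow address.
 */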
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now,
	 *    (because the main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif
	if (ipv6_addr_v4mapped(&fl6->saddr) &&
	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
		err = -EAFNOSUPPORT;
		goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: network namespace to perform the lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (!dst)
		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

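/* UFO path of the append-data machinery: when the device can segment UDP
 * (NETIF_F_UFO), queue one large skb and let the device split it. The
 * gso_size is the largest multiple of 8 that fits together with the
 * fragment header in the MTU, matching what software fragmentation would
 * produce.
 */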
static inline int ip6_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int exthdrlen, int transhdrlen, int mtu,
			unsigned int flags, const struct flowi6 *fl6)

{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP large send offload, so create
	 * one single skb containing the complete UDP datagram.
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_set_network_header(skb, exthdrlen);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		if (flags & MSG_CONFIRM)
			skb_set_dst_pending_confirm(skb, 1);

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
							 &fl6->daddr,
							 &fl6->saddr);

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not the first; the header
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

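/* Initialise the per-socket cork state for a sequence of append calls:
 * duplicate the tx options (the caller's copy may go away while the cork
 * is held), take a reference on the route, and record the MTU to
 * fragment against, honouring IPV6_PMTUDISC_PROBE and a user-set
 * np->frag_size smaller than the path MTU.
 */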
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = opt->tot_len;
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above --miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	return 0;
}

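/* Core of ip6_append_data()/ip6_make_skb(). The getfrag() callback is
 * expected to copy 'len' bytes of payload starting at 'offset' from the
 * caller's cursor 'from' into 'to', returning a negative value on
 * failure (ip_generic_getfrag() is the usual instance). Data is packed
 * into MTU-sized skbs on 'queue'; page frags are used when the device
 * supports scatter-gather (NETIF_F_SG), plain linear copies otherwise.
 */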
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6,
			     const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		ipv6_local_error(sk, EMSGSIZE, fl6,
				 mtu - headersize +
				 sizeof(struct ipv6hdr));
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length <= mtu - headersize &&
	    !(flags & MSG_MORE) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if ((((length + fragheaderlen) > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment; the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for the fragment header.
			 * Note: this may be an overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				goto error;
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			if (copy > 0 &&
			    getfrag(from, data + transhdrlen, offset,
				    copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

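/* A minimal usage sketch (illustrative only, not taken from this file):
 * datagram senders typically append under the socket lock and then
 * flush, roughly:
 *
 *	lock_sock(sk);
 *	err = ip6_append_data(sk, ip_generic_getfrag, msg, len,
 *			      sizeof(struct udphdr), &ipc6, &fl6, rt,
 *			      msg->msg_flags, &sockc);
 *	if (!err)
 *		err = ip6_push_pending_frames(sk);
 *	else
 *		ip6_flush_pending_frames(sk);
 *	release_sock(sk);
 */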
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags,
		    const struct sockcm_cookie *sockc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6, sockc);
}
EXPORT_SYMBOL_GPL(ip6_append_data);

static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}

struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel, fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}

int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);

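/* Lockless single-shot variant of the append/push sequence above: build
 * the complete datagram on a private queue with on-stack cork state and
 * return the finished skb, leaving the socket's pending-frames state
 * untouched. This suits corkless fast paths.
 */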
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     const struct sockcm_cookie *sockc)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6, sockc);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}