xref: /linux/net/ipv6/ip6_tunnel.c (revision 98838d95075a5295f3478ceba18bcccf472e30f4)
1 /*
2  *	IPv6 tunneling device
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
7  *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
8  *
9  *      Based on:
10  *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11  *
12  *      RFC 2473
13  *
14  *	This program is free software; you can redistribute it and/or
15  *      modify it under the terms of the GNU General Public License
16  *      as published by the Free Software Foundation; either version
17  *      2 of the License, or (at your option) any later version.
18  *
19  */
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
44 
45 #include <asm/uaccess.h>
46 #include <linux/atomic.h>
47 
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
61 
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
67 
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT  5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
70 
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
74 
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 {
77 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78 
79 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
80 }
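
/*
 * Illustrative sketch (not part of the original file): because the two
 * per-address hashes are XORed, HASH() is symmetric in its arguments and
 * always yields a bucket index in [0, IP6_TUNNEL_HASH_SIZE).  The disabled
 * snippet below shows the intended use; the addresses are documentation
 * addresses (RFC 3849), made up for illustration.
 */
#if 0
static void hash_example(void)
{
	struct in6_addr local, remote;

	ipv6_addr_set(&local,  htonl(0x20010db8), 0, 0, htonl(1));
	ipv6_addr_set(&remote, htonl(0x20010db8), 0, 0, htonl(2));

	/* same bucket regardless of direction */
	pr_debug("bucket = %u\n", HASH(&remote, &local));
	pr_debug("bucket = %u\n", HASH(&local, &remote));
}
#endif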
81 
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85 
86 static int ip6_tnl_net_id __read_mostly;
87 struct ip6_tnl_net {
88 	/* the IPv6 tunnel fallback device */
89 	struct net_device *fb_tnl_dev;
90 	/* lists for storing tunnels in use */
91 	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 	struct ip6_tnl __rcu *tnls_wc[1];
93 	struct ip6_tnl __rcu **tnls[2];
94 	struct ip6_tnl __rcu *collect_md_tun;
95 };
96 
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 	struct pcpu_sw_netstats tmp, sum = { 0 };
100 	int i;
101 
102 	for_each_possible_cpu(i) {
103 		unsigned int start;
104 		const struct pcpu_sw_netstats *tstats =
105 						   per_cpu_ptr(dev->tstats, i);
106 
107 		do {
108 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 			tmp.rx_packets = tstats->rx_packets;
110 			tmp.rx_bytes = tstats->rx_bytes;
111 			tmp.tx_packets = tstats->tx_packets;
112 			tmp.tx_bytes = tstats->tx_bytes;
113 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114 
115 		sum.rx_packets += tmp.rx_packets;
116 		sum.rx_bytes   += tmp.rx_bytes;
117 		sum.tx_packets += tmp.tx_packets;
118 		sum.tx_bytes   += tmp.tx_bytes;
119 	}
120 	dev->stats.rx_packets = sum.rx_packets;
121 	dev->stats.rx_bytes   = sum.rx_bytes;
122 	dev->stats.tx_packets = sum.tx_packets;
123 	dev->stats.tx_bytes   = sum.tx_bytes;
124 	return &dev->stats;
125 }
126 
127 /**
128  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace of the tunnel devices
129  *   @remote: the address of the tunnel exit-point
130  *   @local: the address of the tunnel entry-point
131  *
132  * Return:
133  *   tunnel matching given end-points if found,
134  *   else fallback tunnel if its device is up,
135  *   else %NULL
136  **/
137 
138 #define for_each_ip6_tunnel_rcu(start) \
139 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
140 
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
143 {
144 	unsigned int hash = HASH(remote, local);
145 	struct ip6_tnl *t;
146 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
147 	struct in6_addr any;
148 
149 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
152 		    (t->dev->flags & IFF_UP))
153 			return t;
154 	}
155 
156 	memset(&any, 0, sizeof(any));
157 	hash = HASH(&any, local);
158 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 		    (t->dev->flags & IFF_UP))
161 			return t;
162 	}
163 
164 	hash = HASH(remote, &any);
165 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
166 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
167 		    (t->dev->flags & IFF_UP))
168 			return t;
169 	}
170 
171 	t = rcu_dereference(ip6n->collect_md_tun);
172 	if (t)
173 		return t;
174 
175 	t = rcu_dereference(ip6n->tnls_wc[0]);
176 	if (t && (t->dev->flags & IFF_UP))
177 		return t;
178 
179 	return NULL;
180 }
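
/*
 * Usage sketch (not part of the original file): ip6_tnl_lookup() must be
 * called under rcu_read_lock() and tries, in order, an exact
 * (remote, local) match, a local-only match, a remote-only match, the
 * collect_md tunnel, and finally the fallback device.  A minimal caller,
 * mirroring the receive paths below:
 */
#if 0
static bool tunnel_is_known(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	bool found;

	rcu_read_lock();
	/* remote comes first: our peer is the packet's source address */
	found = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
			       &ipv6h->daddr) != NULL;
	rcu_read_unlock();
	return found;
}
#endif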
181 
182 /**
183  * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the per-net tunnel state
184  *   @p: parameters containing tunnel end-points
185  *
186  * Description:
187  *   ip6_tnl_bucket() returns the head of the list matching the
188  *   &struct in6_addr entries laddr and raddr in @p.
189  *
190  * Return: head of IPv6 tunnel list
191  **/
192 
193 static struct ip6_tnl __rcu **
194 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
195 {
196 	const struct in6_addr *remote = &p->raddr;
197 	const struct in6_addr *local = &p->laddr;
198 	unsigned int h = 0;
199 	int prio = 0;
200 
201 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
202 		prio = 1;
203 		h = HASH(remote, local);
204 	}
205 	return &ip6n->tnls[prio][h];
206 }
207 
208 /**
209  * ip6_tnl_link - add tunnel to hash table
 *   @ip6n: the per-net tunnel state
210  *   @t: tunnel to be added
211  **/
212 
213 static void
214 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
215 {
216 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
217 
218 	if (t->parms.collect_md)
219 		rcu_assign_pointer(ip6n->collect_md_tun, t);
220 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
221 	rcu_assign_pointer(*tp, t);
222 }
223 
224 /**
225  * ip6_tnl_unlink - remove tunnel from hash table
 *   @ip6n: the per-net tunnel state
226  *   @t: tunnel to be removed
227  **/
228 
229 static void
230 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
231 {
232 	struct ip6_tnl __rcu **tp;
233 	struct ip6_tnl *iter;
234 
235 	if (t->parms.collect_md)
236 		rcu_assign_pointer(ip6n->collect_md_tun, NULL);
237 
238 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
239 	     (iter = rtnl_dereference(*tp)) != NULL;
240 	     tp = &iter->next) {
241 		if (t == iter) {
242 			rcu_assign_pointer(*tp, t->next);
243 			break;
244 		}
245 	}
246 }
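
/*
 * Sketch (not part of the original file): ip6_tnl_link()/ip6_tnl_unlink()
 * assume the writer holds RTNL (hence rtnl_dereference() above), while the
 * receive path walks the same lists under rcu_read_lock() only.  A
 * hypothetical re-hash after changing the tunnel end-points would follow
 * the same pattern as ip6_tnl_update() later in this file:
 */
#if 0
static void rehash_example(struct ip6_tnl_net *ip6n, struct ip6_tnl *t,
			   const struct __ip6_tnl_parm *p)
{
	ASSERT_RTNL();
	ip6_tnl_unlink(ip6n, t);	/* drop from the old hash bucket */
	synchronize_net();		/* wait for current RCU readers */
	t->parms.laddr = p->laddr;	/* new end-points select a bucket */
	t->parms.raddr = p->raddr;
	ip6_tnl_link(ip6n, t);		/* insert into the new bucket */
}
#endif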
247 
248 static void ip6_dev_free(struct net_device *dev)
249 {
250 	struct ip6_tnl *t = netdev_priv(dev);
251 
252 	gro_cells_destroy(&t->gro_cells);
253 	dst_cache_destroy(&t->dst_cache);
254 	free_percpu(dev->tstats);
255 	free_netdev(dev);
256 }
257 
258 static int ip6_tnl_create2(struct net_device *dev)
259 {
260 	struct ip6_tnl *t = netdev_priv(dev);
261 	struct net *net = dev_net(dev);
262 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
263 	int err;
264 
267 	dev->rtnl_link_ops = &ip6_link_ops;
268 	err = register_netdevice(dev);
269 	if (err < 0)
270 		goto out;
271 
272 	strcpy(t->parms.name, dev->name);
273 
274 	dev_hold(dev);
275 	ip6_tnl_link(ip6n, t);
276 	return 0;
277 
278 out:
279 	return err;
280 }
281 
282 /**
283  * ip6_tnl_create - create a new tunnel
284  *   @net: network namespace in which to create the tunnel
 *   @p: tunnel parameters
286  *
287  * Description:
288  *   Create tunnel matching given parameters.
289  *
290  * Return:
291  *   created tunnel or error pointer
292  **/
293 
294 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
295 {
296 	struct net_device *dev;
297 	struct ip6_tnl *t;
298 	char name[IFNAMSIZ];
299 	int err = -ENOMEM;
300 
301 	if (p->name[0])
302 		strlcpy(name, p->name, IFNAMSIZ);
303 	else
304 		sprintf(name, "ip6tnl%%d");
305 
306 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
307 			   ip6_tnl_dev_setup);
308 	if (!dev)
309 		goto failed;
310 
311 	dev_net_set(dev, net);
312 
313 	t = netdev_priv(dev);
314 	t->parms = *p;
315 	t->net = dev_net(dev);
316 	err = ip6_tnl_create2(dev);
317 	if (err < 0)
318 		goto failed_free;
319 
320 	return t;
321 
322 failed_free:
323 	ip6_dev_free(dev);
324 failed:
325 	return ERR_PTR(err);
326 }
327 
328 /**
329  * ip6_tnl_locate - find or create tunnel matching given parameters
330  *   @p: tunnel parameters
331  *   @create: != 0 if allowed to create new tunnel if no match found
332  *
333  * Description:
334  *   ip6_tnl_locate() first tries to locate an existing tunnel
335  *   based on @p. If this is unsuccessful, but @create is set, a new
336  *   tunnel device is created and registered for use.
337  *
338  * Return:
339  *   matching tunnel or error pointer
340  **/
341 
342 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
343 		struct __ip6_tnl_parm *p, int create)
344 {
345 	const struct in6_addr *remote = &p->raddr;
346 	const struct in6_addr *local = &p->laddr;
347 	struct ip6_tnl __rcu **tp;
348 	struct ip6_tnl *t;
349 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
350 
351 	for (tp = ip6_tnl_bucket(ip6n, p);
352 	     (t = rtnl_dereference(*tp)) != NULL;
353 	     tp = &t->next) {
354 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
355 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
356 			if (create)
357 				return ERR_PTR(-EEXIST);
358 
359 			return t;
360 		}
361 	}
362 	if (!create)
363 		return ERR_PTR(-ENODEV);
364 	return ip6_tnl_create(net, p);
365 }
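
/*
 * Usage sketch (not part of the original file; assumes RTNL is held, as in
 * the ioctl and netlink paths below): find an ip6ip6 tunnel by its
 * end-points, creating it when absent.  The addresses are illustrative.
 */
#if 0
static struct ip6_tnl *locate_example(struct net *net)
{
	struct __ip6_tnl_parm p;

	memset(&p, 0, sizeof(p));
	p.proto = IPPROTO_IPV6;
	ipv6_addr_set(&p.laddr, htonl(0x20010db8), 0, 0, htonl(1));
	ipv6_addr_set(&p.raddr, htonl(0x20010db8), 0, 0, htonl(2));

	/* returns the tunnel or an ERR_PTR() value */
	return ip6_tnl_locate(net, &p, 1);
}
#endif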
366 
367 /**
368  * ip6_tnl_dev_uninit - tunnel device uninitializer
369  *   @dev: the device to be destroyed
370  *
371  * Description:
372  *   ip6_tnl_dev_uninit() removes tunnel from its list
373  **/
374 
375 static void
376 ip6_tnl_dev_uninit(struct net_device *dev)
377 {
378 	struct ip6_tnl *t = netdev_priv(dev);
379 	struct net *net = t->net;
380 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
381 
382 	if (dev == ip6n->fb_tnl_dev)
383 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
384 	else
385 		ip6_tnl_unlink(ip6n, t);
386 	dst_cache_reset(&t->dst_cache);
387 	dev_put(dev);
388 }
389 
390 /**
391  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
392  *   @skb: received socket buffer
 *   @raw: pointer to the outer IPv6 header within @skb's data
393  *
394  * Return:
395  *   0 if none was found,
396  *   else offset of the encapsulation limit option, relative to @raw
397  **/
398 
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		/* recompute from skb->data: pskb_may_pull() may have
		 * reallocated the head, invalidating raw
		 */
		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() below might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
449 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
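
/*
 * Usage sketch (not part of the original file): both callers in this file
 * hand in the offset of the outer IPv6 header and treat a zero return as
 * "no encapsulation limit option present":
 */
#if 0
static void parse_example(struct sk_buff *skb)
{
	__u16 off = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));

	if (off > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (void *)&skb_network_header(skb)[off];
		pr_debug("encapsulation limit %u\n", tel->encap_limit);
	}
}
#endif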
450 
451 /**
452  * ip6_tnl_err - tunnel error handler
453  *
454  * Description:
455  *   ip6_tnl_err() should handle errors in the tunnel according
456  *   to the specifications in RFC 2473.
457  **/
458 
459 static int
460 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
461 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
462 {
463 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
464 	struct ip6_tnl *t;
465 	int rel_msg = 0;
466 	u8 rel_type = ICMPV6_DEST_UNREACH;
467 	u8 rel_code = ICMPV6_ADDR_UNREACH;
468 	u8 tproto;
469 	__u32 rel_info = 0;
470 	__u16 len;
471 	int err = -ENOENT;
472 
473 	/* If the packet doesn't contain the original IPv6 header we are
474 	   in trouble since we might need the source address for further
475 	   processing of the error. */
476 
477 	rcu_read_lock();
478 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
479 	if (!t)
480 		goto out;
481 
482 	tproto = ACCESS_ONCE(t->parms.proto);
483 	if (tproto != ipproto && tproto != 0)
484 		goto out;
485 
486 	err = 0;
487 
488 	switch (*type) {
489 		__u32 teli;
490 		struct ipv6_tlv_tnl_enc_lim *tel;
491 		__u32 mtu;
492 	case ICMPV6_DEST_UNREACH:
493 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
494 				    t->parms.name);
495 		rel_msg = 1;
496 		break;
497 	case ICMPV6_TIME_EXCEED:
498 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
499 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
500 					    t->parms.name);
501 			rel_msg = 1;
502 		}
503 		break;
504 	case ICMPV6_PARAMPROB:
505 		teli = 0;
506 		if ((*code) == ICMPV6_HDR_FIELD)
507 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
508 
509 		if (teli && teli == *info - 2) {
510 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
511 			if (tel->encap_limit == 0) {
512 				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
513 						    t->parms.name);
514 				rel_msg = 1;
515 			}
516 		} else {
517 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
518 					    t->parms.name);
519 		}
520 		break;
521 	case ICMPV6_PKT_TOOBIG:
522 		mtu = *info - offset;
523 		if (mtu < IPV6_MIN_MTU)
524 			mtu = IPV6_MIN_MTU;
525 		t->dev->mtu = mtu;
526 
527 		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
528 		if (len > mtu) {
529 			rel_type = ICMPV6_PKT_TOOBIG;
530 			rel_code = 0;
531 			rel_info = mtu;
532 			rel_msg = 1;
533 		}
534 		break;
535 	}
536 
537 	*type = rel_type;
538 	*code = rel_code;
539 	*info = rel_info;
540 	*msg = rel_msg;
541 
542 out:
543 	rcu_read_unlock();
544 	return err;
545 }
546 
547 static int
548 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
549 	   u8 type, u8 code, int offset, __be32 info)
550 {
551 	int rel_msg = 0;
552 	u8 rel_type = type;
553 	u8 rel_code = code;
554 	__u32 rel_info = ntohl(info);
555 	int err;
556 	struct sk_buff *skb2;
557 	const struct iphdr *eiph;
558 	struct rtable *rt;
559 	struct flowi4 fl4;
560 
561 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
562 			  &rel_msg, &rel_info, offset);
563 	if (err < 0)
564 		return err;
565 
566 	if (rel_msg == 0)
567 		return 0;
568 
569 	switch (rel_type) {
570 	case ICMPV6_DEST_UNREACH:
571 		if (rel_code != ICMPV6_ADDR_UNREACH)
572 			return 0;
573 		rel_type = ICMP_DEST_UNREACH;
574 		rel_code = ICMP_HOST_UNREACH;
575 		break;
576 	case ICMPV6_PKT_TOOBIG:
577 		if (rel_code != 0)
578 			return 0;
579 		rel_type = ICMP_DEST_UNREACH;
580 		rel_code = ICMP_FRAG_NEEDED;
581 		break;
582 	case NDISC_REDIRECT:
583 		rel_type = ICMP_REDIRECT;
584 		rel_code = ICMP_REDIR_HOST;
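		/* falls through to the default "return 0": no relayed IPv4
		 * redirect is sent, which also leaves the ICMP_REDIRECT
		 * handling further below unreachable, since ip6_tnl_err()
		 * only ever reports ICMPV6_DEST_UNREACH or ICMPV6_PKT_TOOBIG
		 */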
585 	default:
586 		return 0;
587 	}
588 
589 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
590 		return 0;
591 
592 	skb2 = skb_clone(skb, GFP_ATOMIC);
593 	if (!skb2)
594 		return 0;
595 
596 	skb_dst_drop(skb2);
597 
598 	skb_pull(skb2, offset);
599 	skb_reset_network_header(skb2);
600 	eiph = ip_hdr(skb2);
601 
602 	/* Try to guess incoming interface */
603 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
604 				   eiph->saddr, 0,
605 				   0, 0,
606 				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
607 	if (IS_ERR(rt))
608 		goto out;
609 
610 	skb2->dev = rt->dst.dev;
611 
612 	/* route "incoming" packet */
613 	if (rt->rt_flags & RTCF_LOCAL) {
614 		ip_rt_put(rt);
615 		rt = NULL;
616 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
617 					   eiph->daddr, eiph->saddr,
618 					   0, 0,
619 					   IPPROTO_IPIP,
620 					   RT_TOS(eiph->tos), 0);
621 		if (IS_ERR(rt) ||
622 		    rt->dst.dev->type != ARPHRD_TUNNEL) {
623 			if (!IS_ERR(rt))
624 				ip_rt_put(rt);
625 			goto out;
626 		}
627 		skb_dst_set(skb2, &rt->dst);
628 	} else {
629 		ip_rt_put(rt);
630 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
631 				   skb2->dev) ||
632 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
633 			goto out;
634 	}
635 
636 	/* change mtu on this route */
637 	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
638 		if (rel_info > dst_mtu(skb_dst(skb2)))
639 			goto out;
640 
641 		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
642 	}
643 	if (rel_type == ICMP_REDIRECT)
644 		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
645 
646 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
647 
648 out:
649 	kfree_skb(skb2);
650 	return 0;
651 }
652 
653 static int
654 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
655 	   u8 type, u8 code, int offset, __be32 info)
656 {
657 	int rel_msg = 0;
658 	u8 rel_type = type;
659 	u8 rel_code = code;
660 	__u32 rel_info = ntohl(info);
661 	int err;
662 
663 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
664 			  &rel_msg, &rel_info, offset);
665 	if (err < 0)
666 		return err;
667 
668 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
669 		struct rt6_info *rt;
670 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
671 
672 		if (!skb2)
673 			return 0;
674 
675 		skb_dst_drop(skb2);
676 		skb_pull(skb2, offset);
677 		skb_reset_network_header(skb2);
678 
679 		/* Try to guess incoming interface */
680 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
681 				NULL, 0, 0);
682 
683 		if (rt && rt->dst.dev)
684 			skb2->dev = rt->dst.dev;
685 
686 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
687 
688 		ip6_rt_put(rt);
689 
690 		kfree_skb(skb2);
691 	}
692 
693 	return 0;
694 }
695 
696 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
697 				       const struct ipv6hdr *ipv6h,
698 				       struct sk_buff *skb)
699 {
700 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
701 
702 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
703 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
704 
705 	return IP6_ECN_decapsulate(ipv6h, skb);
706 }
707 
708 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
709 				       const struct ipv6hdr *ipv6h,
710 				       struct sk_buff *skb)
711 {
712 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
713 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
714 
715 	return IP6_ECN_decapsulate(ipv6h, skb);
716 }
717 
718 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
719 			     const struct in6_addr *laddr,
720 			     const struct in6_addr *raddr)
721 {
722 	struct __ip6_tnl_parm *p = &t->parms;
723 	int ltype = ipv6_addr_type(laddr);
724 	int rtype = ipv6_addr_type(raddr);
725 	__u32 flags = 0;
726 
727 	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
728 		flags = IP6_TNL_F_CAP_PER_PACKET;
729 	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
730 		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
731 		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
732 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
733 		if (ltype&IPV6_ADDR_UNICAST)
734 			flags |= IP6_TNL_F_CAP_XMIT;
735 		if (rtype&IPV6_ADDR_UNICAST)
736 			flags |= IP6_TNL_F_CAP_RCV;
737 	}
738 	return flags;
739 }
740 EXPORT_SYMBOL(ip6_tnl_get_cap);
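
/*
 * Sketch (not part of the original file): with both end-points set to
 * global unicast addresses the tunnel may both transmit and receive, while
 * a wildcard end-point defers the decision to ip6_tnl_rcv_ctl() and
 * ip6_tnl_xmit_ctl() on a per-packet basis:
 */
#if 0
static void cap_example(struct ip6_tnl *t)
{
	__u32 cap = ip6_tnl_get_cap(t, &t->parms.laddr, &t->parms.raddr);

	if (cap & IP6_TNL_F_CAP_XMIT)
		pr_debug("%s may transmit\n", t->parms.name);
	if (cap & IP6_TNL_F_CAP_RCV)
		pr_debug("%s may receive\n", t->parms.name);
	if (cap & IP6_TNL_F_CAP_PER_PACKET)
		pr_debug("%s decides per packet\n", t->parms.name);
}
#endif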
741 
742 /* called with rcu_read_lock() */
743 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
744 				  const struct in6_addr *laddr,
745 				  const struct in6_addr *raddr)
746 {
747 	struct __ip6_tnl_parm *p = &t->parms;
748 	int ret = 0;
749 	struct net *net = t->net;
750 
751 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
752 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
753 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
754 		struct net_device *ldev = NULL;
755 
756 		if (p->link)
757 			ldev = dev_get_by_index_rcu(net, p->link);
758 
759 		if ((ipv6_addr_is_multicast(laddr) ||
760 		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
761 		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
762 			ret = 1;
763 	}
764 	return ret;
765 }
766 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
767 
768 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
769 			 const struct tnl_ptk_info *tpi,
770 			 struct metadata_dst *tun_dst,
771 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
772 						const struct ipv6hdr *ipv6h,
773 						struct sk_buff *skb),
774 			 bool log_ecn_err)
775 {
776 	struct pcpu_sw_netstats *tstats;
777 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
778 	int err;
779 
780 	if ((!(tpi->flags & TUNNEL_CSUM) &&
781 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
782 	    ((tpi->flags & TUNNEL_CSUM) &&
783 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
784 		tunnel->dev->stats.rx_crc_errors++;
785 		tunnel->dev->stats.rx_errors++;
786 		goto drop;
787 	}
788 
789 	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
790 		if (!(tpi->flags & TUNNEL_SEQ) ||
791 		    (tunnel->i_seqno &&
792 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
793 			tunnel->dev->stats.rx_fifo_errors++;
794 			tunnel->dev->stats.rx_errors++;
795 			goto drop;
796 		}
797 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
798 	}
799 
800 	skb->protocol = tpi->proto;
801 
802 	/* Warning: All skb pointers will be invalidated! */
803 	if (tunnel->dev->type == ARPHRD_ETHER) {
804 		if (!pskb_may_pull(skb, ETH_HLEN)) {
805 			tunnel->dev->stats.rx_length_errors++;
806 			tunnel->dev->stats.rx_errors++;
807 			goto drop;
808 		}
809 
810 		ipv6h = ipv6_hdr(skb);
811 		skb->protocol = eth_type_trans(skb, tunnel->dev);
812 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
813 	} else {
814 		skb->dev = tunnel->dev;
815 	}
816 
817 	skb_reset_network_header(skb);
818 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
819 
820 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
821 
822 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
823 	if (unlikely(err)) {
824 		if (log_ecn_err)
825 			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
826 					     &ipv6h->saddr,
827 					     ipv6_get_dsfield(ipv6h));
828 		if (err > 1) {
829 			++tunnel->dev->stats.rx_frame_errors;
830 			++tunnel->dev->stats.rx_errors;
831 			goto drop;
832 		}
833 	}
834 
835 	tstats = this_cpu_ptr(tunnel->dev->tstats);
836 	u64_stats_update_begin(&tstats->syncp);
837 	tstats->rx_packets++;
838 	tstats->rx_bytes += skb->len;
839 	u64_stats_update_end(&tstats->syncp);
840 
841 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
842 
843 	if (tun_dst)
844 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
845 
846 	gro_cells_receive(&tunnel->gro_cells, skb);
847 	return 0;
848 
849 drop:
850 	kfree_skb(skb);
851 	return 0;
852 }
853 
854 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
855 		const struct tnl_ptk_info *tpi,
856 		struct metadata_dst *tun_dst,
857 		bool log_ecn_err)
858 {
859 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
860 			     log_ecn_err);
861 }
862 EXPORT_SYMBOL(ip6_tnl_rcv);
863 
864 static const struct tnl_ptk_info tpi_v6 = {
865 	/* no tunnel info required for ipxip6. */
866 	.proto = htons(ETH_P_IPV6),
867 };
868 
869 static const struct tnl_ptk_info tpi_v4 = {
870 	/* no tunnel info required for ipxip6. */
871 	.proto = htons(ETH_P_IP),
872 };
873 
874 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
875 		      const struct tnl_ptk_info *tpi,
876 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
877 						  const struct ipv6hdr *ipv6h,
878 						  struct sk_buff *skb))
879 {
880 	struct ip6_tnl *t;
881 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
882 	struct metadata_dst *tun_dst = NULL;
883 	int ret = -1;
884 
885 	rcu_read_lock();
886 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
887 
888 	if (t) {
889 		u8 tproto = ACCESS_ONCE(t->parms.proto);
890 
891 		if (tproto != ipproto && tproto != 0)
892 			goto drop;
893 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
894 			goto drop;
895 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
896 			goto drop;
897 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
898 			goto drop;
899 		if (t->parms.collect_md) {
900 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
901 			if (!tun_dst)
902 				goto drop;
903 		}
904 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
905 				    log_ecn_error);
906 	}
907 
908 	rcu_read_unlock();
909 
910 	return ret;
911 
912 drop:
913 	rcu_read_unlock();
914 	kfree_skb(skb);
915 	return 0;
916 }
917 
918 static int ip4ip6_rcv(struct sk_buff *skb)
919 {
920 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
921 			  ip4ip6_dscp_ecn_decapsulate);
922 }
923 
924 static int ip6ip6_rcv(struct sk_buff *skb)
925 {
926 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
927 			  ip6ip6_dscp_ecn_decapsulate);
928 }
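
/*
 * Note (sketch, not part of this excerpt): ip4ip6_rcv()/ip6ip6_rcv() and
 * the matching error handlers are wired into the IPv6 stack further down
 * in this file as xfrm6 tunnel handlers, roughly as follows:
 */
#if 0
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};
#endif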
929 
930 struct ipv6_tel_txoption {
931 	struct ipv6_txoptions ops;
932 	__u8 dst_opt[8];
933 };
934 
935 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
936 {
937 	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
938 
939 	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
940 	opt->dst_opt[3] = 1;
941 	opt->dst_opt[4] = encap_limit;
942 	opt->dst_opt[5] = IPV6_TLV_PADN;
943 	opt->dst_opt[6] = 1;
944 
945 	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
946 	opt->ops.opt_nflen = 8;
947 }
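
/*
 * For reference (comment added, not in the original file): the eight bytes
 * built above form a destination options header carrying the RFC 2473
 * tunnel encapsulation limit option, padded out with a PadN option:
 *
 *	dst_opt[0]  next header    (filled in by ipv6_push_nfrag_opts())
 *	dst_opt[1]  hdr ext len    0, i.e. 8 bytes in total
 *	dst_opt[2]  option type    IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *	dst_opt[3]  option length  1
 *	dst_opt[4]  encap. limit   encap_limit
 *	dst_opt[5]  option type    IPV6_TLV_PADN (1)
 *	dst_opt[6]  option length  1
 *	dst_opt[7]  padding        0
 */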
948 
949 /**
950  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
951  *   @t: the outgoing tunnel device
952  *   @hdr: IPv6 header from the incoming packet
953  *
954  * Description:
955  *   Avoid trivial tunneling loop by checking that tunnel exit-point
956  *   doesn't match source of incoming packet.
957  *
958  * Return:
959  *   %true if conflict,
960  *   %false otherwise
961  **/
962 
963 static inline bool
964 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
965 {
966 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
967 }
968 
969 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
970 		     const struct in6_addr *laddr,
971 		     const struct in6_addr *raddr)
972 {
973 	struct __ip6_tnl_parm *p = &t->parms;
974 	int ret = 0;
975 	struct net *net = t->net;
976 
977 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
978 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
979 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
980 		struct net_device *ldev = NULL;
981 
982 		rcu_read_lock();
983 		if (p->link)
984 			ldev = dev_get_by_index_rcu(net, p->link);
985 
986 		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
987 			pr_warn("%s xmit: Local address not yet configured!\n",
988 				p->name);
989 		else if (!ipv6_addr_is_multicast(raddr) &&
990 			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
991 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
992 				p->name);
993 		else
994 			ret = 1;
995 		rcu_read_unlock();
996 	}
997 	return ret;
998 }
999 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1000 
1001 /**
1002  * ip6_tnl_xmit - encapsulate packet and send
1003  *   @skb: the outgoing socket buffer
1004  *   @dev: the outgoing tunnel device
1005  *   @dsfield: dscp code for outer header
1006  *   @fl6: flow of tunneled packet
1007  *   @encap_limit: encapsulation limit
1008  *   @pmtu: Path MTU is stored if packet is too big
1009  *   @proto: next header value
1010  *
1011  * Description:
1012  *   Build new header and do some sanity checks on the packet before sending
1013  *   it.
1014  *
1015  * Return:
1016  *   0 on success
1017  *   -1 on failure
1018  *   %-EMSGSIZE if the message is too big; the path MTU is stored in @pmtu
1019  **/
1020 
1021 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1022 		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1023 		 __u8 proto)
1024 {
1025 	struct ip6_tnl *t = netdev_priv(dev);
1026 	struct net *net = t->net;
1027 	struct net_device_stats *stats = &t->dev->stats;
1028 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1029 	struct ipv6_tel_txoption opt;
1030 	struct dst_entry *dst = NULL, *ndst = NULL;
1031 	struct net_device *tdev;
1032 	int mtu;
1033 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1034 	unsigned int max_headroom = psh_hlen;
1035 	u8 hop_limit;
1036 	int err = -1;
1037 
1038 	if (t->parms.collect_md) {
1039 		hop_limit = skb_tunnel_info(skb)->key.ttl;
1040 		goto route_lookup;
1041 	} else {
1042 		hop_limit = t->parms.hop_limit;
1043 	}
1044 
1045 	/* NBMA tunnel */
1046 	if (ipv6_addr_any(&t->parms.raddr)) {
1047 		struct in6_addr *addr6;
1048 		struct neighbour *neigh;
1049 		int addr_type;
1050 
1051 		if (!skb_dst(skb))
1052 			goto tx_err_link_failure;
1053 
1054 		neigh = dst_neigh_lookup(skb_dst(skb),
1055 					 &ipv6_hdr(skb)->daddr);
1056 		if (!neigh)
1057 			goto tx_err_link_failure;
1058 
1059 		addr6 = (struct in6_addr *)&neigh->primary_key;
1060 		addr_type = ipv6_addr_type(addr6);
1061 
1062 		if (addr_type == IPV6_ADDR_ANY)
1063 			addr6 = &ipv6_hdr(skb)->daddr;
1064 
1065 		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1066 		neigh_release(neigh);
1067 	} else if (!fl6->flowi6_mark)
1068 		dst = dst_cache_get(&t->dst_cache);
1069 
1070 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1071 		goto tx_err_link_failure;
1072 
1073 	if (!dst) {
1074 route_lookup:
1075 		dst = ip6_route_output(net, NULL, fl6);
1076 
1077 		if (dst->error)
1078 			goto tx_err_link_failure;
1079 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1080 		if (IS_ERR(dst)) {
1081 			err = PTR_ERR(dst);
1082 			dst = NULL;
1083 			goto tx_err_link_failure;
1084 		}
1085 		if (t->parms.collect_md &&
1086 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1087 				       &fl6->daddr, 0, &fl6->saddr))
1088 			goto tx_err_link_failure;
1089 		ndst = dst;
1090 	}
1091 
1092 	tdev = dst->dev;
1093 
1094 	if (tdev == dev) {
1095 		stats->collisions++;
1096 		net_warn_ratelimited("%s: Local routing loop detected!\n",
1097 				     t->parms.name);
1098 		goto tx_err_dst_release;
1099 	}
1100 	mtu = dst_mtu(dst) - psh_hlen;
1101 	if (encap_limit >= 0) {
1102 		max_headroom += 8;
1103 		mtu -= 8;
1104 	}
1105 	if (mtu < IPV6_MIN_MTU)
1106 		mtu = IPV6_MIN_MTU;
1107 	if (skb_dst(skb) && !t->parms.collect_md)
1108 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1109 	if (skb->len > mtu && !skb_is_gso(skb)) {
1110 		*pmtu = mtu;
1111 		err = -EMSGSIZE;
1112 		goto tx_err_dst_release;
1113 	}
1114 
1115 	if (t->err_count > 0) {
1116 		if (time_before(jiffies,
1117 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1118 			t->err_count--;
1119 
1120 			dst_link_failure(skb);
1121 		} else {
1122 			t->err_count = 0;
1123 		}
1124 	}
1125 
1126 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1127 
1128 	/*
1129 	 * Okay, now see if we can stuff it in the buffer as-is.
1130 	 */
1131 	max_headroom += LL_RESERVED_SPACE(tdev);
1132 
1133 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1134 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1135 		struct sk_buff *new_skb;
1136 
1137 		new_skb = skb_realloc_headroom(skb, max_headroom);
1138 		if (!new_skb)
1139 			goto tx_err_dst_release;
1140 
1141 		if (skb->sk)
1142 			skb_set_owner_w(new_skb, skb->sk);
1143 		consume_skb(skb);
1144 		skb = new_skb;
1145 	}
1146 
1147 	if (t->parms.collect_md) {
1148 		if (t->encap.type != TUNNEL_ENCAP_NONE)
1149 			goto tx_err_dst_release;
1150 	} else {
1151 		if (!fl6->flowi6_mark && ndst)
1152 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1153 	}
1154 	skb_dst_set(skb, dst);
1155 
1156 	if (encap_limit >= 0) {
1157 		init_tel_txopt(&opt, encap_limit);
1158 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
1159 	}
1160 
1161 	/* Calculate max headroom for all the headers and adjust
1162 	 * needed_headroom if necessary.
1163 	 */
1164 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1165 			+ dst->header_len + t->hlen;
1166 	if (max_headroom > dev->needed_headroom)
1167 		dev->needed_headroom = max_headroom;
1168 
1169 	err = ip6_tnl_encap(skb, t, &proto, fl6);
1170 	if (err)
1171 		return err;
1172 
1173 	skb_push(skb, sizeof(struct ipv6hdr));
1174 	skb_reset_network_header(skb);
1175 	ipv6h = ipv6_hdr(skb);
1176 	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
1177 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1178 	ipv6h->hop_limit = hop_limit;
1179 	ipv6h->nexthdr = proto;
1180 	ipv6h->saddr = fl6->saddr;
1181 	ipv6h->daddr = fl6->daddr;
1182 	ip6tunnel_xmit(NULL, skb, dev);
1183 	return 0;
1184 tx_err_link_failure:
1185 	stats->tx_carrier_errors++;
1186 	dst_link_failure(skb);
1187 tx_err_dst_release:
1188 	dst_release(dst);
1189 	return err;
1190 }
1191 EXPORT_SYMBOL(ip6_tnl_xmit);
1192 
1193 static inline int
1194 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1195 {
1196 	struct ip6_tnl *t = netdev_priv(dev);
1197 	const struct iphdr  *iph = ip_hdr(skb);
1198 	int encap_limit = -1;
1199 	struct flowi6 fl6;
1200 	__u8 dsfield;
1201 	__u32 mtu;
1202 	u8 tproto;
1203 	int err;
1204 
1205 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1206 
1207 	tproto = ACCESS_ONCE(t->parms.proto);
1208 	if (tproto != IPPROTO_IPIP && tproto != 0)
1209 		return -1;
1210 
1211 	dsfield = ipv4_get_dsfield(iph);
1212 
1213 	if (t->parms.collect_md) {
1214 		struct ip_tunnel_info *tun_info;
1215 		const struct ip_tunnel_key *key;
1216 
1217 		tun_info = skb_tunnel_info(skb);
1218 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1219 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1220 			return -1;
1221 		key = &tun_info->key;
1222 		memset(&fl6, 0, sizeof(fl6));
1223 		fl6.flowi6_proto = IPPROTO_IPIP;
1224 		fl6.daddr = key->u.ipv6.dst;
1225 		fl6.flowlabel = key->label;
1226 	} else {
1227 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1228 			encap_limit = t->parms.encap_limit;
1229 
1230 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1231 		fl6.flowi6_proto = IPPROTO_IPIP;
1232 
1233 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1234 			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1235 					 & IPV6_TCLASS_MASK;
1236 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1237 			fl6.flowi6_mark = skb->mark;
1238 	}
1239 
1240 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1241 		return -1;
1242 
1243 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1244 
1245 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1246 			   IPPROTO_IPIP);
1247 	if (err != 0) {
1248 		/* XXX: send ICMP error even if DF is not set. */
1249 		if (err == -EMSGSIZE)
1250 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1251 				  htonl(mtu));
1252 		return -1;
1253 	}
1254 
1255 	return 0;
1256 }
1257 
1258 static inline int
1259 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1260 {
1261 	struct ip6_tnl *t = netdev_priv(dev);
1262 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1263 	int encap_limit = -1;
1264 	__u16 offset;
1265 	struct flowi6 fl6;
1266 	__u8 dsfield;
1267 	__u32 mtu;
1268 	u8 tproto;
1269 	int err;
1270 
1271 	tproto = ACCESS_ONCE(t->parms.proto);
1272 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1273 	    ip6_tnl_addr_conflict(t, ipv6h))
1274 		return -1;
1275 
1276 	dsfield = ipv6_get_dsfield(ipv6h);
1277 
1278 	if (t->parms.collect_md) {
1279 		struct ip_tunnel_info *tun_info;
1280 		const struct ip_tunnel_key *key;
1281 
1282 		tun_info = skb_tunnel_info(skb);
1283 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1284 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1285 			return -1;
1286 		key = &tun_info->key;
1287 		memset(&fl6, 0, sizeof(fl6));
1288 		fl6.flowi6_proto = IPPROTO_IPV6;
1289 		fl6.daddr = key->u.ipv6.dst;
1290 		fl6.flowlabel = key->label;
1291 	} else {
1292 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1293 		if (offset > 0) {
1294 			struct ipv6_tlv_tnl_enc_lim *tel;
1295 
1296 			tel = (void *)&skb_network_header(skb)[offset];
1297 			if (tel->encap_limit == 0) {
1298 				icmpv6_send(skb, ICMPV6_PARAMPROB,
1299 					    ICMPV6_HDR_FIELD, offset + 2);
1300 				return -1;
1301 			}
1302 			encap_limit = tel->encap_limit - 1;
1303 		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1304 			encap_limit = t->parms.encap_limit;
1305 		}
1306 
1307 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1308 		fl6.flowi6_proto = IPPROTO_IPV6;
1309 
1310 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1311 			fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
1312 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1313 			fl6.flowlabel |= ip6_flowlabel(ipv6h);
1314 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1315 			fl6.flowi6_mark = skb->mark;
1316 	}
1317 
1318 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1319 		return -1;
1320 
1321 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1322 
1323 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1324 			   IPPROTO_IPV6);
1325 	if (err != 0) {
1326 		if (err == -EMSGSIZE)
1327 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1328 		return -1;
1329 	}
1330 
1331 	return 0;
1332 }
1333 
1334 static netdev_tx_t
1335 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1336 {
1337 	struct ip6_tnl *t = netdev_priv(dev);
1338 	struct net_device_stats *stats = &t->dev->stats;
1339 	int ret;
1340 
1341 	switch (skb->protocol) {
1342 	case htons(ETH_P_IP):
1343 		ret = ip4ip6_tnl_xmit(skb, dev);
1344 		break;
1345 	case htons(ETH_P_IPV6):
1346 		ret = ip6ip6_tnl_xmit(skb, dev);
1347 		break;
1348 	default:
1349 		goto tx_err;
1350 	}
1351 
1352 	if (ret < 0)
1353 		goto tx_err;
1354 
1355 	return NETDEV_TX_OK;
1356 
1357 tx_err:
1358 	stats->tx_errors++;
1359 	stats->tx_dropped++;
1360 	kfree_skb(skb);
1361 	return NETDEV_TX_OK;
1362 }
1363 
1364 static void ip6_tnl_link_config(struct ip6_tnl *t)
1365 {
1366 	struct net_device *dev = t->dev;
1367 	struct __ip6_tnl_parm *p = &t->parms;
1368 	struct flowi6 *fl6 = &t->fl.u.ip6;
1369 	int t_hlen;
1370 
1371 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1372 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1373 
1374 	/* Set up flowi template */
1375 	fl6->saddr = p->laddr;
1376 	fl6->daddr = p->raddr;
1377 	fl6->flowi6_oif = p->link;
1378 	fl6->flowlabel = 0;
1379 
1380 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1381 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1382 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1383 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1384 
1385 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1386 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1387 
1388 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1389 		dev->flags |= IFF_POINTOPOINT;
1390 	else
1391 		dev->flags &= ~IFF_POINTOPOINT;
1392 
1393 	t->tun_hlen = 0;
1394 	t->hlen = t->encap_hlen + t->tun_hlen;
1395 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1396 
1397 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
1398 		int strict = (ipv6_addr_type(&p->raddr) &
1399 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1400 
1401 		struct rt6_info *rt = rt6_lookup(t->net,
1402 						 &p->raddr, &p->laddr,
1403 						 p->link, strict);
1404 
1405 		if (!rt)
1406 			return;
1407 
1408 		if (rt->dst.dev) {
1409 			dev->hard_header_len = rt->dst.dev->hard_header_len +
1410 				t_hlen;
1411 
1412 			dev->mtu = rt->dst.dev->mtu - t_hlen;
1413 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1414 				dev->mtu -= 8;
1415 
1416 			if (dev->mtu < IPV6_MIN_MTU)
1417 				dev->mtu = IPV6_MIN_MTU;
1418 		}
1419 		ip6_rt_put(rt);
1420 	}
1421 }
1422 
1423 /**
1424  * ip6_tnl_change - update the tunnel parameters
1425  *   @t: tunnel to be changed
1426  *   @p: tunnel configuration parameters
1427  *
1428  * Description:
1429  *   ip6_tnl_change() updates the tunnel parameters
1430  **/
1431 
1432 static int
1433 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1434 {
1435 	t->parms.laddr = p->laddr;
1436 	t->parms.raddr = p->raddr;
1437 	t->parms.flags = p->flags;
1438 	t->parms.hop_limit = p->hop_limit;
1439 	t->parms.encap_limit = p->encap_limit;
1440 	t->parms.flowinfo = p->flowinfo;
1441 	t->parms.link = p->link;
1442 	t->parms.proto = p->proto;
1443 	dst_cache_reset(&t->dst_cache);
1444 	ip6_tnl_link_config(t);
1445 	return 0;
1446 }
1447 
1448 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1449 {
1450 	struct net *net = t->net;
1451 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1452 	int err;
1453 
1454 	ip6_tnl_unlink(ip6n, t);
1455 	synchronize_net();
1456 	err = ip6_tnl_change(t, p);
1457 	ip6_tnl_link(ip6n, t);
1458 	netdev_state_change(t->dev);
1459 	return err;
1460 }
1461 
1462 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1463 {
1464 	/* for the default tnl0 device, only the proto may be changed */
1465 	t->parms.proto = p->proto;
1466 	netdev_state_change(t->dev);
1467 	return 0;
1468 }
1469 
1470 static void
1471 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1472 {
1473 	p->laddr = u->laddr;
1474 	p->raddr = u->raddr;
1475 	p->flags = u->flags;
1476 	p->hop_limit = u->hop_limit;
1477 	p->encap_limit = u->encap_limit;
1478 	p->flowinfo = u->flowinfo;
1479 	p->link = u->link;
1480 	p->proto = u->proto;
1481 	memcpy(p->name, u->name, sizeof(u->name));
1482 }
1483 
1484 static void
1485 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1486 {
1487 	u->laddr = p->laddr;
1488 	u->raddr = p->raddr;
1489 	u->flags = p->flags;
1490 	u->hop_limit = p->hop_limit;
1491 	u->encap_limit = p->encap_limit;
1492 	u->flowinfo = p->flowinfo;
1493 	u->link = p->link;
1494 	u->proto = p->proto;
1495 	memcpy(u->name, p->name, sizeof(u->name));
1496 }
1497 
1498 /**
1499  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1500  *   @dev: virtual device associated with tunnel
1501  *   @ifr: parameters passed from userspace
1502  *   @cmd: command to be performed
1503  *
1504  * Description:
1505  *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
1506  *   from userspace.
1507  *
1508  *   The possible commands are the following:
1509  *     %SIOCGETTUNNEL: get tunnel parameters for device
1510  *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1511  *     %SIOCCHGTUNNEL: change tunnel parameters to those given
1512  *     %SIOCDELTUNNEL: delete tunnel
1513  *
1514  *   The fallback device "ip6tnl0", created during module
1515  *   initialization, can be used for creating other tunnel devices.
1516  *
1517  * Return:
1518  *   0 on success,
1519  *   %-EFAULT if unable to copy data to or from userspace,
1520  *   %-EPERM if the current process lacks the %CAP_NET_ADMIN capability,
1521  *   %-EINVAL if passed tunnel parameters are invalid,
1522  *   %-EEXIST if changing a tunnel's parameters would cause a conflict
1523  *   %-ENODEV if attempting to change or delete a nonexistent device
1524  **/
1525 
1526 static int
1527 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1528 {
1529 	int err = 0;
1530 	struct ip6_tnl_parm p;
1531 	struct __ip6_tnl_parm p1;
1532 	struct ip6_tnl *t = netdev_priv(dev);
1533 	struct net *net = t->net;
1534 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1535 
1536 	memset(&p1, 0, sizeof(p1));
1537 
1538 	switch (cmd) {
1539 	case SIOCGETTUNNEL:
1540 		if (dev == ip6n->fb_tnl_dev) {
1541 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1542 				err = -EFAULT;
1543 				break;
1544 			}
1545 			ip6_tnl_parm_from_user(&p1, &p);
1546 			t = ip6_tnl_locate(net, &p1, 0);
1547 			if (IS_ERR(t))
1548 				t = netdev_priv(dev);
1549 		} else {
1550 			memset(&p, 0, sizeof(p));
1551 		}
1552 		ip6_tnl_parm_to_user(&p, &t->parms);
1553 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1554 			err = -EFAULT;
1556 		break;
1557 	case SIOCADDTUNNEL:
1558 	case SIOCCHGTUNNEL:
1559 		err = -EPERM;
1560 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1561 			break;
1562 		err = -EFAULT;
1563 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1564 			break;
1565 		err = -EINVAL;
1566 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1567 		    p.proto != 0)
1568 			break;
1569 		ip6_tnl_parm_from_user(&p1, &p);
1570 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1571 		if (cmd == SIOCCHGTUNNEL) {
1572 			if (!IS_ERR(t)) {
1573 				if (t->dev != dev) {
1574 					err = -EEXIST;
1575 					break;
1576 				}
1577 			} else
1578 				t = netdev_priv(dev);
1579 			if (dev == ip6n->fb_tnl_dev)
1580 				err = ip6_tnl0_update(t, &p1);
1581 			else
1582 				err = ip6_tnl_update(t, &p1);
1583 		}
1584 		if (!IS_ERR(t)) {
1585 			err = 0;
1586 			ip6_tnl_parm_to_user(&p, &t->parms);
1587 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1588 				err = -EFAULT;
1589 
1590 		} else {
1591 			err = PTR_ERR(t);
1592 		}
1593 		break;
1594 	case SIOCDELTUNNEL:
1595 		err = -EPERM;
1596 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1597 			break;
1598 
1599 		if (dev == ip6n->fb_tnl_dev) {
1600 			err = -EFAULT;
1601 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1602 				break;
1603 			err = -ENOENT;
1604 			ip6_tnl_parm_from_user(&p1, &p);
1605 			t = ip6_tnl_locate(net, &p1, 0);
1606 			if (IS_ERR(t))
1607 				break;
1608 			err = -EPERM;
1609 			if (t->dev == ip6n->fb_tnl_dev)
1610 				break;
1611 			dev = t->dev;
1612 		}
1613 		err = 0;
1614 		unregister_netdevice(dev);
1615 		break;
1616 	default:
1617 		err = -EINVAL;
1618 	}
1619 	return err;
1620 }
1621 
1622 /**
1623  * ip6_tnl_change_mtu - change mtu manually for tunnel device
1624  *   @dev: virtual device associated with tunnel
1625  *   @new_mtu: the new mtu
1626  *
1627  * Return:
1628  *   0 on success,
1629  *   %-EINVAL if mtu too small
1630  **/
1631 
1632 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1633 {
1634 	struct ip6_tnl *tnl = netdev_priv(dev);
1635 
1636 	if (tnl->parms.proto == IPPROTO_IPIP) {
1637 		if (new_mtu < 68)
1638 			return -EINVAL;
1639 	} else {
1640 		if (new_mtu < IPV6_MIN_MTU)
1641 			return -EINVAL;
1642 	}
1643 	if (new_mtu > 0xFFF8 - dev->hard_header_len)
1644 		return -EINVAL;
1645 	dev->mtu = new_mtu;
1646 	return 0;
1647 }
1648 EXPORT_SYMBOL(ip6_tnl_change_mtu);
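
/*
 * Worked example (not part of the original file, assuming an ip6ip6
 * tunnel): the valid window is bounded below by the inner protocol's
 * minimum MTU and above by the 64k IPv6 payload limit minus the reserved
 * header room.
 */
#if 0
static void mtu_example(struct net_device *dev)
{
	WARN_ON(ip6_tnl_change_mtu(dev, IPV6_MIN_MTU - 1) != -EINVAL);
	WARN_ON(ip6_tnl_change_mtu(dev, 0xFFF9) != -EINVAL);
	WARN_ON(ip6_tnl_change_mtu(dev, 1480) != 0);	/* typical value */
}
#endif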
1649 
1650 int ip6_tnl_get_iflink(const struct net_device *dev)
1651 {
1652 	struct ip6_tnl *t = netdev_priv(dev);
1653 
1654 	return t->parms.link;
1655 }
1656 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1657 
1658 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1659 			  unsigned int num)
1660 {
1661 	if (num >= MAX_IPTUN_ENCAP_OPS)
1662 		return -ERANGE;
1663 
1664 	return !cmpxchg((const struct ip6_tnl_encap_ops **)
1665 			&ip6tun_encaps[num],
1666 			NULL, ops) ? 0 : -1;
1667 }
1668 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1669 
1670 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1671 			  unsigned int num)
1672 {
1673 	int ret;
1674 
1675 	if (num >= MAX_IPTUN_ENCAP_OPS)
1676 		return -ERANGE;
1677 
1678 	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1679 		       &ip6tun_encaps[num],
1680 		       ops, NULL) == ops) ? 0 : -1;
1681 
1682 	synchronize_net();
1683 
1684 	return ret;
1685 }
1686 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
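
/*
 * Usage sketch (not part of the original file; the ops and callbacks are
 * hypothetical): an encapsulation module, FOU-style, claims one
 * TUNNEL_ENCAP_* slot at init time and releases it on exit:
 */
#if 0
static size_t my_encap_hlen(struct ip_tunnel_encap *e)
{
	return 8;			/* hypothetical fixed header size */
}

static int my_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
			   u8 *protocol, struct flowi6 *fl6)
{
	return 0;			/* hypothetical: nothing to prepend */
}

static const struct ip6_tnl_encap_ops my_encap_ops = {
	.encap_hlen	= my_encap_hlen,
	.build_header	= my_build_header,
};

static int __init my_encap_init(void)
{
	return ip6_tnl_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
}

static void __exit my_encap_exit(void)
{
	ip6_tnl_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
}
#endif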
1687 
1688 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1689 			struct ip_tunnel_encap *ipencap)
1690 {
1691 	int hlen;
1692 
1693 	memset(&t->encap, 0, sizeof(t->encap));
1694 
1695 	hlen = ip6_encap_hlen(ipencap);
1696 	if (hlen < 0)
1697 		return hlen;
1698 
1699 	t->encap.type = ipencap->type;
1700 	t->encap.sport = ipencap->sport;
1701 	t->encap.dport = ipencap->dport;
1702 	t->encap.flags = ipencap->flags;
1703 
1704 	t->encap_hlen = hlen;
1705 	t->hlen = t->encap_hlen + t->tun_hlen;
1706 
1707 	return 0;
1708 }
1709 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1710 
1711 static const struct net_device_ops ip6_tnl_netdev_ops = {
1712 	.ndo_init	= ip6_tnl_dev_init,
1713 	.ndo_uninit	= ip6_tnl_dev_uninit,
1714 	.ndo_start_xmit = ip6_tnl_start_xmit,
1715 	.ndo_do_ioctl	= ip6_tnl_ioctl,
1716 	.ndo_change_mtu = ip6_tnl_change_mtu,
1717 	.ndo_get_stats	= ip6_get_stats,
1718 	.ndo_get_iflink = ip6_tnl_get_iflink,
1719 };
1720 
1721 #define IPXIPX_FEATURES (NETIF_F_SG |		\
1722 			 NETIF_F_FRAGLIST |	\
1723 			 NETIF_F_HIGHDMA |	\
1724 			 NETIF_F_GSO_SOFTWARE |	\
1725 			 NETIF_F_HW_CSUM)
1726 
1727 /**
1728  * ip6_tnl_dev_setup - setup virtual tunnel device
1729  *   @dev: virtual device associated with tunnel
1730  *
1731  * Description:
1732  *   Initialize function pointers and device parameters
1733  **/
1734 
1735 static void ip6_tnl_dev_setup(struct net_device *dev)
1736 {
1737 	dev->netdev_ops = &ip6_tnl_netdev_ops;
1738 	dev->destructor = ip6_dev_free;
1739 
1740 	dev->type = ARPHRD_TUNNEL6;
1741 	dev->flags |= IFF_NOARP;
1742 	dev->addr_len = sizeof(struct in6_addr);
1743 	dev->features |= NETIF_F_LLTX;
1744 	netif_keep_dst(dev);
1745 
1746 	dev->features		|= IPXIPX_FEATURES;
1747 	dev->hw_features	|= IPXIPX_FEATURES;
1748 
1749 	/* This perm addr will be used as interface identifier by IPv6 */
1750 	dev->addr_assign_type = NET_ADDR_RANDOM;
1751 	eth_random_addr(dev->perm_addr);
1752 }
1753 
1754 
1755 /**
1756  * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1757  *   @dev: virtual device associated with tunnel
1758  **/
1759 
1760 static inline int
1761 ip6_tnl_dev_init_gen(struct net_device *dev)
1762 {
1763 	struct ip6_tnl *t = netdev_priv(dev);
1764 	int ret;
1765 	int t_hlen;
1766 
1767 	t->dev = dev;
1768 	t->net = dev_net(dev);
1769 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1770 	if (!dev->tstats)
1771 		return -ENOMEM;
1772 
1773 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1774 	if (ret)
1775 		goto free_stats;
1776 
1777 	ret = gro_cells_init(&t->gro_cells, dev);
1778 	if (ret)
1779 		goto destroy_dst;
1780 
1781 	t->tun_hlen = 0;
1782 	t->hlen = t->encap_hlen + t->tun_hlen;
1783 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1784 
1785 	dev->type = ARPHRD_TUNNEL6;
1786 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1787 	dev->mtu = ETH_DATA_LEN - t_hlen;
1788 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1789 		dev->mtu -= 8;
1790 
1791 	return 0;
1792 
1793 destroy_dst:
1794 	dst_cache_destroy(&t->dst_cache);
1795 free_stats:
1796 	free_percpu(dev->tstats);
1797 	dev->tstats = NULL;
1798 
1799 	return ret;
1800 }
1801 
1802 /**
1803  * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1804  *   @dev: virtual device associated with tunnel
1805  **/
1806 
1807 static int ip6_tnl_dev_init(struct net_device *dev)
1808 {
1809 	struct ip6_tnl *t = netdev_priv(dev);
1810 	int err = ip6_tnl_dev_init_gen(dev);
1811 
1812 	if (err)
1813 		return err;
1814 	ip6_tnl_link_config(t);
1815 	if (t->parms.collect_md) {
1816 		dev->features |= NETIF_F_NETNS_LOCAL;
1817 		netif_keep_dst(dev);
1818 	}
1819 	return 0;
1820 }
1821 
1822 /**
1823  * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1824  *   @dev: fallback device
1825  *
1826  * Return: 0
1827  **/
1828 
1829 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1830 {
1831 	struct ip6_tnl *t = netdev_priv(dev);
1832 	struct net *net = dev_net(dev);
1833 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1834 
1835 	t->parms.proto = IPPROTO_IPV6;
1836 	dev_hold(dev);
1837 
1838 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
1839 	return 0;
1840 }
1841 
1842 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1843 {
1844 	u8 proto;
1845 
1846 	if (!data || !data[IFLA_IPTUN_PROTO])
1847 		return 0;
1848 
1849 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1850 	if (proto != IPPROTO_IPV6 &&
1851 	    proto != IPPROTO_IPIP &&
1852 	    proto != 0)
1853 		return -EINVAL;
1854 
1855 	return 0;
1856 }
1857 
1858 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1859 				  struct __ip6_tnl_parm *parms)
1860 {
1861 	memset(parms, 0, sizeof(*parms));
1862 
1863 	if (!data)
1864 		return;
1865 
1866 	if (data[IFLA_IPTUN_LINK])
1867 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1868 
1869 	if (data[IFLA_IPTUN_LOCAL])
1870 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1871 
1872 	if (data[IFLA_IPTUN_REMOTE])
1873 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1874 
1875 	if (data[IFLA_IPTUN_TTL])
1876 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1877 
1878 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
1879 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1880 
1881 	if (data[IFLA_IPTUN_FLOWINFO])
1882 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1883 
1884 	if (data[IFLA_IPTUN_FLAGS])
1885 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1886 
1887 	if (data[IFLA_IPTUN_PROTO])
1888 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1889 
1890 	if (data[IFLA_IPTUN_COLLECT_METADATA])
1891 		parms->collect_md = true;
1892 }
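
/*
 * For reference (comment added, not in the original file): these are the
 * attributes that, e.g., iproute2 fills in for a command such as
 *
 *	ip link add name ip6tnl1 type ip6tnl \
 *		local 2001:db8::1 remote 2001:db8::2 \
 *		mode ip6ip6 ttl 64 encaplimit 4
 *
 * where local/remote map to IFLA_IPTUN_LOCAL/IFLA_IPTUN_REMOTE, mode to
 * IFLA_IPTUN_PROTO, ttl to IFLA_IPTUN_TTL, encaplimit to
 * IFLA_IPTUN_ENCAP_LIMIT, and "external" sets IFLA_IPTUN_COLLECT_METADATA.
 */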
1893 
1894 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1895 					struct ip_tunnel_encap *ipencap)
1896 {
1897 	bool ret = false;
1898 
1899 	memset(ipencap, 0, sizeof(*ipencap));
1900 
1901 	if (!data)
1902 		return ret;
1903 
1904 	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1905 		ret = true;
1906 		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1907 	}
1908 
1909 	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1910 		ret = true;
1911 		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1912 	}
1913 
1914 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1915 		ret = true;
1916 		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1917 	}
1918 
1919 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1920 		ret = true;
1921 		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1922 	}
1923 
1924 	return ret;
1925 }
1926 
1927 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1928 			   struct nlattr *tb[], struct nlattr *data[])
1929 {
1930 	struct net *net = dev_net(dev);
1931 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1932 	struct ip6_tnl *nt, *t;
1933 	struct ip_tunnel_encap ipencap;
1934 
1935 	nt = netdev_priv(dev);
1936 
1937 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1938 		int err = ip6_tnl_encap_setup(nt, &ipencap);
1939 
1940 		if (err < 0)
1941 			return err;
1942 	}
1943 
1944 	ip6_tnl_netlink_parms(data, &nt->parms);
1945 
1946 	if (nt->parms.collect_md) {
1947 		if (rtnl_dereference(ip6n->collect_md_tun))
1948 			return -EEXIST;
1949 	} else {
1950 		t = ip6_tnl_locate(net, &nt->parms, 0);
1951 		if (!IS_ERR(t))
1952 			return -EEXIST;
1953 	}
1954 
1955 	return ip6_tnl_create2(dev);
1956 }
1957 
1958 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1959 			      struct nlattr *data[])
1960 {
1961 	struct ip6_tnl *t = netdev_priv(dev);
1962 	struct __ip6_tnl_parm p;
1963 	struct net *net = t->net;
1964 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1965 	struct ip_tunnel_encap ipencap;
1966 
1967 	if (dev == ip6n->fb_tnl_dev)
1968 		return -EINVAL;
1969 
1970 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1971 		int err = ip6_tnl_encap_setup(t, &ipencap);
1972 
1973 		if (err < 0)
1974 			return err;
1975 	}
1976 	ip6_tnl_netlink_parms(data, &p);
1977 	if (p.collect_md)
1978 		return -EINVAL;
1979 
1980 	t = ip6_tnl_locate(net, &p, 0);
1981 	if (!IS_ERR(t)) {
1982 		if (t->dev != dev)
1983 			return -EEXIST;
1984 	} else
1985 		t = netdev_priv(dev);
1986 
1987 	return ip6_tnl_update(t, &p);
1988 }
1989 
1990 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
1991 {
1992 	struct net *net = dev_net(dev);
1993 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1994 
1995 	if (dev != ip6n->fb_tnl_dev)
1996 		unregister_netdevice_queue(dev, head);
1997 }
1998 
1999 static size_t ip6_tnl_get_size(const struct net_device *dev)
2000 {
2001 	return
2002 		/* IFLA_IPTUN_LINK */
2003 		nla_total_size(4) +
2004 		/* IFLA_IPTUN_LOCAL */
2005 		nla_total_size(sizeof(struct in6_addr)) +
2006 		/* IFLA_IPTUN_REMOTE */
2007 		nla_total_size(sizeof(struct in6_addr)) +
2008 		/* IFLA_IPTUN_TTL */
2009 		nla_total_size(1) +
2010 		/* IFLA_IPTUN_ENCAP_LIMIT */
2011 		nla_total_size(1) +
2012 		/* IFLA_IPTUN_FLOWINFO */
2013 		nla_total_size(4) +
2014 		/* IFLA_IPTUN_FLAGS */
2015 		nla_total_size(4) +
2016 		/* IFLA_IPTUN_PROTO */
2017 		nla_total_size(1) +
2018 		/* IFLA_IPTUN_ENCAP_TYPE */
2019 		nla_total_size(2) +
2020 		/* IFLA_IPTUN_ENCAP_FLAGS */
2021 		nla_total_size(2) +
2022 		/* IFLA_IPTUN_ENCAP_SPORT */
2023 		nla_total_size(2) +
2024 		/* IFLA_IPTUN_ENCAP_DPORT */
2025 		nla_total_size(2) +
2026 		/* IFLA_IPTUN_COLLECT_METADATA */
2027 		nla_total_size(0) +
2028 		0;
2029 }
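/* In ip6_tnl_get_size() above, nla_total_size(n) accounts for an
 * n-byte payload plus the attribute header and alignment padding; the
 * COLLECT_METADATA flag contributes its header only (payload size 0).
 */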
2030 
2031 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2032 {
2033 	struct ip6_tnl *tunnel = netdev_priv(dev);
2034 	struct __ip6_tnl_parm *parm = &tunnel->parms;
2035 
2036 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2037 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2038 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2039 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2040 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2041 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2042 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2043 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
2044 		goto nla_put_failure;
2045 
2046 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2047 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2048 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2049 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2050 		goto nla_put_failure;
2051 
2052 	if (parm->collect_md)
2053 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2054 			goto nla_put_failure;
2055 	return 0;
2056 
2057 nla_put_failure:
2058 	return -EMSGSIZE;
2059 }
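/* Every attribute emitted by ip6_tnl_fill_info() must be budgeted in
 * ip6_tnl_get_size() above; an unaccounted attribute can make the
 * netlink message run out of room, failing the dump with -EMSGSIZE.
 * Userspace sees the result through dumps such as "ip -d link show"
 * (illustrative).
 */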
2060 
2061 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2062 {
2063 	struct ip6_tnl *tunnel = netdev_priv(dev);
2064 
2065 	return tunnel->net;
2066 }
2067 EXPORT_SYMBOL(ip6_tnl_get_link_net);
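/* rtnetlink calls this hook to fill IFLA_LINK_NETNSID, letting
 * userspace see that a tunnel transmits in a different netns than the
 * one its net_device currently lives in (x-netns operation).
 */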
2068 
2069 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2070 	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
2071 	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
2072 	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
2073 	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
2074 	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
2075 	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
2076 	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
2077 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
2078 	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
2079 	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
2080 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
2081 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
2082 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
2083 };
2084 
2085 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2086 	.kind		= "ip6tnl",
2087 	.maxtype	= IFLA_IPTUN_MAX,
2088 	.policy		= ip6_tnl_policy,
2089 	.priv_size	= sizeof(struct ip6_tnl),
2090 	.setup		= ip6_tnl_dev_setup,
2091 	.validate	= ip6_tnl_validate,
2092 	.newlink	= ip6_tnl_newlink,
2093 	.changelink	= ip6_tnl_changelink,
2094 	.dellink	= ip6_tnl_dellink,
2095 	.get_size	= ip6_tnl_get_size,
2096 	.fill_info	= ip6_tnl_fill_info,
2097 	.get_link_net	= ip6_tnl_get_link_net,
2098 };
2099 
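/* Both handlers hang off the IPv6 protocol demux via xfrm6_tunnel:
 * ip4ip6_handler (AF_INET) receives IPv4-in-IPv6, i.e. next header
 * IPPROTO_IPIP (4), while ip6ip6_handler (AF_INET6) receives
 * IPv6-in-IPv6, next header IPPROTO_IPV6 (41).  The priority value
 * orders each handler among other xfrm6_tunnel users of the same
 * protocol.
 */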
2100 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2101 	.handler	= ip4ip6_rcv,
2102 	.err_handler	= ip4ip6_err,
2103 	.priority	= 1,
2104 };
2105 
2106 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2107 	.handler	= ip6ip6_rcv,
2108 	.err_handler	= ip6ip6_err,
2109 	.priority	= 1,
2110 };
2111 
2112 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
2113 {
2114 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2115 	struct net_device *dev, *aux;
2116 	int h;
2117 	struct ip6_tnl *t;
2118 	LIST_HEAD(list);
2119 
2120 	for_each_netdev_safe(net, dev, aux)
2121 		if (dev->rtnl_link_ops == &ip6_link_ops)
2122 			unregister_netdevice_queue(dev, &list);
2123 
2124 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2125 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
2126 		while (t) {
2127 			/* If dev is in the same netns, it has already
2128 			 * been added to the list by the previous loop.
2129 			 */
2130 			if (!net_eq(dev_net(t->dev), net))
2131 				unregister_netdevice_queue(t->dev, &list);
2132 			t = rtnl_dereference(t->next);
2133 		}
2134 	}
2135 
2136 	unregister_netdevice_many(&list);
2137 }
2138 
2139 static int __net_init ip6_tnl_init_net(struct net *net)
2140 {
2141 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2142 	struct ip6_tnl *t = NULL;
2143 	int err;
2144 
2145 	ip6n->tnls[0] = ip6n->tnls_wc;
2146 	ip6n->tnls[1] = ip6n->tnls_r_l;
2147 
2148 	err = -ENOMEM;
2149 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2150 					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2151 
2152 	if (!ip6n->fb_tnl_dev)
2153 		goto err_alloc_dev;
2154 	dev_net_set(ip6n->fb_tnl_dev, net);
2155 	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2156 	/* FB netdevice is special: we have one, and only one per netns.
2157 	 * Allowing it to be moved to another netns is clearly unsafe.
2158 	 */
2159 	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2160 
2161 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2162 	if (err < 0)
2163 		goto err_register;
2164 
2165 	err = register_netdev(ip6n->fb_tnl_dev);
2166 	if (err < 0)
2167 		goto err_register;
2168 
2169 	t = netdev_priv(ip6n->fb_tnl_dev);
2170 
2171 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2172 	return 0;
2173 
2174 err_register:
2175 	ip6_dev_free(ip6n->fb_tnl_dev);
2176 err_alloc_dev:
2177 	return err;
2178 }
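/* The per-netns fallback device ip6tnl0 keeps wildcard (all-zero)
 * endpoints, so it lands in the tnls_wc list and picks up tunneled
 * packets that match no explicitly configured tunnel.
 */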
2179 
2180 static void __net_exit ip6_tnl_exit_net(struct net *net)
2181 {
2182 	rtnl_lock();
2183 	ip6_tnl_destroy_tunnels(net);
2184 	rtnl_unlock();
2185 }
2186 
2187 static struct pernet_operations ip6_tnl_net_ops = {
2188 	.init = ip6_tnl_init_net,
2189 	.exit = ip6_tnl_exit_net,
2190 	.id   = &ip6_tnl_net_id,
2191 	.size = sizeof(struct ip6_tnl_net),
2192 };
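/* Registering these pernet_operations makes every network namespace
 * allocate its own ip6_tnl_net (.size bytes, found again through
 * net_generic(net, ip6_tnl_net_id)) and run ip6_tnl_init_net() /
 * ip6_tnl_exit_net() on namespace setup and teardown.
 */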
2193 
2194 /**
2195  * ip6_tunnel_init - register protocol and reserve needed resources
2196  *
2197  * Return: 0 on success; a negative error code on failure
2198  **/
2199 
2200 static int __init ip6_tunnel_init(void)
2201 {
2202 	int err;
2203 
2204 	err = register_pernet_device(&ip6_tnl_net_ops);
2205 	if (err < 0)
2206 		goto out_pernet;
2207 
2208 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2209 	if (err < 0) {
2210 		pr_err("%s: can't register ip4ip6\n", __func__);
2211 		goto out_ip4ip6;
2212 	}
2213 
2214 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2215 	if (err < 0) {
2216 		pr_err("%s: can't register ip6ip6\n", __func__);
2217 		goto out_ip6ip6;
2218 	}
2219 	err = rtnl_link_register(&ip6_link_ops);
2220 	if (err < 0)
2221 		goto rtnl_link_failed;
2222 
2223 	return 0;
2224 
2225 rtnl_link_failed:
2226 	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2227 out_ip6ip6:
2228 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2229 out_ip4ip6:
2230 	unregister_pernet_device(&ip6_tnl_net_ops);
2231 out_pernet:
2232 	return err;
2233 }
2234 
2235 /**
2236  * ip6_tunnel_cleanup - free resources and unregister protocol
2237  **/
2238 
2239 static void __exit ip6_tunnel_cleanup(void)
2240 {
2241 	rtnl_link_unregister(&ip6_link_ops);
2242 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2243 		pr_info("%s: can't deregister ip4ip6\n", __func__);
2244 
2245 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2246 		pr_info("%s: can't deregister ip6ip6\n", __func__);
2247 
2248 	unregister_pernet_device(&ip6_tnl_net_ops);
2249 }
2250 
2251 module_init(ip6_tunnel_init);
2252 module_exit(ip6_tunnel_cleanup);
2253