1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IPv6 tunneling device
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Ville Nuorvala <vnuorval@tcs.hut.fi>
8 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
9 *
10 * Based on:
11 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
12 *
13 * RFC 2473
14 */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/module.h>
19 #include <linux/capability.h>
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/sockios.h>
23 #include <linux/icmp.h>
24 #include <linux/if.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/net.h>
28 #include <linux/in6.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/icmpv6.h>
32 #include <linux/init.h>
33 #include <linux/route.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/netfilter_ipv6.h>
36 #include <linux/slab.h>
37 #include <linux/hash.h>
38 #include <linux/etherdevice.h>
39
40 #include <linux/uaccess.h>
41 #include <linux/atomic.h>
42
43 #include <net/icmp.h>
44 #include <net/ip.h>
45 #include <net/ip_tunnels.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_route.h>
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/dsfield.h>
52 #include <net/inet_ecn.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55 #include <net/dst_metadata.h>
56 #include <net/inet_dscp.h>
57
58 MODULE_AUTHOR("Ville Nuorvala");
59 MODULE_DESCRIPTION("IPv6 tunneling device");
60 MODULE_LICENSE("GPL");
61 MODULE_ALIAS_RTNL_LINK("ip6tnl");
62 MODULE_ALIAS_NETDEV("ip6tnl0");
63
64 #define IP6_TUNNEL_HASH_SIZE_SHIFT 5
65 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
66
67 static bool log_ecn_error = true;
68 module_param(log_ecn_error, bool, 0644);
69 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
70
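/*
 * Both tunnel end-point addresses are mixed into one 32-bit value and
 * folded down to IP6_TUNNEL_HASH_SIZE_SHIFT bits to pick a hash bucket.
 * Lookups for tunnels with a wildcard end-point re-hash with the
 * unspecified address (see ip6_tnl_lookup()).
 */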
71 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
72 {
73 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
74
75 return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
76 }
77
78 static int ip6_tnl_dev_init(struct net_device *dev);
79 static void ip6_tnl_dev_setup(struct net_device *dev);
80 static struct rtnl_link_ops ip6_link_ops __read_mostly;
81
82 static unsigned int ip6_tnl_net_id __read_mostly;
83 struct ip6_tnl_net {
84 /* the IPv6 tunnel fallback device */
85 struct net_device *fb_tnl_dev;
86 /* lists for storing tunnels in use */
87 struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
88 struct ip6_tnl __rcu *tnls_wc[1];
89 struct ip6_tnl __rcu **tnls[2];
90 struct ip6_tnl __rcu *collect_md_tun;
91 };
92
93 static inline int ip6_tnl_mpls_supported(void)
94 {
95 return IS_ENABLED(CONFIG_MPLS);
96 }
97
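/* Walk one RCU-protected hash bucket; callers must hold rcu_read_lock(). */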
98 #define for_each_ip6_tunnel_rcu(start) \
99 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
100
101 /**
102 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
103 * @net: network namespace
104 * @link: ifindex of underlying interface
105 * @remote: the address of the tunnel exit-point
106 * @local: the address of the tunnel entry-point
107 *
108 * Return:
109 * tunnel matching given end-points if found,
110 * else fallback tunnel if its device is up,
111 * else %NULL
112 **/
113
114 static struct ip6_tnl *
115 ip6_tnl_lookup(struct net *net, int link,
116 const struct in6_addr *remote, const struct in6_addr *local)
117 {
118 unsigned int hash = HASH(remote, local);
119 struct ip6_tnl *t, *cand = NULL;
120 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
121 struct in6_addr any;
122
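	/* Prefer a tunnel bound to the same underlying link; a match on a
	 * different link is only remembered as a fallback candidate.
	 */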
123 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
124 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
125 !ipv6_addr_equal(remote, &t->parms.raddr) ||
126 !(t->dev->flags & IFF_UP))
127 continue;
128
129 if (link == t->parms.link)
130 return t;
131 else
132 cand = t;
133 }
134
135 memset(&any, 0, sizeof(any));
136 hash = HASH(&any, local);
137 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
138 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
139 !ipv6_addr_any(&t->parms.raddr) ||
140 !(t->dev->flags & IFF_UP))
141 continue;
142
143 if (link == t->parms.link)
144 return t;
145 else if (!cand)
146 cand = t;
147 }
148
149 hash = HASH(remote, &any);
150 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
151 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
152 !ipv6_addr_any(&t->parms.laddr) ||
153 !(t->dev->flags & IFF_UP))
154 continue;
155
156 if (link == t->parms.link)
157 return t;
158 else if (!cand)
159 cand = t;
160 }
161
162 if (cand)
163 return cand;
164
165 t = rcu_dereference(ip6n->collect_md_tun);
166 if (t && t->dev->flags & IFF_UP)
167 return t;
168
169 t = rcu_dereference(ip6n->tnls_wc[0]);
170 if (t && (t->dev->flags & IFF_UP))
171 return t;
172
173 return NULL;
174 }
175
176 /**
177 * ip6_tnl_bucket - get head of list matching given tunnel parameters
178 * @ip6n: the private data for ip6_tnl in the netns
179 * @p: parameters containing tunnel end-points
180 *
181 * Description:
182 * ip6_tnl_bucket() returns the head of the list matching the
183 * &struct in6_addr entries laddr and raddr in @p.
184 *
185 * Return: head of IPv6 tunnel list
186 **/
187
188 static struct ip6_tnl __rcu **
189 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
190 {
191 const struct in6_addr *remote = &p->raddr;
192 const struct in6_addr *local = &p->laddr;
193 unsigned int h = 0;
194 int prio = 0;
195
196 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
197 prio = 1;
198 h = HASH(remote, local);
199 }
200 return &ip6n->tnls[prio][h];
201 }
202
203 /**
204 * ip6_tnl_link - add tunnel to hash table
205 * @ip6n: the private data for ip6_tnl in the netns
206 * @t: tunnel to be added
207 **/
208
209 static void
210 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
211 {
212 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
213
214 if (t->parms.collect_md)
215 rcu_assign_pointer(ip6n->collect_md_tun, t);
216 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
217 rcu_assign_pointer(*tp, t);
218 }
219
220 /**
221 * ip6_tnl_unlink - remove tunnel from hash table
222 * @ip6n: the private data for ip6_tnl in the netns
223 * @t: tunnel to be removed
224 **/
225
226 static void
227 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
228 {
229 struct ip6_tnl __rcu **tp;
230 struct ip6_tnl *iter;
231
232 if (t->parms.collect_md)
233 rcu_assign_pointer(ip6n->collect_md_tun, NULL);
234
235 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
236 (iter = rtnl_dereference(*tp)) != NULL;
237 tp = &iter->next) {
238 if (t == iter) {
239 rcu_assign_pointer(*tp, t->next);
240 break;
241 }
242 }
243 }
244
245 static void ip6_dev_free(struct net_device *dev)
246 {
247 struct ip6_tnl *t = netdev_priv(dev);
248
249 gro_cells_destroy(&t->gro_cells);
250 dst_cache_destroy(&t->dst_cache);
251 }
252
253 static int ip6_tnl_create2(struct net_device *dev)
254 {
255 struct ip6_tnl *t = netdev_priv(dev);
256 struct net *net = dev_net(dev);
257 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
258 int err;
259
260 dev->rtnl_link_ops = &ip6_link_ops;
261 err = register_netdevice(dev);
262 if (err < 0)
263 goto out;
264
265 strcpy(t->parms.name, dev->name);
266
267 ip6_tnl_link(ip6n, t);
268 return 0;
269
270 out:
271 return err;
272 }
273
274 /**
275 * ip6_tnl_create - create a new tunnel
276 * @net: network namespace
277 * @p: tunnel parameters
278 *
279 * Description:
280 * Create tunnel matching given parameters.
281 *
282 * Return:
283 * created tunnel or error pointer
284 **/
285
286 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
287 {
288 struct net_device *dev;
289 struct ip6_tnl *t;
290 char name[IFNAMSIZ];
291 int err = -E2BIG;
292
293 if (p->name[0]) {
294 if (!dev_valid_name(p->name))
295 goto failed;
296 strscpy(name, p->name, IFNAMSIZ);
297 } else {
298 sprintf(name, "ip6tnl%%d");
299 }
300 err = -ENOMEM;
301 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
302 ip6_tnl_dev_setup);
303 if (!dev)
304 goto failed;
305
306 dev_net_set(dev, net);
307
308 t = netdev_priv(dev);
309 t->parms = *p;
310 t->net = dev_net(dev);
311 err = ip6_tnl_create2(dev);
312 if (err < 0)
313 goto failed_free;
314
315 return t;
316
317 failed_free:
318 free_netdev(dev);
319 failed:
320 return ERR_PTR(err);
321 }
322
323 /**
324 * ip6_tnl_locate - find or create tunnel matching given parameters
325 * @net: network namespace
326 * @p: tunnel parameters
327 * @create: != 0 if allowed to create new tunnel if no match found
328 *
329 * Description:
330 * ip6_tnl_locate() first tries to locate an existing tunnel
331 * based on @p. If this is unsuccessful, but @create is set, a new
332 * tunnel device is created and registered for use.
333 *
334 * Return:
335 * matching tunnel or error pointer
336 **/
337
338 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
339 struct __ip6_tnl_parm *p, int create)
340 {
341 const struct in6_addr *remote = &p->raddr;
342 const struct in6_addr *local = &p->laddr;
343 struct ip6_tnl __rcu **tp;
344 struct ip6_tnl *t;
345 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
346
347 for (tp = ip6_tnl_bucket(ip6n, p);
348 (t = rtnl_dereference(*tp)) != NULL;
349 tp = &t->next) {
350 if (ipv6_addr_equal(local, &t->parms.laddr) &&
351 ipv6_addr_equal(remote, &t->parms.raddr) &&
352 p->link == t->parms.link) {
353 if (create)
354 return ERR_PTR(-EEXIST);
355
356 return t;
357 }
358 }
359 if (!create)
360 return ERR_PTR(-ENODEV);
361 return ip6_tnl_create(net, p);
362 }
363
364 /**
365 * ip6_tnl_dev_uninit - tunnel device uninitializer
366 * @dev: the device to be destroyed
367 *
368 * Description:
369 * ip6_tnl_dev_uninit() removes tunnel from its list
370 **/
371
372 static void
373 ip6_tnl_dev_uninit(struct net_device *dev)
374 {
375 struct ip6_tnl *t = netdev_priv(dev);
376 struct net *net = t->net;
377 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
378
379 if (dev == ip6n->fb_tnl_dev)
380 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
381 else
382 ip6_tnl_unlink(ip6n, t);
383 dst_cache_reset(&t->dst_cache);
384 netdev_put(dev, &t->dev_tracker);
385 }
386
387 /**
388 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
389 * @skb: received socket buffer
390 * @raw: the ICMPv6 error message data
391 *
392 * Return:
393 * 0 if none was found,
394 * else index to encapsulation limit
395 **/
396
397 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
398 {
399 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
400 unsigned int nhoff = raw - skb->data;
401 unsigned int off = nhoff + sizeof(*ipv6h);
402 u8 nexthdr = ipv6h->nexthdr;
403
404 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
405 struct ipv6_opt_hdr *hdr;
406 u16 optlen;
407
408 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
409 break;
410
411 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
412 if (nexthdr == NEXTHDR_FRAGMENT) {
413 optlen = 8;
414 } else if (nexthdr == NEXTHDR_AUTH) {
415 optlen = ipv6_authlen(hdr);
416 } else {
417 optlen = ipv6_optlen(hdr);
418 }
419
420 if (!pskb_may_pull(skb, off + optlen))
421 break;
422
423 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
424 if (nexthdr == NEXTHDR_FRAGMENT) {
425 struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
426
427 if (frag_hdr->frag_off)
428 break;
429 }
430 if (nexthdr == NEXTHDR_DEST) {
431 u16 i = 2;
432
433 while (1) {
434 struct ipv6_tlv_tnl_enc_lim *tel;
435
436 /* No more room for encapsulation limit */
437 if (i + sizeof(*tel) > optlen)
438 break;
439
440 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
441 /* return index of option if found and valid */
442 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
443 tel->length == 1)
444 return i + off - nhoff;
445 /* else jump to next option */
446 if (tel->type)
447 i += tel->length + 2;
448 else
449 i++;
450 }
451 }
452 nexthdr = hdr->nexthdr;
453 off += optlen;
454 }
455 return 0;
456 }
457 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
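/*
 * Illustrative use (a sketch mirroring ipxip6_tnl_xmit() below): the
 * returned value is an offset from @raw into the packet at which the
 * encapsulation limit option sits, e.g.
 *
 *	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 *	if (offset > 0) {
 *		tel = (void *)&skb_network_header(skb)[offset];
 *		encap_limit = tel->encap_limit - 1;
 *	}
 *
 * Note that the helper may call pskb_may_pull() and thus reallocate
 * skb->head, so cached header pointers must be reloaded afterwards.
 */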
458
459 /* ip6_tnl_err() should handle errors in the tunnel according to the
460 * specifications in RFC 2473.
461 */
462 static int
463 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
464 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
465 {
466 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
467 struct net *net = dev_net(skb->dev);
468 u8 rel_type = ICMPV6_DEST_UNREACH;
469 u8 rel_code = ICMPV6_ADDR_UNREACH;
470 __u32 rel_info = 0;
471 struct ip6_tnl *t;
472 int err = -ENOENT;
473 int rel_msg = 0;
474 u8 tproto;
475 __u16 len;
476
477 /* If the packet doesn't contain the original IPv6 header we are
478 * in trouble since we might need the source address for further
479 * processing of the error. */
480
481 rcu_read_lock();
482 t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
483 if (!t)
484 goto out;
485
486 tproto = READ_ONCE(t->parms.proto);
487 if (tproto != ipproto && tproto != 0)
488 goto out;
489
490 err = 0;
491
492 switch (*type) {
493 case ICMPV6_DEST_UNREACH:
494 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
495 t->parms.name);
496 rel_msg = 1;
497 break;
498 case ICMPV6_TIME_EXCEED:
499 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
500 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
501 t->parms.name);
502 rel_msg = 1;
503 }
504 break;
505 case ICMPV6_PARAMPROB: {
506 struct ipv6_tlv_tnl_enc_lim *tel;
507 __u32 teli;
508
509 teli = 0;
510 if ((*code) == ICMPV6_HDR_FIELD)
511 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
512
513 if (teli && teli == *info - 2) {
514 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
515 if (tel->encap_limit == 0) {
516 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
517 t->parms.name);
518 rel_msg = 1;
519 }
520 } else {
521 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
522 t->parms.name);
523 }
524 break;
525 }
526 case ICMPV6_PKT_TOOBIG: {
527 __u32 mtu;
528
529 ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
530 sock_net_uid(net, NULL));
531 mtu = *info - offset;
532 if (mtu < IPV6_MIN_MTU)
533 mtu = IPV6_MIN_MTU;
534 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
535 if (len > mtu) {
536 rel_type = ICMPV6_PKT_TOOBIG;
537 rel_code = 0;
538 rel_info = mtu;
539 rel_msg = 1;
540 }
541 break;
542 }
543 case NDISC_REDIRECT:
544 ip6_redirect(skb, net, skb->dev->ifindex, 0,
545 sock_net_uid(net, NULL));
546 break;
547 }
548
549 *type = rel_type;
550 *code = rel_code;
551 *info = rel_info;
552 *msg = rel_msg;
553
554 out:
555 rcu_read_unlock();
556 return err;
557 }
558
559 static int
560 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
561 u8 type, u8 code, int offset, __be32 info)
562 {
563 __u32 rel_info = ntohl(info);
564 const struct iphdr *eiph;
565 struct sk_buff *skb2;
566 int err, rel_msg = 0;
567 u8 rel_type = type;
568 u8 rel_code = code;
569 struct rtable *rt;
570 struct flowi4 fl4;
571
572 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
573 &rel_msg, &rel_info, offset);
574 if (err < 0)
575 return err;
576
577 if (rel_msg == 0)
578 return 0;
579
580 switch (rel_type) {
581 case ICMPV6_DEST_UNREACH:
582 if (rel_code != ICMPV6_ADDR_UNREACH)
583 return 0;
584 rel_type = ICMP_DEST_UNREACH;
585 rel_code = ICMP_HOST_UNREACH;
586 break;
587 case ICMPV6_PKT_TOOBIG:
588 if (rel_code != 0)
589 return 0;
590 rel_type = ICMP_DEST_UNREACH;
591 rel_code = ICMP_FRAG_NEEDED;
592 break;
593 default:
594 return 0;
595 }
596
597 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
598 return 0;
599
600 skb2 = skb_clone(skb, GFP_ATOMIC);
601 if (!skb2)
602 return 0;
603
604 skb_dst_drop(skb2);
605
606 skb_pull(skb2, offset);
607 skb_reset_network_header(skb2);
608 eiph = ip_hdr(skb2);
609
610 /* Try to guess incoming interface */
611 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
612 0, 0, 0, IPPROTO_IPIP,
613 eiph->tos & INET_DSCP_MASK, 0);
614 if (IS_ERR(rt))
615 goto out;
616
617 skb2->dev = rt->dst.dev;
618 ip_rt_put(rt);
619
620 /* route "incoming" packet */
621 if (rt->rt_flags & RTCF_LOCAL) {
622 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
623 eiph->daddr, eiph->saddr, 0, 0,
624 IPPROTO_IPIP,
625 eiph->tos & INET_DSCP_MASK, 0);
626 if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
627 if (!IS_ERR(rt))
628 ip_rt_put(rt);
629 goto out;
630 }
631 skb_dst_set(skb2, &rt->dst);
632 } else {
633 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
634 skb2->dev) ||
635 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
636 goto out;
637 }
638
639 /* change mtu on this route */
640 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
641 if (rel_info > dst_mtu(skb_dst(skb2)))
642 goto out;
643
644 skb_dst_update_pmtu_no_confirm(skb2, rel_info);
645 }
646
647 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
648
649 out:
650 kfree_skb(skb2);
651 return 0;
652 }
653
654 static int
655 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
656 u8 type, u8 code, int offset, __be32 info)
657 {
658 __u32 rel_info = ntohl(info);
659 int err, rel_msg = 0;
660 u8 rel_type = type;
661 u8 rel_code = code;
662
663 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
664 &rel_msg, &rel_info, offset);
665 if (err < 0)
666 return err;
667
668 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
669 struct rt6_info *rt;
670 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
671
672 if (!skb2)
673 return 0;
674
675 skb_dst_drop(skb2);
676 skb_pull(skb2, offset);
677 skb_reset_network_header(skb2);
678
679 /* Try to guess incoming interface */
680 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
681 NULL, 0, skb2, 0);
682
683 if (rt && rt->dst.dev)
684 skb2->dev = rt->dst.dev;
685
686 icmpv6_send(skb2, rel_type, rel_code, rel_info);
687
688 ip6_rt_put(rt);
689
690 kfree_skb(skb2);
691 }
692
693 return 0;
694 }
695
696 static int
697 mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
698 u8 type, u8 code, int offset, __be32 info)
699 {
700 __u32 rel_info = ntohl(info);
701 int err, rel_msg = 0;
702 u8 rel_type = type;
703 u8 rel_code = code;
704
705 err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code,
706 &rel_msg, &rel_info, offset);
707 return err;
708 }
709
710 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
711 const struct ipv6hdr *ipv6h,
712 struct sk_buff *skb)
713 {
714 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
715
716 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
717 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
718
719 return IP6_ECN_decapsulate(ipv6h, skb);
720 }
721
722 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
723 const struct ipv6hdr *ipv6h,
724 struct sk_buff *skb)
725 {
726 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
727 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
728
729 return IP6_ECN_decapsulate(ipv6h, skb);
730 }
731
732 static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
733 const struct ipv6hdr *ipv6h,
734 struct sk_buff *skb)
735 {
736 /* ECN is not supported in AF_MPLS */
737 return 0;
738 }
739
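/*
 * Derive the tunnel's capability flags from an address pair: wildcard
 * end-points defer the decision to per-packet checks, while loopback
 * addresses (and link-local addresses without a bound link) disqualify
 * the tunnel for both transmit and receive.
 */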
740 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
741 const struct in6_addr *laddr,
742 const struct in6_addr *raddr)
743 {
744 struct __ip6_tnl_parm *p = &t->parms;
745 int ltype = ipv6_addr_type(laddr);
746 int rtype = ipv6_addr_type(raddr);
747 __u32 flags = 0;
748
749 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
750 flags = IP6_TNL_F_CAP_PER_PACKET;
751 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
752 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
753 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
754 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
755 if (ltype&IPV6_ADDR_UNICAST)
756 flags |= IP6_TNL_F_CAP_XMIT;
757 if (rtype&IPV6_ADDR_UNICAST)
758 flags |= IP6_TNL_F_CAP_RCV;
759 }
760 return flags;
761 }
762 EXPORT_SYMBOL(ip6_tnl_get_cap);
763
764 /* called with rcu_read_lock() */
765 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
766 const struct in6_addr *laddr,
767 const struct in6_addr *raddr)
768 {
769 struct __ip6_tnl_parm *p = &t->parms;
770 int ret = 0;
771 struct net *net = t->net;
772
773 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
774 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
775 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
776 struct net_device *ldev = NULL;
777
778 if (p->link)
779 ldev = dev_get_by_index_rcu(net, p->link);
780
781 if ((ipv6_addr_is_multicast(laddr) ||
782 likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
783 0, IFA_F_TENTATIVE))) &&
784 ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
785 likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
786 0, IFA_F_TENTATIVE))))
787 ret = 1;
788 }
789 return ret;
790 }
791 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
792
793 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
794 const struct tnl_ptk_info *tpi,
795 struct metadata_dst *tun_dst,
796 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
797 const struct ipv6hdr *ipv6h,
798 struct sk_buff *skb),
799 bool log_ecn_err)
800 {
801 const struct ipv6hdr *ipv6h;
802 int nh, err;
803
804 if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
805 test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
806 DEV_STATS_INC(tunnel->dev, rx_crc_errors);
807 DEV_STATS_INC(tunnel->dev, rx_errors);
808 goto drop;
809 }
810
811 if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
812 if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
813 (tunnel->i_seqno &&
814 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
815 DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
816 DEV_STATS_INC(tunnel->dev, rx_errors);
817 goto drop;
818 }
819 tunnel->i_seqno = ntohl(tpi->seq) + 1;
820 }
821
822 skb->protocol = tpi->proto;
823
824 /* Warning: All skb pointers will be invalidated! */
825 if (tunnel->dev->type == ARPHRD_ETHER) {
826 if (!pskb_may_pull(skb, ETH_HLEN)) {
827 DEV_STATS_INC(tunnel->dev, rx_length_errors);
828 DEV_STATS_INC(tunnel->dev, rx_errors);
829 goto drop;
830 }
831
832 skb->protocol = eth_type_trans(skb, tunnel->dev);
833 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
834 } else {
835 skb->dev = tunnel->dev;
836 skb_reset_mac_header(skb);
837 }
838
839 /* Save offset of outer header relative to skb->head,
840 * because we are going to reset the network header to the inner header
841 * and might change skb->head.
842 */
843 nh = skb_network_header(skb) - skb->head;
844
845 skb_reset_network_header(skb);
846
847 if (!pskb_inet_may_pull(skb)) {
848 DEV_STATS_INC(tunnel->dev, rx_length_errors);
849 DEV_STATS_INC(tunnel->dev, rx_errors);
850 goto drop;
851 }
852
853 /* Get the outer header. */
854 ipv6h = (struct ipv6hdr *)(skb->head + nh);
855
856 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
857
858 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
859
860 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
861 if (unlikely(err)) {
862 if (log_ecn_err)
863 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
864 &ipv6h->saddr,
865 ipv6_get_dsfield(ipv6h));
866 if (err > 1) {
867 DEV_STATS_INC(tunnel->dev, rx_frame_errors);
868 DEV_STATS_INC(tunnel->dev, rx_errors);
869 goto drop;
870 }
871 }
872
873 dev_sw_netstats_rx_add(tunnel->dev, skb->len);
874
875 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
876
877 if (tun_dst)
878 skb_dst_set(skb, (struct dst_entry *)tun_dst);
879
880 gro_cells_receive(&tunnel->gro_cells, skb);
881 return 0;
882
883 drop:
884 if (tun_dst)
885 dst_release((struct dst_entry *)tun_dst);
886 kfree_skb(skb);
887 return 0;
888 }
889
890 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
891 const struct tnl_ptk_info *tpi,
892 struct metadata_dst *tun_dst,
893 bool log_ecn_err)
894 {
895 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
896 const struct ipv6hdr *ipv6h,
897 struct sk_buff *skb);
898
899 dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
900 if (tpi->proto == htons(ETH_P_IP))
901 dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
902
903 return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
904 log_ecn_err);
905 }
906 EXPORT_SYMBOL(ip6_tnl_rcv);
907
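/*
 * Static packet-type templates for the ipxip6/mplsip6 receive handlers:
 * plain protocol-in-IPv6 tunnels carry no key, sequence number or
 * checksum, so only the inner EtherType matters to __ip6_tnl_rcv().
 */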
908 static const struct tnl_ptk_info tpi_v6 = {
909 /* no tunnel info required for ipxip6. */
910 .proto = htons(ETH_P_IPV6),
911 };
912
913 static const struct tnl_ptk_info tpi_v4 = {
914 /* no tunnel info required for ipxip6. */
915 .proto = htons(ETH_P_IP),
916 };
917
918 static const struct tnl_ptk_info tpi_mpls = {
919 /* no tunnel info required for mplsip6. */
920 .proto = htons(ETH_P_MPLS_UC),
921 };
922
923 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
924 const struct tnl_ptk_info *tpi,
925 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
926 const struct ipv6hdr *ipv6h,
927 struct sk_buff *skb))
928 {
929 struct ip6_tnl *t;
930 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
931 struct metadata_dst *tun_dst = NULL;
932 int ret = -1;
933
934 rcu_read_lock();
935 t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);
936
937 if (t) {
938 u8 tproto = READ_ONCE(t->parms.proto);
939
940 if (tproto != ipproto && tproto != 0)
941 goto drop;
942 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
943 goto drop;
944 ipv6h = ipv6_hdr(skb);
945 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
946 goto drop;
947 if (iptunnel_pull_header(skb, 0, tpi->proto, false))
948 goto drop;
949 if (t->parms.collect_md) {
950 IP_TUNNEL_DECLARE_FLAGS(flags) = { };
951
952 tun_dst = ipv6_tun_rx_dst(skb, flags, 0, 0);
953 if (!tun_dst)
954 goto drop;
955 }
956 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
957 log_ecn_error);
958 }
959
960 rcu_read_unlock();
961
962 return ret;
963
964 drop:
965 rcu_read_unlock();
966 kfree_skb(skb);
967 return 0;
968 }
969
970 static int ip4ip6_rcv(struct sk_buff *skb)
971 {
972 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
973 ip4ip6_dscp_ecn_decapsulate);
974 }
975
976 static int ip6ip6_rcv(struct sk_buff *skb)
977 {
978 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
979 ip6ip6_dscp_ecn_decapsulate);
980 }
981
982 static int mplsip6_rcv(struct sk_buff *skb)
983 {
984 return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls,
985 mplsip6_dscp_ecn_decapsulate);
986 }
987
988 struct ipv6_tel_txoption {
989 struct ipv6_txoptions ops;
990 __u8 dst_opt[8];
991 };
992
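/*
 * Build the 8-byte destination options header that carries the tunnel
 * encapsulation limit TLV (RFC 2473, section 6.3): dst_opt[0..1] form
 * the ipv6_opt_hdr, [2..4] the encapsulation limit option and [5..7] a
 * PadN option that pads the header to the required 8-byte length.
 */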
993 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
994 {
995 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
996
997 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
998 opt->dst_opt[3] = 1;
999 opt->dst_opt[4] = encap_limit;
1000 opt->dst_opt[5] = IPV6_TLV_PADN;
1001 opt->dst_opt[6] = 1;
1002
1003 opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
1004 opt->ops.opt_nflen = 8;
1005 }
1006
1007 /**
1008 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
1009 * @t: the outgoing tunnel device
1010 * @hdr: IPv6 header from the incoming packet
1011 *
1012 * Description:
1013 * Avoid trivial tunneling loop by checking that tunnel exit-point
1014 * doesn't match source of incoming packet.
1015 *
1016 * Return:
1017 * 1 if conflict,
1018 * 0 else
1019 **/
1020
1021 static inline bool
1022 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
1023 {
1024 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
1025 }
1026
1027 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
1028 const struct in6_addr *laddr,
1029 const struct in6_addr *raddr)
1030 {
1031 struct __ip6_tnl_parm *p = &t->parms;
1032 int ret = 0;
1033 struct net *net = t->net;
1034
1035 if (t->parms.collect_md)
1036 return 1;
1037
1038 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
1039 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
1040 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
1041 struct net_device *ldev = NULL;
1042
1043 rcu_read_lock();
1044 if (p->link)
1045 ldev = dev_get_by_index_rcu(net, p->link);
1046
1047 if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
1048 0, IFA_F_TENTATIVE)))
1049 pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
1050 p->name);
1051 else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
1052 !ipv6_addr_is_multicast(raddr) &&
1053 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
1054 true, 0, IFA_F_TENTATIVE)))
1055 pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
1056 p->name);
1057 else
1058 ret = 1;
1059 rcu_read_unlock();
1060 }
1061 return ret;
1062 }
1063 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1064
1065 /**
1066 * ip6_tnl_xmit - encapsulate packet and send
1067 * @skb: the outgoing socket buffer
1068 * @dev: the outgoing tunnel device
1069 * @dsfield: dscp code for outer header
1070 * @fl6: flow of tunneled packet
1071 * @encap_limit: encapsulation limit
1072 * @pmtu: Path MTU is stored if packet is too big
1073 * @proto: next header value
1074 *
1075 * Description:
1076 * Build new header and do some sanity checks on the packet before sending
1077 * it.
1078 *
1079 * Return:
1080 * 0 on success
1081 * -1 on failure
1082 * %-EMSGSIZE if the packet is too big; the link MTU is returned in @pmtu
1083 **/
1084
1085 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1086 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1087 __u8 proto)
1088 {
1089 struct ip6_tnl *t = netdev_priv(dev);
1090 struct net *net = t->net;
1091 struct ipv6hdr *ipv6h;
1092 struct ipv6_tel_txoption opt;
1093 struct dst_entry *dst = NULL, *ndst = NULL;
1094 struct net_device *tdev;
1095 int mtu;
1096 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1097 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1098 unsigned int max_headroom = psh_hlen;
1099 __be16 payload_protocol;
1100 bool use_cache = false;
1101 u8 hop_limit;
1102 int err = -1;
1103
1104 payload_protocol = skb_protocol(skb, true);
1105
1106 if (t->parms.collect_md) {
1107 hop_limit = skb_tunnel_info(skb)->key.ttl;
1108 goto route_lookup;
1109 } else {
1110 hop_limit = t->parms.hop_limit;
1111 }
1112
1113 /* NBMA tunnel */
1114 if (ipv6_addr_any(&t->parms.raddr)) {
1115 if (payload_protocol == htons(ETH_P_IPV6)) {
1116 struct in6_addr *addr6;
1117 struct neighbour *neigh;
1118 int addr_type;
1119
1120 if (!skb_dst(skb))
1121 goto tx_err_link_failure;
1122
1123 neigh = dst_neigh_lookup(skb_dst(skb),
1124 &ipv6_hdr(skb)->daddr);
1125 if (!neigh)
1126 goto tx_err_link_failure;
1127
1128 addr6 = (struct in6_addr *)&neigh->primary_key;
1129 addr_type = ipv6_addr_type(addr6);
1130
1131 if (addr_type == IPV6_ADDR_ANY)
1132 addr6 = &ipv6_hdr(skb)->daddr;
1133
1134 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1135 neigh_release(neigh);
1136 } else if (payload_protocol == htons(ETH_P_IP)) {
1137 const struct rtable *rt = skb_rtable(skb);
1138
1139 if (!rt)
1140 goto tx_err_link_failure;
1141
1142 if (rt->rt_gw_family == AF_INET6)
1143 memcpy(&fl6->daddr, &rt->rt_gw6, sizeof(fl6->daddr));
1144 }
1145 } else if (t->parms.proto != 0 && !(t->parms.flags &
1146 (IP6_TNL_F_USE_ORIG_TCLASS |
1147 IP6_TNL_F_USE_ORIG_FWMARK))) {
1148 /* enable the cache only if neither the outer protocol nor the
1149 * routing decision depends on the current inner header value
1150 */
1151 use_cache = true;
1152 }
1153
1154 if (use_cache)
1155 dst = dst_cache_get(&t->dst_cache);
1156
1157 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1158 goto tx_err_link_failure;
1159
1160 if (!dst) {
1161 route_lookup:
1162 /* add dsfield to flowlabel for route lookup */
1163 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1164
1165 dst = ip6_route_output(net, NULL, fl6);
1166
1167 if (dst->error)
1168 goto tx_err_link_failure;
1169 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1170 if (IS_ERR(dst)) {
1171 err = PTR_ERR(dst);
1172 dst = NULL;
1173 goto tx_err_link_failure;
1174 }
1175 if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
1176 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1177 &fl6->daddr, 0, &fl6->saddr))
1178 goto tx_err_link_failure;
1179 ndst = dst;
1180 }
1181
1182 tdev = dst->dev;
1183
1184 if (tdev == dev) {
1185 DEV_STATS_INC(dev, collisions);
1186 net_warn_ratelimited("%s: Local routing loop detected!\n",
1187 t->parms.name);
1188 goto tx_err_dst_release;
1189 }
1190 mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
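	/* The tunnel encapsulation limit is carried in an 8-byte
	 * destination options header (see init_tel_txopt()), so reserve
	 * that much extra headroom and shrink the usable MTU accordingly.
	 */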
1191 if (encap_limit >= 0) {
1192 max_headroom += 8;
1193 mtu -= 8;
1194 }
1195 mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1196 IPV6_MIN_MTU : IPV4_MIN_MTU);
1197
1198 skb_dst_update_pmtu_no_confirm(skb, mtu);
1199 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1200 *pmtu = mtu;
1201 err = -EMSGSIZE;
1202 goto tx_err_dst_release;
1203 }
1204
1205 if (t->err_count > 0) {
1206 if (time_before(jiffies,
1207 t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1208 t->err_count--;
1209
1210 dst_link_failure(skb);
1211 } else {
1212 t->err_count = 0;
1213 }
1214 }
1215
1216 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1217
1218 /*
1219 * Okay, now see if we can stuff it in the buffer as-is.
1220 */
1221 max_headroom += LL_RESERVED_SPACE(tdev);
1222
1223 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1224 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1225 struct sk_buff *new_skb;
1226
1227 new_skb = skb_realloc_headroom(skb, max_headroom);
1228 if (!new_skb)
1229 goto tx_err_dst_release;
1230
1231 if (skb->sk)
1232 skb_set_owner_w(new_skb, skb->sk);
1233 consume_skb(skb);
1234 skb = new_skb;
1235 }
1236
1237 if (t->parms.collect_md) {
1238 if (t->encap.type != TUNNEL_ENCAP_NONE)
1239 goto tx_err_dst_release;
1240 } else {
1241 if (use_cache && ndst)
1242 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1243 }
1244 skb_dst_set(skb, dst);
1245
1246 if (hop_limit == 0) {
1247 if (payload_protocol == htons(ETH_P_IP))
1248 hop_limit = ip_hdr(skb)->ttl;
1249 else if (payload_protocol == htons(ETH_P_IPV6))
1250 hop_limit = ipv6_hdr(skb)->hop_limit;
1251 else
1252 hop_limit = ip6_dst_hoplimit(dst);
1253 }
1254
1255 /* Calculate max headroom for all the headers and adjust
1256 * needed_headroom if necessary.
1257 */
1258 max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1259 + dst->header_len + t->hlen;
1260 if (max_headroom > READ_ONCE(dev->needed_headroom))
1261 WRITE_ONCE(dev->needed_headroom, max_headroom);
1262
1263 err = ip6_tnl_encap(skb, t, &proto, fl6);
1264 if (err)
1265 return err;
1266
1267 if (encap_limit >= 0) {
1268 init_tel_txopt(&opt, encap_limit);
1269 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1270 }
1271
1272 skb_push(skb, sizeof(struct ipv6hdr));
1273 skb_reset_network_header(skb);
1274 ipv6h = ipv6_hdr(skb);
1275 ip6_flow_hdr(ipv6h, dsfield,
1276 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1277 ipv6h->hop_limit = hop_limit;
1278 ipv6h->nexthdr = proto;
1279 ipv6h->saddr = fl6->saddr;
1280 ipv6h->daddr = fl6->daddr;
1281 ip6tunnel_xmit(NULL, skb, dev);
1282 return 0;
1283 tx_err_link_failure:
1284 DEV_STATS_INC(dev, tx_carrier_errors);
1285 dst_link_failure(skb);
1286 tx_err_dst_release:
1287 dst_release(dst);
1288 return err;
1289 }
1290 EXPORT_SYMBOL(ip6_tnl_xmit);
1291
1292 static inline int
1293 ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
1294 u8 protocol)
1295 {
1296 struct ip6_tnl *t = netdev_priv(dev);
1297 struct ipv6hdr *ipv6h;
1298 const struct iphdr *iph;
1299 int encap_limit = -1;
1300 __u16 offset;
1301 struct flowi6 fl6;
1302 __u8 dsfield, orig_dsfield;
1303 __u32 mtu;
1304 u8 tproto;
1305 int err;
1306
1307 tproto = READ_ONCE(t->parms.proto);
1308 if (tproto != protocol && tproto != 0)
1309 return -1;
1310
1311 if (t->parms.collect_md) {
1312 struct ip_tunnel_info *tun_info;
1313 const struct ip_tunnel_key *key;
1314
1315 tun_info = skb_tunnel_info(skb);
1316 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1317 ip_tunnel_info_af(tun_info) != AF_INET6))
1318 return -1;
1319 key = &tun_info->key;
1320 memset(&fl6, 0, sizeof(fl6));
1321 fl6.flowi6_proto = protocol;
1322 fl6.saddr = key->u.ipv6.src;
1323 fl6.daddr = key->u.ipv6.dst;
1324 fl6.flowlabel = key->label;
1325 dsfield = key->tos;
1326 switch (protocol) {
1327 case IPPROTO_IPIP:
1328 iph = ip_hdr(skb);
1329 orig_dsfield = ipv4_get_dsfield(iph);
1330 break;
1331 case IPPROTO_IPV6:
1332 ipv6h = ipv6_hdr(skb);
1333 orig_dsfield = ipv6_get_dsfield(ipv6h);
1334 break;
1335 default:
1336 orig_dsfield = dsfield;
1337 break;
1338 }
1339 } else {
1340 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1341 encap_limit = t->parms.encap_limit;
1342 if (protocol == IPPROTO_IPV6) {
1343 offset = ip6_tnl_parse_tlv_enc_lim(skb,
1344 skb_network_header(skb));
1345 /* ip6_tnl_parse_tlv_enc_lim() might have
1346 * reallocated skb->head
1347 */
1348 if (offset > 0) {
1349 struct ipv6_tlv_tnl_enc_lim *tel;
1350
1351 tel = (void *)&skb_network_header(skb)[offset];
1352 if (tel->encap_limit == 0) {
1353 icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
1354 ICMPV6_HDR_FIELD, offset + 2);
1355 return -1;
1356 }
1357 encap_limit = tel->encap_limit - 1;
1358 }
1359 }
1360
1361 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1362 fl6.flowi6_proto = protocol;
1363
1364 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1365 fl6.flowi6_mark = skb->mark;
1366 else
1367 fl6.flowi6_mark = t->parms.fwmark;
1368 switch (protocol) {
1369 case IPPROTO_IPIP:
1370 iph = ip_hdr(skb);
1371 orig_dsfield = ipv4_get_dsfield(iph);
1372 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1373 dsfield = orig_dsfield;
1374 else
1375 dsfield = ip6_tclass(t->parms.flowinfo);
1376 break;
1377 case IPPROTO_IPV6:
1378 ipv6h = ipv6_hdr(skb);
1379 orig_dsfield = ipv6_get_dsfield(ipv6h);
1380 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1381 dsfield = orig_dsfield;
1382 else
1383 dsfield = ip6_tclass(t->parms.flowinfo);
1384 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1385 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1386 break;
1387 default:
1388 orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo);
1389 break;
1390 }
1391 }
1392
1393 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1394 dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);
1395
1396 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1397 return -1;
1398
1399 skb_set_inner_ipproto(skb, protocol);
1400
1401 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1402 protocol);
1403 if (err != 0) {
1404 /* XXX: send ICMP error even if DF is not set. */
1405 if (err == -EMSGSIZE)
1406 switch (protocol) {
1407 case IPPROTO_IPIP:
1408 icmp_ndo_send(skb, ICMP_DEST_UNREACH,
1409 ICMP_FRAG_NEEDED, htonl(mtu));
1410 break;
1411 case IPPROTO_IPV6:
1412 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1413 break;
1414 default:
1415 break;
1416 }
1417 return -1;
1418 }
1419
1420 return 0;
1421 }
1422
1423 static netdev_tx_t
1424 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1425 {
1426 struct ip6_tnl *t = netdev_priv(dev);
1427 u8 ipproto;
1428 int ret;
1429
1430 if (!pskb_inet_may_pull(skb))
1431 goto tx_err;
1432
1433 switch (skb->protocol) {
1434 case htons(ETH_P_IP):
1435 ipproto = IPPROTO_IPIP;
1436 break;
1437 case htons(ETH_P_IPV6):
1438 if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb)))
1439 goto tx_err;
1440 ipproto = IPPROTO_IPV6;
1441 break;
1442 case htons(ETH_P_MPLS_UC):
1443 ipproto = IPPROTO_MPLS;
1444 break;
1445 default:
1446 goto tx_err;
1447 }
1448
1449 ret = ipxip6_tnl_xmit(skb, dev, ipproto);
1450 if (ret < 0)
1451 goto tx_err;
1452
1453 return NETDEV_TX_OK;
1454
1455 tx_err:
1456 DEV_STATS_INC(dev, tx_errors);
1457 DEV_STATS_INC(dev, tx_dropped);
1458 kfree_skb(skb);
1459 return NETDEV_TX_OK;
1460 }
1461
1462 static void ip6_tnl_link_config(struct ip6_tnl *t)
1463 {
1464 struct net_device *dev = t->dev;
1465 struct net_device *tdev = NULL;
1466 struct __ip6_tnl_parm *p = &t->parms;
1467 struct flowi6 *fl6 = &t->fl.u.ip6;
1468 int t_hlen;
1469 int mtu;
1470
1471 __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
1472 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1473
1474 /* Set up flowi template */
1475 fl6->saddr = p->laddr;
1476 fl6->daddr = p->raddr;
1477 fl6->flowi6_oif = p->link;
1478 fl6->flowlabel = 0;
1479
1480 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1481 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1482 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1483 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1484
1485 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1486 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1487
1488 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1489 dev->flags |= IFF_POINTOPOINT;
1490 else
1491 dev->flags &= ~IFF_POINTOPOINT;
1492
1493 t->tun_hlen = 0;
1494 t->hlen = t->encap_hlen + t->tun_hlen;
1495 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1496
1497 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1498 int strict = (ipv6_addr_type(&p->raddr) &
1499 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1500
1501 struct rt6_info *rt = rt6_lookup(t->net,
1502 &p->raddr, &p->laddr,
1503 p->link, NULL, strict);
1504 if (rt) {
1505 tdev = rt->dst.dev;
1506 ip6_rt_put(rt);
1507 }
1508
1509 if (!tdev && p->link)
1510 tdev = __dev_get_by_index(t->net, p->link);
1511
1512 if (tdev) {
1513 dev->needed_headroom = tdev->hard_header_len +
1514 tdev->needed_headroom + t_hlen;
1515 mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
1516
1517 mtu = mtu - t_hlen;
1518 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1519 mtu -= 8;
1520
1521 if (mtu < IPV6_MIN_MTU)
1522 mtu = IPV6_MIN_MTU;
1523 WRITE_ONCE(dev->mtu, mtu);
1524 }
1525 }
1526 }
1527
1528 /**
1529 * ip6_tnl_change - update the tunnel parameters
1530 * @t: tunnel to be changed
1531 * @p: tunnel configuration parameters
1532 *
1533 * Description:
1534 * ip6_tnl_change() updates the tunnel parameters
1535 **/
1536
1537 static void
1538 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1539 {
1540 t->parms.laddr = p->laddr;
1541 t->parms.raddr = p->raddr;
1542 t->parms.flags = p->flags;
1543 t->parms.hop_limit = p->hop_limit;
1544 t->parms.encap_limit = p->encap_limit;
1545 t->parms.flowinfo = p->flowinfo;
1546 t->parms.link = p->link;
1547 t->parms.proto = p->proto;
1548 t->parms.fwmark = p->fwmark;
1549 dst_cache_reset(&t->dst_cache);
1550 ip6_tnl_link_config(t);
1551 }
1552
1553 static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1554 {
1555 struct net *net = t->net;
1556 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1557
1558 ip6_tnl_unlink(ip6n, t);
1559 synchronize_net();
1560 ip6_tnl_change(t, p);
1561 ip6_tnl_link(ip6n, t);
1562 netdev_state_change(t->dev);
1563 }
1564
1565 static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1566 {
1567 /* for default tnl0 device allow to change only the proto */
1568 t->parms.proto = p->proto;
1569 netdev_state_change(t->dev);
1570 }
1571
1572 static void
1573 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1574 {
1575 p->laddr = u->laddr;
1576 p->raddr = u->raddr;
1577 p->flags = u->flags;
1578 p->hop_limit = u->hop_limit;
1579 p->encap_limit = u->encap_limit;
1580 p->flowinfo = u->flowinfo;
1581 p->link = u->link;
1582 p->proto = u->proto;
1583 memcpy(p->name, u->name, sizeof(u->name));
1584 }
1585
1586 static void
1587 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1588 {
1589 u->laddr = p->laddr;
1590 u->raddr = p->raddr;
1591 u->flags = p->flags;
1592 u->hop_limit = p->hop_limit;
1593 u->encap_limit = p->encap_limit;
1594 u->flowinfo = p->flowinfo;
1595 u->link = p->link;
1596 u->proto = p->proto;
1597 memcpy(u->name, p->name, sizeof(u->name));
1598 }
1599
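/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * private tunnel ioctls below are issued on an AF_INET6 socket with a
 * struct ip6_tnl_parm hung off the ifreq data pointer, roughly:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr = {};
 *
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */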
1600 /**
1601 * ip6_tnl_siocdevprivate - configure ipv6 tunnels from userspace
1602 * @dev: virtual device associated with tunnel
1603 * @ifr: unused
1604 * @data: parameters passed from userspace
1605 * @cmd: command to be performed
1606 *
1607 * Description:
1608 * ip6_tnl_siocdevprivate() is used for managing IPv6 tunnels
1609 * from userspace.
1610 *
1611 * The possible commands are the following:
1612 * %SIOCGETTUNNEL: get tunnel parameters for device
1613 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1614 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1615 * %SIOCDELTUNNEL: delete tunnel
1616 *
1617 * The fallback device "ip6tnl0", created during module
1618 * initialization, can be used for creating other tunnel devices.
1619 *
1620 * Return:
1621 * 0 on success,
1622 * %-EFAULT if unable to copy data to or from userspace,
1623 * %-EPERM if the current process lacks %CAP_NET_ADMIN,
1624 * %-EINVAL if passed tunnel parameters are invalid,
1625 * %-EEXIST if changing a tunnel's parameters would cause a conflict,
1626 * %-ENODEV if attempting to change or delete a nonexistent device
1627 **/
1628
1629 static int
1630 ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
1631 void __user *data, int cmd)
1632 {
1633 int err = 0;
1634 struct ip6_tnl_parm p;
1635 struct __ip6_tnl_parm p1;
1636 struct ip6_tnl *t = netdev_priv(dev);
1637 struct net *net = t->net;
1638 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1639
1640 memset(&p1, 0, sizeof(p1));
1641
1642 switch (cmd) {
1643 case SIOCGETTUNNEL:
1644 if (dev == ip6n->fb_tnl_dev) {
1645 if (copy_from_user(&p, data, sizeof(p))) {
1646 err = -EFAULT;
1647 break;
1648 }
1649 ip6_tnl_parm_from_user(&p1, &p);
1650 t = ip6_tnl_locate(net, &p1, 0);
1651 if (IS_ERR(t))
1652 t = netdev_priv(dev);
1653 } else {
1654 memset(&p, 0, sizeof(p));
1655 }
1656 ip6_tnl_parm_to_user(&p, &t->parms);
1657 if (copy_to_user(data, &p, sizeof(p)))
1658 err = -EFAULT;
1659 break;
1660 case SIOCADDTUNNEL:
1661 case SIOCCHGTUNNEL:
1662 err = -EPERM;
1663 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1664 break;
1665 err = -EFAULT;
1666 if (copy_from_user(&p, data, sizeof(p)))
1667 break;
1668 err = -EINVAL;
1669 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1670 p.proto != 0)
1671 break;
1672 ip6_tnl_parm_from_user(&p1, &p);
1673 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1674 if (cmd == SIOCCHGTUNNEL) {
1675 if (!IS_ERR(t)) {
1676 if (t->dev != dev) {
1677 err = -EEXIST;
1678 break;
1679 }
1680 } else
1681 t = netdev_priv(dev);
1682 if (dev == ip6n->fb_tnl_dev)
1683 ip6_tnl0_update(t, &p1);
1684 else
1685 ip6_tnl_update(t, &p1);
1686 }
1687 if (!IS_ERR(t)) {
1688 err = 0;
1689 ip6_tnl_parm_to_user(&p, &t->parms);
1690 if (copy_to_user(data, &p, sizeof(p)))
1691 err = -EFAULT;
1692
1693 } else {
1694 err = PTR_ERR(t);
1695 }
1696 break;
1697 case SIOCDELTUNNEL:
1698 err = -EPERM;
1699 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1700 break;
1701
1702 if (dev == ip6n->fb_tnl_dev) {
1703 err = -EFAULT;
1704 if (copy_from_user(&p, data, sizeof(p)))
1705 break;
1706 err = -ENOENT;
1707 ip6_tnl_parm_from_user(&p1, &p);
1708 t = ip6_tnl_locate(net, &p1, 0);
1709 if (IS_ERR(t))
1710 break;
1711 err = -EPERM;
1712 if (t->dev == ip6n->fb_tnl_dev)
1713 break;
1714 dev = t->dev;
1715 }
1716 err = 0;
1717 unregister_netdevice(dev);
1718 break;
1719 default:
1720 err = -EINVAL;
1721 }
1722 return err;
1723 }
1724
1725 /**
1726 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1727 * @dev: virtual device associated with tunnel
1728 * @new_mtu: the new mtu
1729 *
1730 * Return:
1731 * 0 on success,
1732 * %-EINVAL if mtu too small
1733 **/
1734
1735 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1736 {
1737 struct ip6_tnl *tnl = netdev_priv(dev);
1738 int t_hlen;
1739
1740 t_hlen = tnl->hlen + sizeof(struct ipv6hdr);
1741 if (tnl->parms.proto == IPPROTO_IPV6) {
1742 if (new_mtu < IPV6_MIN_MTU)
1743 return -EINVAL;
1744 } else {
1745 if (new_mtu < ETH_MIN_MTU)
1746 return -EINVAL;
1747 }
1748 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1749 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
1750 return -EINVAL;
1751 } else {
1752 if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen)
1753 return -EINVAL;
1754 }
1755 WRITE_ONCE(dev->mtu, new_mtu);
1756 return 0;
1757 }
1758 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1759
1760 int ip6_tnl_get_iflink(const struct net_device *dev)
1761 {
1762 struct ip6_tnl *t = netdev_priv(dev);
1763
1764 return READ_ONCE(t->parms.link);
1765 }
1766 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1767
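/*
 * Registration helpers for optional UDP-style encapsulations (FOU/GUE,
 * for instance): a sub-module installs its ip6_tnl_encap_ops in the
 * ip6tun_encaps[] slot that matches its TUNNEL_ENCAP_* type, and
 * removes it again on unload.
 */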
1768 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1769 unsigned int num)
1770 {
1771 if (num >= MAX_IPTUN_ENCAP_OPS)
1772 return -ERANGE;
1773
1774 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1775 &ip6tun_encaps[num],
1776 NULL, ops) ? 0 : -1;
1777 }
1778 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1779
1780 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1781 unsigned int num)
1782 {
1783 int ret;
1784
1785 if (num >= MAX_IPTUN_ENCAP_OPS)
1786 return -ERANGE;
1787
1788 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1789 &ip6tun_encaps[num],
1790 ops, NULL) == ops) ? 0 : -1;
1791
1792 synchronize_net();
1793
1794 return ret;
1795 }
1796 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1797
1798 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1799 struct ip_tunnel_encap *ipencap)
1800 {
1801 int hlen;
1802
1803 memset(&t->encap, 0, sizeof(t->encap));
1804
1805 hlen = ip6_encap_hlen(ipencap);
1806 if (hlen < 0)
1807 return hlen;
1808
1809 t->encap.type = ipencap->type;
1810 t->encap.sport = ipencap->sport;
1811 t->encap.dport = ipencap->dport;
1812 t->encap.flags = ipencap->flags;
1813
1814 t->encap_hlen = hlen;
1815 t->hlen = t->encap_hlen + t->tun_hlen;
1816
1817 return 0;
1818 }
1819 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1820
1821 static const struct net_device_ops ip6_tnl_netdev_ops = {
1822 .ndo_init = ip6_tnl_dev_init,
1823 .ndo_uninit = ip6_tnl_dev_uninit,
1824 .ndo_start_xmit = ip6_tnl_start_xmit,
1825 .ndo_siocdevprivate = ip6_tnl_siocdevprivate,
1826 .ndo_change_mtu = ip6_tnl_change_mtu,
1827 .ndo_get_stats64 = dev_get_tstats64,
1828 .ndo_get_iflink = ip6_tnl_get_iflink,
1829 };
1830
1831 #define IPXIPX_FEATURES (NETIF_F_SG | \
1832 NETIF_F_FRAGLIST | \
1833 NETIF_F_HIGHDMA | \
1834 NETIF_F_GSO_SOFTWARE | \
1835 NETIF_F_HW_CSUM)
1836
1837 /**
1838 * ip6_tnl_dev_setup - setup virtual tunnel device
1839 * @dev: virtual device associated with tunnel
1840 *
1841 * Description:
1842 * Initialize function pointers and device parameters
1843 **/
1844
1845 static void ip6_tnl_dev_setup(struct net_device *dev)
1846 {
1847 dev->netdev_ops = &ip6_tnl_netdev_ops;
1848 dev->header_ops = &ip_tunnel_header_ops;
1849 dev->needs_free_netdev = true;
1850 dev->priv_destructor = ip6_dev_free;
1851
1852 dev->type = ARPHRD_TUNNEL6;
1853 dev->flags |= IFF_NOARP;
1854 dev->addr_len = sizeof(struct in6_addr);
1855 dev->lltx = true;
1856 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1857 netif_keep_dst(dev);
1858
1859 dev->features |= IPXIPX_FEATURES;
1860 dev->hw_features |= IPXIPX_FEATURES;
1861
1862 /* This perm addr will be used as interface identifier by IPv6 */
1863 dev->addr_assign_type = NET_ADDR_RANDOM;
1864 eth_random_addr(dev->perm_addr);
1865 }
1866
1867
1868 /**
1869 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1870 * @dev: virtual device associated with tunnel
1871 **/
1872
1873 static inline int
1874 ip6_tnl_dev_init_gen(struct net_device *dev)
1875 {
1876 struct ip6_tnl *t = netdev_priv(dev);
1877 int ret;
1878 int t_hlen;
1879
1880 t->dev = dev;
1881 t->net = dev_net(dev);
1882
1883 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1884 if (ret)
1885 return ret;
1886
1887 ret = gro_cells_init(&t->gro_cells, dev);
1888 if (ret)
1889 goto destroy_dst;
1890
1891 t->tun_hlen = 0;
1892 t->hlen = t->encap_hlen + t->tun_hlen;
1893 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1894
1895 dev->type = ARPHRD_TUNNEL6;
1896 dev->mtu = ETH_DATA_LEN - t_hlen;
1897 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1898 dev->mtu -= 8;
1899 dev->min_mtu = ETH_MIN_MTU;
1900 dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen;
1901
1902 netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
1903 netdev_lockdep_set_classes(dev);
1904 return 0;
1905
1906 destroy_dst:
1907 dst_cache_destroy(&t->dst_cache);
1908
1909 return ret;
1910 }
1911
1912 /**
1913 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1914 * @dev: virtual device associated with tunnel
1915 **/
1916
1917 static int ip6_tnl_dev_init(struct net_device *dev)
1918 {
1919 struct ip6_tnl *t = netdev_priv(dev);
1920 int err = ip6_tnl_dev_init_gen(dev);
1921
1922 if (err)
1923 return err;
1924 ip6_tnl_link_config(t);
1925 if (t->parms.collect_md)
1926 netif_keep_dst(dev);
1927 return 0;
1928 }
1929
1930 /**
1931 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1932 * @dev: fallback device
1933 *
1934 * Return: 0
1935 **/
1936
1937 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1938 {
1939 struct ip6_tnl *t = netdev_priv(dev);
1940 struct net *net = dev_net(dev);
1941 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1942
1943 t->parms.proto = IPPROTO_IPV6;
1944
1945 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1946 return 0;
1947 }
1948
1949 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1950 struct netlink_ext_ack *extack)
1951 {
1952 u8 proto;
1953
1954 if (!data || !data[IFLA_IPTUN_PROTO])
1955 return 0;
1956
1957 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1958 if (proto != IPPROTO_IPV6 &&
1959 proto != IPPROTO_IPIP &&
1960 proto != 0)
1961 return -EINVAL;
1962
1963 return 0;
1964 }
1965
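/**
 * ip6_tnl_netlink_parms - parse netlink attributes into tunnel parameters
 * @data: tunnel specific netlink attributes
 * @parms: tunnel parameters to fill in
 *
 * Description:
 *   @parms is zeroed first; any attribute missing from @data keeps its
 *   default (zero) value.
 **/
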
static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

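/**
 * ip6_tnl_newlink - configure and register a new tunnel device
 * @src_net: network namespace the link was requested in
 * @dev: new tunnel device, allocated by the rtnetlink core
 * @tb: generic netlink attributes
 * @data: tunnel specific netlink attributes
 * @extack: extended ack
 *
 * Description:
 *   Rejects duplicates: only one collect_md tunnel is allowed per netns,
 *   and a non collect_md tunnel must not match an existing end-point pair.
 **/
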
static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;
	struct ip6_tnl *nt, *t;
	int err;

	nt = netdev_priv(dev);

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	err = ip6_tnl_create2(dev);
	if (!err && tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}

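/**
 * ip6_tnl_changelink - change the parameters of an existing tunnel
 * @dev: tunnel device to reconfigure
 * @tb: generic netlink attributes
 * @data: tunnel specific netlink attributes
 * @extack: extended ack
 *
 * Description:
 *   The fallback device cannot be reconfigured, and switching an existing
 *   tunnel to collect_md mode is not allowed.
 **/
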
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	ip6_tnl_update(t, &p);
	return 0;
}

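/**
 * ip6_tnl_dellink - queue a tunnel device for unregistration
 * @dev: tunnel device to delete
 * @head: list used to batch the unregistration
 *
 * Description:
 *   The per-netns fallback device is never deleted this way; it only goes
 *   away when its network namespace is dismantled.
 **/
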
static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

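/**
 * ip6_tnl_get_size - worst-case netlink message size for one tunnel
 * @dev: tunnel device being dumped
 *
 * Return: upper bound, in bytes, of the IFLA_IPTUN_* attribute payload
 **/
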
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

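/**
 * ip6_tnl_fill_info - dump tunnel configuration into a netlink message
 * @skb: destination socket buffer
 * @dev: tunnel device being dumped
 *
 * Return: 0 on success, -EMSGSIZE if @skb ran out of room
 **/
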
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

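/**
 * ip6_tnl_get_link_net - return the network namespace a tunnel belongs to
 * @dev: tunnel device
 **/
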
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return READ_ONCE(tunnel->net);
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind = "ip6tnl",
	.maxtype = IFLA_IPTUN_MAX,
	.policy = ip6_tnl_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6_tnl_dev_setup,
	.validate = ip6_tnl_validate,
	.newlink = ip6_tnl_newlink,
	.changelink = ip6_tnl_changelink,
	.dellink = ip6_tnl_dellink,
	.get_size = ip6_tnl_get_size,
	.fill_info = ip6_tnl_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};

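/* The attributes and rtnl_link_ops above back the iproute2 "ip6tnl" link
 * type.  An illustrative invocation of this netlink interface (device name
 * and documentation-prefix addresses are examples only):
 *
 *   ip link add name ip6tnl1 type ip6tnl mode ip6ip6 \
 *           local 2001:db8::1 remote 2001:db8::2 encaplimit 4 hoplimit 64
 */
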
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler = ip4ip6_rcv,
	.err_handler = ip4ip6_err,
	.priority = 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler = ip6ip6_rcv,
	.err_handler = ip6ip6_err,
	.priority = 1,
};

static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
	.handler = mplsip6_rcv,
	.err_handler = mplsip6_err,
	.priority = 1,
};

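/**
 * ip6_tnl_destroy_tunnels - destroy tunnels of a dying network namespace
 * @net: network namespace being dismantled
 * @list: list to which the devices are queued for unregistration
 *
 * Description:
 *   Queues every ip6tnl device registered in @net, plus tunnels hashed in
 *   @net whose device has been moved to another namespace.
 **/
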
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	while (t) {
		/* If dev is in the same netns, it has already
		 * been added to the list by the previous loop.
		 */
		if (!net_eq(dev_net(t->dev), net))
			unregister_netdevice_queue(t->dev, list);
		t = rtnl_dereference(t->next);
	}
}

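/**
 * ip6_tnl_init_net - set up per-netns ip6tnl state
 * @net: network namespace being initialized
 *
 * Description:
 *   Wires up the tunnel hash lists and, unless fallback tunnels are
 *   disabled for @net, creates and registers the "ip6tnl0" fallback device.
 **/
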
static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	if (!net_has_fallback_tunnels(net))
		return 0;
	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* The FB netdevice is special: there is one, and only one, per netns.
	 * Allowing it to be moved to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->netns_local = true;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

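/**
 * ip6_tnl_exit_batch_rtnl - batched per-netns cleanup
 * @net_list: list of network namespaces being dismantled
 * @dev_to_kill: list collecting the devices to unregister in one batch
 *
 * Description:
 *   Runs under RTNL; the actual unregistration is done by the caller.
 **/
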
static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_list, exit_list)
		ip6_tnl_destroy_tunnels(net, dev_to_kill);
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
	.id = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}

	if (ip6_tnl_mpls_supported()) {
		err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
		if (err < 0) {
			pr_err("%s: can't register mplsip6\n", __func__);
			goto out_mplsip6;
		}
	}

	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	if (ip6_tnl_mpls_supported())
		xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
out_mplsip6:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	if (ip6_tnl_mpls_supported() &&
	    xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
		pr_info("%s: can't deregister mplsip6\n", __func__);
	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);