// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netdev_lock.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_MAX_DEST_TLVS 8

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static inline int ip6_tnl_mpls_supported(void)
{
	return IS_ENABLED(CONFIG_MPLS);
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 * @net: network namespace
 * @link: ifindex of underlying interface
 * @remote: the address of the tunnel exit-point
 * @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, int link,
	       const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

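	/* Three hash chains are searched in turn: tunnels keyed on both
	 * addresses, on the local address only and on the remote address
	 * only; tunnels whose device is down are skipped.  Within each pass
	 * a match on the expected underlying link is returned immediately,
	 * while an address match on another link is only remembered as a
	 * candidate.  Failing all that, the first remembered candidate, the
	 * collect_md tunnel and the fallback device are tried, in that order.
	 */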
for_each_ip_tunnel_rcu(t, ip6n->tnls_r_l[hash]) { 124 if (!ipv6_addr_equal(local, &t->parms.laddr) || 125 !ipv6_addr_equal(remote, &t->parms.raddr) || 126 !(t->dev->flags & IFF_UP)) 127 continue; 128 129 if (link == t->parms.link) 130 return t; 131 else 132 cand = t; 133 } 134 135 memset(&any, 0, sizeof(any)); 136 hash = HASH(&any, local); 137 for_each_ip_tunnel_rcu(t, ip6n->tnls_r_l[hash]) { 138 if (!ipv6_addr_equal(local, &t->parms.laddr) || 139 !ipv6_addr_any(&t->parms.raddr) || 140 !(t->dev->flags & IFF_UP)) 141 continue; 142 143 if (link == t->parms.link) 144 return t; 145 else if (!cand) 146 cand = t; 147 } 148 149 hash = HASH(remote, &any); 150 for_each_ip_tunnel_rcu(t, ip6n->tnls_r_l[hash]) { 151 if (!ipv6_addr_equal(remote, &t->parms.raddr) || 152 !ipv6_addr_any(&t->parms.laddr) || 153 !(t->dev->flags & IFF_UP)) 154 continue; 155 156 if (link == t->parms.link) 157 return t; 158 else if (!cand) 159 cand = t; 160 } 161 162 if (cand) 163 return cand; 164 165 t = rcu_dereference(ip6n->collect_md_tun); 166 if (t && t->dev->flags & IFF_UP) 167 return t; 168 169 t = rcu_dereference(ip6n->tnls_wc[0]); 170 if (t && (t->dev->flags & IFF_UP)) 171 return t; 172 173 return NULL; 174 } 175 176 /** 177 * ip6_tnl_bucket - get head of list matching given tunnel parameters 178 * @ip6n: the private data for ip6_vti in the netns 179 * @p: parameters containing tunnel end-points 180 * 181 * Description: 182 * ip6_tnl_bucket() returns the head of the list matching the 183 * &struct in6_addr entries laddr and raddr in @p. 184 * 185 * Return: head of IPv6 tunnel list 186 **/ 187 188 static struct ip6_tnl __rcu ** 189 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p) 190 { 191 const struct in6_addr *remote = &p->raddr; 192 const struct in6_addr *local = &p->laddr; 193 unsigned int h = 0; 194 int prio = 0; 195 196 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { 197 prio = 1; 198 h = HASH(remote, local); 199 } 200 return &ip6n->tnls[prio][h]; 201 } 202 203 /** 204 * ip6_tnl_link - add tunnel to hash table 205 * @ip6n: the private data for ip6_vti in the netns 206 * @t: tunnel to be added 207 **/ 208 209 static void 210 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) 211 { 212 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms); 213 214 if (t->parms.collect_md) 215 rcu_assign_pointer(ip6n->collect_md_tun, t); 216 rcu_assign_pointer(t->next , rtnl_dereference(*tp)); 217 rcu_assign_pointer(*tp, t); 218 } 219 220 /** 221 * ip6_tnl_unlink - remove tunnel from hash table 222 * @ip6n: the private data for ip6_vti in the netns 223 * @t: tunnel to be removed 224 **/ 225 226 static void 227 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) 228 { 229 struct ip6_tnl __rcu **tp; 230 struct ip6_tnl *iter; 231 232 if (t->parms.collect_md) 233 rcu_assign_pointer(ip6n->collect_md_tun, NULL); 234 235 for (tp = ip6_tnl_bucket(ip6n, &t->parms); 236 (iter = rtnl_dereference(*tp)) != NULL; 237 tp = &iter->next) { 238 if (t == iter) { 239 rcu_assign_pointer(*tp, t->next); 240 break; 241 } 242 } 243 } 244 245 static void ip6_dev_free(struct net_device *dev) 246 { 247 struct ip6_tnl *t = netdev_priv(dev); 248 249 gro_cells_destroy(&t->gro_cells); 250 dst_cache_destroy(&t->dst_cache); 251 } 252 253 static int ip6_tnl_create2(struct net_device *dev) 254 { 255 struct ip6_tnl *t = netdev_priv(dev); 256 struct ip6_tnl_net *ip6n = net_generic(t->net, ip6_tnl_net_id); 257 int err; 258 259 dev->rtnl_link_ops = &ip6_link_ops; 260 err = register_netdevice(dev); 261 if (err < 0) 262 
goto out; 263 264 strcpy(t->parms.name, dev->name); 265 266 ip6_tnl_link(ip6n, t); 267 return 0; 268 269 out: 270 return err; 271 } 272 273 /** 274 * ip6_tnl_create - create a new tunnel 275 * @net: network namespace 276 * @p: tunnel parameters 277 * 278 * Description: 279 * Create tunnel matching given parameters. 280 * 281 * Return: 282 * created tunnel or error pointer 283 **/ 284 285 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) 286 { 287 struct net_device *dev; 288 struct ip6_tnl *t; 289 char name[IFNAMSIZ]; 290 int err = -E2BIG; 291 292 if (p->name[0]) { 293 if (!dev_valid_name(p->name)) 294 goto failed; 295 strscpy(name, p->name, IFNAMSIZ); 296 } else { 297 sprintf(name, "ip6tnl%%d"); 298 } 299 err = -ENOMEM; 300 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 301 ip6_tnl_dev_setup); 302 if (!dev) 303 goto failed; 304 305 dev_net_set(dev, net); 306 307 t = netdev_priv(dev); 308 t->parms = *p; 309 t->net = dev_net(dev); 310 err = ip6_tnl_create2(dev); 311 if (err < 0) 312 goto failed_free; 313 314 return t; 315 316 failed_free: 317 free_netdev(dev); 318 failed: 319 return ERR_PTR(err); 320 } 321 322 /** 323 * ip6_tnl_locate - find or create tunnel matching given parameters 324 * @net: network namespace 325 * @p: tunnel parameters 326 * @create: != 0 if allowed to create new tunnel if no match found 327 * 328 * Description: 329 * ip6_tnl_locate() first tries to locate an existing tunnel 330 * based on @parms. If this is unsuccessful, but @create is set a new 331 * tunnel device is created and registered for use. 332 * 333 * Return: 334 * matching tunnel or error pointer 335 **/ 336 337 static struct ip6_tnl *ip6_tnl_locate(struct net *net, 338 struct __ip6_tnl_parm *p, int create) 339 { 340 const struct in6_addr *remote = &p->raddr; 341 const struct in6_addr *local = &p->laddr; 342 struct ip6_tnl __rcu **tp; 343 struct ip6_tnl *t; 344 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 345 346 for (tp = ip6_tnl_bucket(ip6n, p); 347 (t = rtnl_dereference(*tp)) != NULL; 348 tp = &t->next) { 349 if (ipv6_addr_equal(local, &t->parms.laddr) && 350 ipv6_addr_equal(remote, &t->parms.raddr) && 351 p->link == t->parms.link) { 352 if (create) 353 return ERR_PTR(-EEXIST); 354 355 return t; 356 } 357 } 358 if (!create) 359 return ERR_PTR(-ENODEV); 360 return ip6_tnl_create(net, p); 361 } 362 363 /** 364 * ip6_tnl_dev_uninit - tunnel device uninitializer 365 * @dev: the device to be destroyed 366 * 367 * Description: 368 * ip6_tnl_dev_uninit() removes tunnel from its list 369 **/ 370 371 static void 372 ip6_tnl_dev_uninit(struct net_device *dev) 373 { 374 struct ip6_tnl *t = netdev_priv(dev); 375 struct net *net = t->net; 376 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 377 378 if (dev == ip6n->fb_tnl_dev) 379 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); 380 else 381 ip6_tnl_unlink(ip6n, t); 382 dst_cache_reset(&t->dst_cache); 383 netdev_put(dev, &t->dev_tracker); 384 } 385 386 /** 387 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option 388 * @skb: received socket buffer 389 * @raw: the ICMPv6 error message data 390 * 391 * Return: 392 * 0 if none was found, 393 * else index to encapsulation limit 394 **/ 395 396 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) 397 { 398 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw; 399 unsigned int nhoff = raw - skb->data; 400 unsigned int off = nhoff + sizeof(*ipv6h); 401 u8 nexthdr = ipv6h->nexthdr; 402 int exthdr_cnt = 0; 403 404 while (ipv6_ext_hdr(nexthdr) 
&& nexthdr != NEXTHDR_NONE) { 405 struct ipv6_opt_hdr *hdr; 406 u16 optlen; 407 408 if (unlikely(exthdr_cnt++ >= IP6_MAX_EXT_HDRS_CNT)) 409 break; 410 411 if (!pskb_may_pull(skb, off + sizeof(*hdr))) 412 break; 413 414 hdr = (struct ipv6_opt_hdr *)(skb->data + off); 415 if (nexthdr == NEXTHDR_FRAGMENT) { 416 optlen = 8; 417 } else if (nexthdr == NEXTHDR_AUTH) { 418 optlen = ipv6_authlen(hdr); 419 } else { 420 optlen = ipv6_optlen(hdr); 421 } 422 423 if (!pskb_may_pull(skb, off + optlen)) 424 break; 425 426 hdr = (struct ipv6_opt_hdr *)(skb->data + off); 427 if (nexthdr == NEXTHDR_FRAGMENT) { 428 struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr; 429 430 if (frag_hdr->frag_off) 431 break; 432 } 433 if (nexthdr == NEXTHDR_DEST) { 434 int tlv_cnt = 0; 435 u16 i = 2; 436 437 while (1) { 438 struct ipv6_tlv_tnl_enc_lim *tel; 439 440 if (unlikely(tlv_cnt++ >= IP6_TUNNEL_MAX_DEST_TLVS)) 441 break; 442 443 /* No more room for encapsulation limit */ 444 if (i + sizeof(*tel) > optlen) 445 break; 446 447 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i); 448 /* return index of option if found and valid */ 449 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 450 tel->length == 1) 451 return i + off - nhoff; 452 /* else jump to next option */ 453 if (tel->type) 454 i += tel->length + 2; 455 else 456 i++; 457 } 458 } 459 nexthdr = hdr->nexthdr; 460 off += optlen; 461 } 462 return 0; 463 } 464 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim); 465 466 /* ip6_tnl_err() should handle errors in the tunnel according to the 467 * specifications in RFC 2473. 468 */ 469 static int 470 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, 471 u8 *type, u8 *code, int *msg, __u32 *info, int offset) 472 { 473 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; 474 struct net *net = dev_net(skb->dev); 475 u8 rel_type = ICMPV6_DEST_UNREACH; 476 u8 rel_code = ICMPV6_ADDR_UNREACH; 477 __u32 rel_info = 0; 478 struct ip6_tnl *t; 479 int err = -ENOENT; 480 int rel_msg = 0; 481 u8 tproto; 482 __u16 len; 483 484 /* If the packet doesn't contain the original IPv6 header we are 485 in trouble since we might need the source address for further 486 processing of the error. 
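	   Here skb->data points at the IPv6 header this tunnel itself sent,
	   carried back inside the ICMPv6 payload, so its source address is
	   the tunnel entry-point and its destination address the exit-point
	   used for the lookup below.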
*/ 487 488 rcu_read_lock(); 489 t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr); 490 if (!t) 491 goto out; 492 493 tproto = READ_ONCE(t->parms.proto); 494 if (tproto != ipproto && tproto != 0) 495 goto out; 496 497 err = 0; 498 499 switch (*type) { 500 case ICMPV6_DEST_UNREACH: 501 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 502 t->parms.name); 503 rel_msg = 1; 504 break; 505 case ICMPV6_TIME_EXCEED: 506 if ((*code) == ICMPV6_EXC_HOPLIMIT) { 507 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 508 t->parms.name); 509 rel_msg = 1; 510 } 511 break; 512 case ICMPV6_PARAMPROB: { 513 struct ipv6_tlv_tnl_enc_lim *tel; 514 __u32 teli; 515 516 teli = 0; 517 if ((*code) == ICMPV6_HDR_FIELD) 518 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 519 520 if (teli && teli == *info - 2) { 521 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 522 if (tel->encap_limit == 0) { 523 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 524 t->parms.name); 525 rel_msg = 1; 526 } 527 } else { 528 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 529 t->parms.name); 530 } 531 break; 532 } 533 case ICMPV6_PKT_TOOBIG: { 534 __u32 mtu; 535 536 ip6_update_pmtu(skb, net, htonl(*info), 0, 0, 537 sock_net_uid(net, NULL)); 538 mtu = *info - offset; 539 if (mtu < IPV6_MIN_MTU) 540 mtu = IPV6_MIN_MTU; 541 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len); 542 if (len > mtu) { 543 rel_type = ICMPV6_PKT_TOOBIG; 544 rel_code = 0; 545 rel_info = mtu; 546 rel_msg = 1; 547 } 548 break; 549 } 550 case NDISC_REDIRECT: 551 ip6_redirect(skb, net, skb->dev->ifindex, 0, 552 sock_net_uid(net, NULL)); 553 break; 554 } 555 556 *type = rel_type; 557 *code = rel_code; 558 *info = rel_info; 559 *msg = rel_msg; 560 561 out: 562 rcu_read_unlock(); 563 return err; 564 } 565 566 static int 567 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 568 u8 type, u8 code, int offset, __be32 info) 569 { 570 __u32 rel_info = ntohl(info); 571 const struct iphdr *eiph; 572 struct sk_buff *skb2; 573 int err, rel_msg = 0; 574 u8 rel_type = type; 575 u8 rel_code = code; 576 struct rtable *rt; 577 struct flowi4 fl4; 578 579 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, 580 &rel_msg, &rel_info, offset); 581 if (err < 0) 582 return err; 583 584 if (rel_msg == 0) 585 return 0; 586 587 switch (rel_type) { 588 case ICMPV6_DEST_UNREACH: 589 if (rel_code != ICMPV6_ADDR_UNREACH) 590 return 0; 591 rel_type = ICMP_DEST_UNREACH; 592 rel_code = ICMP_HOST_UNREACH; 593 break; 594 case ICMPV6_PKT_TOOBIG: 595 if (rel_code != 0) 596 return 0; 597 rel_type = ICMP_DEST_UNREACH; 598 rel_code = ICMP_FRAG_NEEDED; 599 break; 600 default: 601 return 0; 602 } 603 604 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr))) 605 return 0; 606 607 skb2 = skb_clone(skb, GFP_ATOMIC); 608 if (!skb2) 609 return 0; 610 611 /* Remove debris left by IPv6 stack. 
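	 * The IPv6 control block and the cached dst refer to the outer
	 * packet and are meaningless for the embedded IPv4 packet, which is
	 * re-routed below as if it had just arrived.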
*/ 612 memset(IPCB(skb2), 0, sizeof(*IPCB(skb2))); 613 614 skb_dst_drop(skb2); 615 616 skb_pull(skb2, offset); 617 skb_reset_network_header(skb2); 618 eiph = ip_hdr(skb2); 619 if (eiph->version != 4 || eiph->ihl < 5) 620 goto out; 621 622 /* Try to guess incoming interface */ 623 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr, 624 0, 0, 0, IPPROTO_IPIP, 625 eiph->tos & INET_DSCP_MASK, 0); 626 if (IS_ERR(rt)) 627 goto out; 628 629 skb2->dev = rt->dst.dev; 630 ip_rt_put(rt); 631 632 /* route "incoming" packet */ 633 if (rt->rt_flags & RTCF_LOCAL) { 634 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, 635 eiph->daddr, eiph->saddr, 0, 0, 636 IPPROTO_IPIP, 637 eiph->tos & INET_DSCP_MASK, 0); 638 if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) { 639 if (!IS_ERR(rt)) 640 ip_rt_put(rt); 641 goto out; 642 } 643 skb_dst_set(skb2, &rt->dst); 644 } else { 645 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, 646 ip4h_dscp(eiph), skb2->dev) || 647 skb_dst_dev(skb2)->type != ARPHRD_TUNNEL6) 648 goto out; 649 } 650 651 /* change mtu on this route */ 652 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { 653 if (rel_info > dst6_mtu(skb_dst(skb2))) 654 goto out; 655 656 skb_dst_update_pmtu_no_confirm(skb2, rel_info); 657 } 658 659 icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); 660 661 out: 662 kfree_skb(skb2); 663 return 0; 664 } 665 666 static int 667 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 668 u8 type, u8 code, int offset, __be32 info) 669 { 670 __u32 rel_info = ntohl(info); 671 int err, rel_msg = 0; 672 u8 rel_type = type; 673 u8 rel_code = code; 674 675 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code, 676 &rel_msg, &rel_info, offset); 677 if (err < 0) 678 return err; 679 680 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) { 681 struct rt6_info *rt; 682 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 683 684 if (!skb2) 685 return 0; 686 687 skb_dst_drop(skb2); 688 skb_pull(skb2, offset); 689 skb_reset_network_header(skb2); 690 691 /* Try to guess incoming interface */ 692 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, 693 NULL, 0, skb2, 0); 694 695 if (rt && rt->dst.dev) 696 skb2->dev = rt->dst.dev; 697 698 icmpv6_send(skb2, rel_type, rel_code, rel_info); 699 700 ip6_rt_put(rt); 701 702 kfree_skb(skb2); 703 } 704 705 return 0; 706 } 707 708 static int 709 mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 710 u8 type, u8 code, int offset, __be32 info) 711 { 712 __u32 rel_info = ntohl(info); 713 int err, rel_msg = 0; 714 u8 rel_type = type; 715 u8 rel_code = code; 716 717 err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code, 718 &rel_msg, &rel_info, offset); 719 return err; 720 } 721 722 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, 723 const struct ipv6hdr *ipv6h, 724 struct sk_buff *skb) 725 { 726 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; 727 728 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 729 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); 730 731 return IP6_ECN_decapsulate(ipv6h, skb); 732 } 733 734 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, 735 const struct ipv6hdr *ipv6h, 736 struct sk_buff *skb) 737 { 738 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 739 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); 740 741 return IP6_ECN_decapsulate(ipv6h, skb); 742 } 743 744 static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, 745 const struct ipv6hdr *ipv6h, 746 
struct sk_buff *skb) 747 { 748 /* ECN is not supported in AF_MPLS */ 749 return 0; 750 } 751 752 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, 753 const struct in6_addr *laddr, 754 const struct in6_addr *raddr) 755 { 756 struct __ip6_tnl_parm *p = &t->parms; 757 int ltype = ipv6_addr_type(laddr); 758 int rtype = ipv6_addr_type(raddr); 759 __u32 flags = 0; 760 761 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) { 762 flags = IP6_TNL_F_CAP_PER_PACKET; 763 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && 764 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && 765 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) && 766 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) { 767 if (ltype&IPV6_ADDR_UNICAST) 768 flags |= IP6_TNL_F_CAP_XMIT; 769 if (rtype&IPV6_ADDR_UNICAST) 770 flags |= IP6_TNL_F_CAP_RCV; 771 } 772 return flags; 773 } 774 EXPORT_SYMBOL(ip6_tnl_get_cap); 775 776 /* called with rcu_read_lock() */ 777 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, 778 const struct in6_addr *laddr, 779 const struct in6_addr *raddr) 780 { 781 struct __ip6_tnl_parm *p = &t->parms; 782 int ret = 0; 783 struct net *net = t->net; 784 785 if ((p->flags & IP6_TNL_F_CAP_RCV) || 786 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) && 787 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) { 788 struct net_device *ldev = NULL; 789 790 if (p->link) 791 ldev = dev_get_by_index_rcu(net, p->link); 792 793 if ((ipv6_addr_is_multicast(laddr) || 794 likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false, 795 0, IFA_F_TENTATIVE))) && 796 ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) || 797 likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true, 798 0, IFA_F_TENTATIVE)))) 799 ret = 1; 800 } 801 return ret; 802 } 803 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl); 804 805 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, 806 const struct tnl_ptk_info *tpi, 807 struct metadata_dst *tun_dst, 808 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, 809 const struct ipv6hdr *ipv6h, 810 struct sk_buff *skb), 811 bool log_ecn_err) 812 { 813 const struct ipv6hdr *ipv6h; 814 int nh, err; 815 816 if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) != 817 test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) { 818 DEV_STATS_INC(tunnel->dev, rx_crc_errors); 819 DEV_STATS_INC(tunnel->dev, rx_errors); 820 goto drop; 821 } 822 823 if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) { 824 if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) || 825 (tunnel->i_seqno && 826 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) { 827 DEV_STATS_INC(tunnel->dev, rx_fifo_errors); 828 DEV_STATS_INC(tunnel->dev, rx_errors); 829 goto drop; 830 } 831 tunnel->i_seqno = ntohl(tpi->seq) + 1; 832 } 833 834 skb->protocol = tpi->proto; 835 836 /* Warning: All skb pointers will be invalidated! */ 837 if (tunnel->dev->type == ARPHRD_ETHER) { 838 if (!pskb_may_pull(skb, ETH_HLEN)) { 839 DEV_STATS_INC(tunnel->dev, rx_length_errors); 840 DEV_STATS_INC(tunnel->dev, rx_errors); 841 goto drop; 842 } 843 844 skb->protocol = eth_type_trans(skb, tunnel->dev); 845 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 846 } else { 847 skb->dev = tunnel->dev; 848 skb_reset_mac_header(skb); 849 } 850 851 /* Save offset of outer header relative to skb->head, 852 * because we are going to reset the network header to the inner header 853 * and might change skb->head. 
854 */ 855 nh = skb_network_header(skb) - skb->head; 856 857 skb_reset_network_header(skb); 858 859 if (skb_vlan_inet_prepare(skb, true)) { 860 DEV_STATS_INC(tunnel->dev, rx_length_errors); 861 DEV_STATS_INC(tunnel->dev, rx_errors); 862 goto drop; 863 } 864 865 /* Get the outer header. */ 866 ipv6h = (struct ipv6hdr *)(skb->head + nh); 867 868 memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); 869 870 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); 871 872 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb); 873 if (unlikely(err)) { 874 if (log_ecn_err) 875 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n", 876 &ipv6h->saddr, 877 ipv6_get_dsfield(ipv6h)); 878 if (err > 1) { 879 DEV_STATS_INC(tunnel->dev, rx_frame_errors); 880 DEV_STATS_INC(tunnel->dev, rx_errors); 881 goto drop; 882 } 883 } 884 885 dev_sw_netstats_rx_add(tunnel->dev, skb->len); 886 887 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev))); 888 889 if (tun_dst) 890 skb_dst_set(skb, (struct dst_entry *)tun_dst); 891 892 gro_cells_receive(&tunnel->gro_cells, skb); 893 return 0; 894 895 drop: 896 if (tun_dst) 897 dst_release((struct dst_entry *)tun_dst); 898 kfree_skb(skb); 899 return 0; 900 } 901 902 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, 903 const struct tnl_ptk_info *tpi, 904 struct metadata_dst *tun_dst, 905 bool log_ecn_err) 906 { 907 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, 908 const struct ipv6hdr *ipv6h, 909 struct sk_buff *skb); 910 911 dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; 912 if (tpi->proto == htons(ETH_P_IP)) 913 dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; 914 915 return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, 916 log_ecn_err); 917 } 918 EXPORT_SYMBOL(ip6_tnl_rcv); 919 920 static const struct tnl_ptk_info tpi_v6 = { 921 /* no tunnel info required for ipxip6. */ 922 .proto = htons(ETH_P_IPV6), 923 }; 924 925 static const struct tnl_ptk_info tpi_v4 = { 926 /* no tunnel info required for ipxip6. */ 927 .proto = htons(ETH_P_IP), 928 }; 929 930 static const struct tnl_ptk_info tpi_mpls = { 931 /* no tunnel info required for mplsip6. 
*/ 932 .proto = htons(ETH_P_MPLS_UC), 933 }; 934 935 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto, 936 const struct tnl_ptk_info *tpi, 937 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, 938 const struct ipv6hdr *ipv6h, 939 struct sk_buff *skb)) 940 { 941 struct ip6_tnl *t; 942 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); 943 struct metadata_dst *tun_dst = NULL; 944 int ret = -1; 945 946 rcu_read_lock(); 947 t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr); 948 949 if (t) { 950 u8 tproto = READ_ONCE(t->parms.proto); 951 952 if (tproto != ipproto && tproto != 0) 953 goto drop; 954 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 955 goto drop; 956 ipv6h = ipv6_hdr(skb); 957 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) 958 goto drop; 959 if (iptunnel_pull_header(skb, 0, tpi->proto, false)) 960 goto drop; 961 if (t->parms.collect_md) { 962 IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 963 964 tun_dst = ipv6_tun_rx_dst(skb, flags, 0, 0); 965 if (!tun_dst) 966 goto drop; 967 } 968 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, 969 log_ecn_error); 970 } 971 972 rcu_read_unlock(); 973 974 return ret; 975 976 drop: 977 rcu_read_unlock(); 978 kfree_skb(skb); 979 return 0; 980 } 981 982 static int ip4ip6_rcv(struct sk_buff *skb) 983 { 984 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4, 985 ip4ip6_dscp_ecn_decapsulate); 986 } 987 988 static int ip6ip6_rcv(struct sk_buff *skb) 989 { 990 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6, 991 ip6ip6_dscp_ecn_decapsulate); 992 } 993 994 static int mplsip6_rcv(struct sk_buff *skb) 995 { 996 return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls, 997 mplsip6_dscp_ecn_decapsulate); 998 } 999 1000 struct ipv6_tel_txoption { 1001 struct ipv6_txoptions ops; 1002 __u8 dst_opt[8]; 1003 }; 1004 1005 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) 1006 { 1007 memset(opt, 0, sizeof(struct ipv6_tel_txoption)); 1008 1009 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT; 1010 opt->dst_opt[3] = 1; 1011 opt->dst_opt[4] = encap_limit; 1012 opt->dst_opt[5] = IPV6_TLV_PADN; 1013 opt->dst_opt[6] = 1; 1014 1015 opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt; 1016 opt->ops.opt_nflen = 8; 1017 } 1018 1019 /** 1020 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own 1021 * @t: the outgoing tunnel device 1022 * @hdr: IPv6 header from the incoming packet 1023 * 1024 * Description: 1025 * Avoid trivial tunneling loop by checking that tunnel exit-point 1026 * doesn't match source of incoming packet. 
1027 * 1028 * Return: 1029 * 1 if conflict, 1030 * 0 else 1031 **/ 1032 1033 static inline bool 1034 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) 1035 { 1036 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 1037 } 1038 1039 int ip6_tnl_xmit_ctl(struct ip6_tnl *t, 1040 const struct in6_addr *laddr, 1041 const struct in6_addr *raddr) 1042 { 1043 struct __ip6_tnl_parm *p = &t->parms; 1044 int ret = 0; 1045 struct net *net = t->net; 1046 1047 if (t->parms.collect_md) 1048 return 1; 1049 1050 if ((p->flags & IP6_TNL_F_CAP_XMIT) || 1051 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) && 1052 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) { 1053 struct net_device *ldev = NULL; 1054 1055 rcu_read_lock(); 1056 if (p->link) 1057 ldev = dev_get_by_index_rcu(net, p->link); 1058 1059 if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false, 1060 0, IFA_F_TENTATIVE))) 1061 pr_warn_ratelimited("%s xmit: Local address not yet configured!\n", 1062 p->name); 1063 else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) && 1064 !ipv6_addr_is_multicast(raddr) && 1065 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev, 1066 true, 0, IFA_F_TENTATIVE))) 1067 pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n", 1068 p->name); 1069 else 1070 ret = 1; 1071 rcu_read_unlock(); 1072 } 1073 return ret; 1074 } 1075 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl); 1076 1077 /** 1078 * ip6_tnl_xmit - encapsulate packet and send 1079 * @skb: the outgoing socket buffer 1080 * @dev: the outgoing tunnel device 1081 * @dsfield: dscp code for outer header 1082 * @fl6: flow of tunneled packet 1083 * @encap_limit: encapsulation limit 1084 * @pmtu: Path MTU is stored if packet is too big 1085 * @proto: next header value 1086 * 1087 * Description: 1088 * Build new header and do some sanity checks on the packet before sending 1089 * it. 1090 * 1091 * Return: 1092 * 0 on success 1093 * -1 fail 1094 * %-EMSGSIZE message too big. return mtu in this case. 1095 **/ 1096 1097 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, 1098 struct flowi6 *fl6, int encap_limit, __u32 *pmtu, 1099 __u8 proto) 1100 { 1101 struct ip6_tnl *t = netdev_priv(dev); 1102 struct net *net = t->net; 1103 struct ipv6hdr *ipv6h; 1104 struct ipv6_tel_txoption opt; 1105 struct dst_entry *dst = NULL, *ndst = NULL; 1106 struct net_device *tdev; 1107 int mtu; 1108 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? 
ETH_HLEN : 0; 1109 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; 1110 unsigned int max_headroom = psh_hlen; 1111 __be16 payload_protocol; 1112 bool use_cache = false; 1113 u8 hop_limit; 1114 int err = -1; 1115 1116 payload_protocol = skb_protocol(skb, true); 1117 1118 if (t->parms.collect_md) { 1119 hop_limit = skb_tunnel_info(skb)->key.ttl; 1120 goto route_lookup; 1121 } else { 1122 hop_limit = t->parms.hop_limit; 1123 } 1124 1125 /* NBMA tunnel */ 1126 if (ipv6_addr_any(&t->parms.raddr)) { 1127 if (payload_protocol == htons(ETH_P_IPV6)) { 1128 struct in6_addr *addr6; 1129 struct neighbour *neigh; 1130 int addr_type; 1131 1132 if (!skb_dst(skb)) 1133 goto tx_err_link_failure; 1134 1135 neigh = dst_neigh_lookup(skb_dst(skb), 1136 &ipv6_hdr(skb)->daddr); 1137 if (!neigh) 1138 goto tx_err_link_failure; 1139 1140 addr6 = (struct in6_addr *)&neigh->primary_key; 1141 addr_type = ipv6_addr_type(addr6); 1142 1143 if (addr_type == IPV6_ADDR_ANY) 1144 addr6 = &ipv6_hdr(skb)->daddr; 1145 1146 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1147 neigh_release(neigh); 1148 } else if (payload_protocol == htons(ETH_P_IP)) { 1149 const struct rtable *rt = skb_rtable(skb); 1150 1151 if (!rt) 1152 goto tx_err_link_failure; 1153 1154 if (rt->rt_gw_family == AF_INET6) 1155 memcpy(&fl6->daddr, &rt->rt_gw6, sizeof(fl6->daddr)); 1156 } 1157 } else if (t->parms.proto != 0 && !(t->parms.flags & 1158 (IP6_TNL_F_USE_ORIG_TCLASS | 1159 IP6_TNL_F_USE_ORIG_FWMARK))) { 1160 /* enable the cache only if neither the outer protocol nor the 1161 * routing decision depends on the current inner header value 1162 */ 1163 use_cache = true; 1164 } 1165 1166 if (use_cache) 1167 dst = dst_cache_get(&t->dst_cache); 1168 1169 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) 1170 goto tx_err_link_failure; 1171 1172 if (!dst) { 1173 route_lookup: 1174 /* add dsfield to flowlabel for route lookup */ 1175 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel); 1176 1177 dst = ip6_route_output(net, NULL, fl6); 1178 1179 if (dst->error) 1180 goto tx_err_link_failure; 1181 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); 1182 if (IS_ERR(dst)) { 1183 err = PTR_ERR(dst); 1184 dst = NULL; 1185 goto tx_err_link_failure; 1186 } 1187 if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) && 1188 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev, 1189 &fl6->daddr, 0, &fl6->saddr)) 1190 goto tx_err_link_failure; 1191 ndst = dst; 1192 } 1193 1194 tdev = dst_dev(dst); 1195 1196 if (tdev == dev) { 1197 DEV_STATS_INC(dev, collisions); 1198 net_warn_ratelimited("%s: Local routing loop detected!\n", 1199 t->parms.name); 1200 goto tx_err_dst_release; 1201 } 1202 mtu = dst6_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen; 1203 if (encap_limit >= 0) { 1204 max_headroom += 8; 1205 mtu -= 8; 1206 } 1207 mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ? 1208 IPV6_MIN_MTU : IPV4_MIN_MTU); 1209 1210 skb_dst_update_pmtu_no_confirm(skb, mtu); 1211 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { 1212 *pmtu = mtu; 1213 err = -EMSGSIZE; 1214 goto tx_err_dst_release; 1215 } 1216 1217 if (t->err_count > 0) { 1218 if (time_before(jiffies, 1219 t->err_time + IP6TUNNEL_ERR_TIMEO)) { 1220 t->err_count--; 1221 1222 dst_link_failure(skb); 1223 } else { 1224 t->err_count = 0; 1225 } 1226 } 1227 1228 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); 1229 1230 /* 1231 * Okay, now see if we can stuff it in the buffer as-is. 
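	 * At this point max_headroom covers the outer IPv6 header, any
	 * configured encapsulation header and, when an encapsulation limit
	 * is in use, the 8-byte destination options header carrying it; the
	 * link-layer needs of the underlying device are added below.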
1232 */ 1233 max_headroom += LL_RESERVED_SPACE(tdev); 1234 1235 if (skb_headroom(skb) < max_headroom || skb_shared(skb) || 1236 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 1237 struct sk_buff *new_skb; 1238 1239 new_skb = skb_realloc_headroom(skb, max_headroom); 1240 if (!new_skb) 1241 goto tx_err_dst_release; 1242 1243 if (skb->sk) 1244 skb_set_owner_w(new_skb, skb->sk); 1245 consume_skb(skb); 1246 skb = new_skb; 1247 } 1248 1249 if (t->parms.collect_md) { 1250 if (t->encap.type != TUNNEL_ENCAP_NONE) 1251 goto tx_err_dst_release; 1252 } else { 1253 if (use_cache && ndst) 1254 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); 1255 } 1256 skb_dst_set(skb, dst); 1257 1258 if (hop_limit == 0) { 1259 if (payload_protocol == htons(ETH_P_IP)) 1260 hop_limit = ip_hdr(skb)->ttl; 1261 else if (payload_protocol == htons(ETH_P_IPV6)) 1262 hop_limit = ipv6_hdr(skb)->hop_limit; 1263 else 1264 hop_limit = ip6_dst_hoplimit(dst); 1265 } 1266 1267 /* Calculate max headroom for all the headers and adjust 1268 * needed_headroom if necessary. 1269 */ 1270 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr) 1271 + dst->header_len + t->hlen; 1272 ip_tunnel_adj_headroom(dev, max_headroom); 1273 1274 err = ip6_tnl_encap(skb, t, &proto, fl6); 1275 if (err) 1276 return err; 1277 1278 if (encap_limit >= 0) { 1279 init_tel_txopt(&opt, encap_limit); 1280 proto = ipv6_push_frag_opts(skb, &opt.ops, proto); 1281 } 1282 1283 skb_push(skb, sizeof(struct ipv6hdr)); 1284 skb_reset_network_header(skb); 1285 ipv6h = ipv6_hdr(skb); 1286 ip6_flow_hdr(ipv6h, dsfield, 1287 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); 1288 ipv6h->hop_limit = hop_limit; 1289 ipv6h->nexthdr = proto; 1290 ipv6h->saddr = fl6->saddr; 1291 ipv6h->daddr = fl6->daddr; 1292 ip6tunnel_xmit(NULL, skb, dev, 0); 1293 return 0; 1294 tx_err_link_failure: 1295 DEV_STATS_INC(dev, tx_carrier_errors); 1296 dst_link_failure(skb); 1297 tx_err_dst_release: 1298 dst_release(dst); 1299 return err; 1300 } 1301 EXPORT_SYMBOL(ip6_tnl_xmit); 1302 1303 static inline int 1304 ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, 1305 u8 protocol) 1306 { 1307 struct ip6_tnl *t = netdev_priv(dev); 1308 struct ipv6hdr *ipv6h; 1309 const struct iphdr *iph; 1310 int encap_limit = -1; 1311 __u16 offset; 1312 struct flowi6 fl6; 1313 __u8 dsfield, orig_dsfield; 1314 __u32 mtu; 1315 u8 tproto; 1316 int err; 1317 1318 tproto = READ_ONCE(t->parms.proto); 1319 if (tproto != protocol && tproto != 0) 1320 return -1; 1321 1322 if (t->parms.collect_md) { 1323 struct ip_tunnel_info *tun_info; 1324 const struct ip_tunnel_key *key; 1325 1326 tun_info = skb_tunnel_info(skb); 1327 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || 1328 ip_tunnel_info_af(tun_info) != AF_INET6)) 1329 return -1; 1330 key = &tun_info->key; 1331 memset(&fl6, 0, sizeof(fl6)); 1332 fl6.flowi6_proto = protocol; 1333 fl6.saddr = key->u.ipv6.src; 1334 fl6.daddr = key->u.ipv6.dst; 1335 fl6.flowlabel = key->label; 1336 dsfield = key->tos; 1337 switch (protocol) { 1338 case IPPROTO_IPIP: 1339 iph = ip_hdr(skb); 1340 orig_dsfield = ipv4_get_dsfield(iph); 1341 break; 1342 case IPPROTO_IPV6: 1343 ipv6h = ipv6_hdr(skb); 1344 orig_dsfield = ipv6_get_dsfield(ipv6h); 1345 break; 1346 default: 1347 orig_dsfield = dsfield; 1348 break; 1349 } 1350 } else { 1351 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1352 encap_limit = t->parms.encap_limit; 1353 if (protocol == IPPROTO_IPV6) { 1354 offset = ip6_tnl_parse_tlv_enc_lim(skb, 1355 skb_network_header(skb)); 1356 /* 
ip6_tnl_parse_tlv_enc_lim() might have 1357 * reallocated skb->head 1358 */ 1359 if (offset > 0) { 1360 struct ipv6_tlv_tnl_enc_lim *tel; 1361 1362 tel = (void *)&skb_network_header(skb)[offset]; 1363 if (tel->encap_limit == 0) { 1364 icmpv6_ndo_send(skb, ICMPV6_PARAMPROB, 1365 ICMPV6_HDR_FIELD, offset + 2); 1366 return -1; 1367 } 1368 encap_limit = tel->encap_limit - 1; 1369 } 1370 } 1371 1372 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1373 fl6.flowi6_proto = protocol; 1374 1375 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 1376 fl6.flowi6_mark = skb->mark; 1377 else 1378 fl6.flowi6_mark = t->parms.fwmark; 1379 switch (protocol) { 1380 case IPPROTO_IPIP: 1381 iph = ip_hdr(skb); 1382 orig_dsfield = ipv4_get_dsfield(iph); 1383 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1384 dsfield = orig_dsfield; 1385 else 1386 dsfield = ip6_tclass(t->parms.flowinfo); 1387 break; 1388 case IPPROTO_IPV6: 1389 ipv6h = ipv6_hdr(skb); 1390 orig_dsfield = ipv6_get_dsfield(ipv6h); 1391 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1392 dsfield = orig_dsfield; 1393 else 1394 dsfield = ip6_tclass(t->parms.flowinfo); 1395 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) 1396 fl6.flowlabel |= ip6_flowlabel(ipv6h); 1397 break; 1398 default: 1399 orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo); 1400 break; 1401 } 1402 } 1403 1404 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 1405 dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield); 1406 1407 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1408 return -1; 1409 1410 skb_set_inner_ipproto(skb, protocol); 1411 1412 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1413 protocol); 1414 if (err != 0) { 1415 /* XXX: send ICMP error even if DF is not set. */ 1416 if (err == -EMSGSIZE) 1417 switch (protocol) { 1418 case IPPROTO_IPIP: 1419 icmp_ndo_send(skb, ICMP_DEST_UNREACH, 1420 ICMP_FRAG_NEEDED, htonl(mtu)); 1421 break; 1422 case IPPROTO_IPV6: 1423 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1424 break; 1425 default: 1426 break; 1427 } 1428 return -1; 1429 } 1430 1431 return 0; 1432 } 1433 1434 static netdev_tx_t 1435 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev) 1436 { 1437 struct ip6_tnl *t = netdev_priv(dev); 1438 u8 ipproto; 1439 int ret; 1440 1441 if (!pskb_inet_may_pull(skb)) 1442 goto tx_err; 1443 1444 switch (skb->protocol) { 1445 case htons(ETH_P_IP): 1446 ipproto = IPPROTO_IPIP; 1447 break; 1448 case htons(ETH_P_IPV6): 1449 if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb))) 1450 goto tx_err; 1451 ipproto = IPPROTO_IPV6; 1452 break; 1453 case htons(ETH_P_MPLS_UC): 1454 ipproto = IPPROTO_MPLS; 1455 break; 1456 default: 1457 goto tx_err; 1458 } 1459 1460 ret = ipxip6_tnl_xmit(skb, dev, ipproto); 1461 if (ret < 0) 1462 goto tx_err; 1463 1464 return NETDEV_TX_OK; 1465 1466 tx_err: 1467 DEV_STATS_INC(dev, tx_errors); 1468 DEV_STATS_INC(dev, tx_dropped); 1469 kfree_skb(skb); 1470 return NETDEV_TX_OK; 1471 } 1472 1473 static void ip6_tnl_link_config(struct ip6_tnl *t) 1474 { 1475 struct net_device *dev = t->dev; 1476 struct net_device *tdev = NULL; 1477 struct __ip6_tnl_parm *p = &t->parms; 1478 struct flowi6 *fl6 = &t->fl.u.ip6; 1479 int t_hlen; 1480 int mtu; 1481 1482 __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr)); 1483 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); 1484 1485 /* Set up flowi template */ 1486 fl6->saddr = p->laddr; 1487 fl6->daddr = p->raddr; 1488 fl6->flowi6_oif = p->link; 1489 fl6->flowlabel = 0; 1490 1491 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) 1492 fl6->flowlabel |= 
IPV6_TCLASS_MASK & p->flowinfo; 1493 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) 1494 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; 1495 1496 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET); 1497 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr); 1498 1499 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV) 1500 dev->flags |= IFF_POINTOPOINT; 1501 else 1502 dev->flags &= ~IFF_POINTOPOINT; 1503 1504 t->tun_hlen = 0; 1505 t->hlen = t->encap_hlen + t->tun_hlen; 1506 t_hlen = t->hlen + sizeof(struct ipv6hdr); 1507 1508 if (p->flags & IP6_TNL_F_CAP_XMIT) { 1509 int strict = (ipv6_addr_type(&p->raddr) & 1510 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 1511 1512 struct rt6_info *rt = rt6_lookup(t->net, 1513 &p->raddr, &p->laddr, 1514 p->link, NULL, strict); 1515 if (rt) { 1516 tdev = rt->dst.dev; 1517 ip6_rt_put(rt); 1518 } 1519 1520 if (!tdev && p->link) 1521 tdev = __dev_get_by_index(t->net, p->link); 1522 1523 if (tdev) { 1524 dev->needed_headroom = tdev->hard_header_len + 1525 tdev->needed_headroom + t_hlen; 1526 mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU); 1527 1528 mtu = mtu - t_hlen; 1529 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1530 mtu -= 8; 1531 1532 if (mtu < IPV6_MIN_MTU) 1533 mtu = IPV6_MIN_MTU; 1534 WRITE_ONCE(dev->mtu, mtu); 1535 } 1536 } 1537 } 1538 1539 /** 1540 * ip6_tnl_change - update the tunnel parameters 1541 * @t: tunnel to be changed 1542 * @p: tunnel configuration parameters 1543 * 1544 * Description: 1545 * ip6_tnl_change() updates the tunnel parameters 1546 **/ 1547 1548 static void 1549 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) 1550 { 1551 t->parms.laddr = p->laddr; 1552 t->parms.raddr = p->raddr; 1553 t->parms.flags = p->flags; 1554 t->parms.hop_limit = p->hop_limit; 1555 t->parms.encap_limit = p->encap_limit; 1556 t->parms.flowinfo = p->flowinfo; 1557 t->parms.link = p->link; 1558 t->parms.proto = p->proto; 1559 t->parms.fwmark = p->fwmark; 1560 dst_cache_reset(&t->dst_cache); 1561 ip6_tnl_link_config(t); 1562 } 1563 1564 static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) 1565 { 1566 struct net *net = t->net; 1567 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1568 1569 ip6_tnl_unlink(ip6n, t); 1570 synchronize_net(); 1571 ip6_tnl_change(t, p); 1572 ip6_tnl_link(ip6n, t); 1573 netdev_state_change(t->dev); 1574 } 1575 1576 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p, 1577 bool strict) 1578 { 1579 /* For the default ip6tnl0 device, allow changing only the protocol 1580 * (the IP6_TNL_F_CAP_PER_PACKET flag is set on ip6tnl0, and all other 1581 * parameters are 0). 
1582 */ 1583 if (strict && 1584 (!ipv6_addr_any(&p->laddr) || !ipv6_addr_any(&p->raddr) || 1585 p->flags != t->parms.flags || p->hop_limit || p->encap_limit || 1586 p->flowinfo || p->link || p->fwmark || p->collect_md)) 1587 return -EINVAL; 1588 1589 t->parms.proto = p->proto; 1590 netdev_state_change(t->dev); 1591 return 0; 1592 } 1593 1594 static void 1595 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) 1596 { 1597 p->laddr = u->laddr; 1598 p->raddr = u->raddr; 1599 p->flags = u->flags; 1600 p->hop_limit = u->hop_limit; 1601 p->encap_limit = u->encap_limit; 1602 p->flowinfo = u->flowinfo; 1603 p->link = u->link; 1604 p->proto = u->proto; 1605 memcpy(p->name, u->name, sizeof(u->name)); 1606 } 1607 1608 static void 1609 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p) 1610 { 1611 u->laddr = p->laddr; 1612 u->raddr = p->raddr; 1613 u->flags = p->flags; 1614 u->hop_limit = p->hop_limit; 1615 u->encap_limit = p->encap_limit; 1616 u->flowinfo = p->flowinfo; 1617 u->link = p->link; 1618 u->proto = p->proto; 1619 memcpy(u->name, p->name, sizeof(u->name)); 1620 } 1621 1622 /** 1623 * ip6_tnl_siocdevprivate - configure ipv6 tunnels from userspace 1624 * @dev: virtual device associated with tunnel 1625 * @ifr: unused 1626 * @data: parameters passed from userspace 1627 * @cmd: command to be performed 1628 * 1629 * Description: 1630 * ip6_tnl_ioctl() is used for managing IPv6 tunnels 1631 * from userspace. 1632 * 1633 * The possible commands are the following: 1634 * %SIOCGETTUNNEL: get tunnel parameters for device 1635 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters 1636 * %SIOCCHGTUNNEL: change tunnel parameters to those given 1637 * %SIOCDELTUNNEL: delete tunnel 1638 * 1639 * The fallback device "ip6tnl0", created during module 1640 * initialization, can be used for creating other tunnel devices. 
1641 * 1642 * Return: 1643 * 0 on success, 1644 * %-EFAULT if unable to copy data to or from userspace, 1645 * %-EPERM if current process hasn't %CAP_NET_ADMIN set 1646 * %-EINVAL if passed tunnel parameters are invalid, 1647 * %-EEXIST if changing a tunnel's parameters would cause a conflict 1648 * %-ENODEV if attempting to change or delete a nonexisting device 1649 **/ 1650 1651 static int 1652 ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr, 1653 void __user *data, int cmd) 1654 { 1655 int err = 0; 1656 struct ip6_tnl_parm p; 1657 struct __ip6_tnl_parm p1; 1658 struct ip6_tnl *t = netdev_priv(dev); 1659 struct net *net = t->net; 1660 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1661 1662 memset(&p1, 0, sizeof(p1)); 1663 1664 switch (cmd) { 1665 case SIOCGETTUNNEL: 1666 if (dev == ip6n->fb_tnl_dev) { 1667 if (copy_from_user(&p, data, sizeof(p))) { 1668 err = -EFAULT; 1669 break; 1670 } 1671 ip6_tnl_parm_from_user(&p1, &p); 1672 t = ip6_tnl_locate(net, &p1, 0); 1673 if (IS_ERR(t)) 1674 t = netdev_priv(dev); 1675 } else { 1676 memset(&p, 0, sizeof(p)); 1677 } 1678 ip6_tnl_parm_to_user(&p, &t->parms); 1679 if (copy_to_user(data, &p, sizeof(p))) 1680 err = -EFAULT; 1681 break; 1682 case SIOCADDTUNNEL: 1683 case SIOCCHGTUNNEL: 1684 err = -EPERM; 1685 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1686 break; 1687 err = -EFAULT; 1688 if (copy_from_user(&p, data, sizeof(p))) 1689 break; 1690 err = -EINVAL; 1691 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && 1692 p.proto != 0) 1693 break; 1694 ip6_tnl_parm_from_user(&p1, &p); 1695 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); 1696 if (cmd == SIOCCHGTUNNEL) { 1697 if (!IS_ERR(t)) { 1698 if (t->dev != dev) { 1699 err = -EEXIST; 1700 break; 1701 } 1702 } else 1703 t = netdev_priv(dev); 1704 if (dev == ip6n->fb_tnl_dev) 1705 ip6_tnl0_update(t, &p1, false); 1706 else 1707 ip6_tnl_update(t, &p1); 1708 } 1709 if (!IS_ERR(t)) { 1710 err = 0; 1711 ip6_tnl_parm_to_user(&p, &t->parms); 1712 if (copy_to_user(data, &p, sizeof(p))) 1713 err = -EFAULT; 1714 1715 } else { 1716 err = PTR_ERR(t); 1717 } 1718 break; 1719 case SIOCDELTUNNEL: 1720 err = -EPERM; 1721 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1722 break; 1723 1724 if (dev == ip6n->fb_tnl_dev) { 1725 err = -EFAULT; 1726 if (copy_from_user(&p, data, sizeof(p))) 1727 break; 1728 err = -ENOENT; 1729 ip6_tnl_parm_from_user(&p1, &p); 1730 t = ip6_tnl_locate(net, &p1, 0); 1731 if (IS_ERR(t)) 1732 break; 1733 err = -EPERM; 1734 if (t->dev == ip6n->fb_tnl_dev) 1735 break; 1736 dev = t->dev; 1737 } 1738 err = 0; 1739 unregister_netdevice(dev); 1740 break; 1741 default: 1742 err = -EINVAL; 1743 } 1744 return err; 1745 } 1746 1747 /** 1748 * ip6_tnl_change_mtu - change mtu manually for tunnel device 1749 * @dev: virtual device associated with tunnel 1750 * @new_mtu: the new mtu 1751 * 1752 * Return: 1753 * 0 on success, 1754 * %-EINVAL if mtu too small 1755 **/ 1756 1757 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) 1758 { 1759 struct ip6_tnl *tnl = netdev_priv(dev); 1760 int t_hlen; 1761 1762 t_hlen = tnl->hlen + sizeof(struct ipv6hdr); 1763 if (tnl->parms.proto == IPPROTO_IPV6) { 1764 if (new_mtu < IPV6_MIN_MTU) 1765 return -EINVAL; 1766 } else { 1767 if (new_mtu < ETH_MIN_MTU) 1768 return -EINVAL; 1769 } 1770 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { 1771 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen) 1772 return -EINVAL; 1773 } else { 1774 if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen) 1775 return 
-EINVAL; 1776 } 1777 WRITE_ONCE(dev->mtu, new_mtu); 1778 return 0; 1779 } 1780 EXPORT_SYMBOL(ip6_tnl_change_mtu); 1781 1782 int ip6_tnl_get_iflink(const struct net_device *dev) 1783 { 1784 struct ip6_tnl *t = netdev_priv(dev); 1785 1786 return READ_ONCE(t->parms.link); 1787 } 1788 EXPORT_SYMBOL(ip6_tnl_get_iflink); 1789 1790 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops, 1791 unsigned int num) 1792 { 1793 if (num >= MAX_IPTUN_ENCAP_OPS) 1794 return -ERANGE; 1795 1796 return !cmpxchg((const struct ip6_tnl_encap_ops **) 1797 &ip6tun_encaps[num], 1798 NULL, ops) ? 0 : -1; 1799 } 1800 EXPORT_SYMBOL(ip6_tnl_encap_add_ops); 1801 1802 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops, 1803 unsigned int num) 1804 { 1805 int ret; 1806 1807 if (num >= MAX_IPTUN_ENCAP_OPS) 1808 return -ERANGE; 1809 1810 ret = (cmpxchg((const struct ip6_tnl_encap_ops **) 1811 &ip6tun_encaps[num], 1812 ops, NULL) == ops) ? 0 : -1; 1813 1814 synchronize_net(); 1815 1816 return ret; 1817 } 1818 EXPORT_SYMBOL(ip6_tnl_encap_del_ops); 1819 1820 int ip6_tnl_encap_setup(struct ip6_tnl *t, 1821 struct ip_tunnel_encap *ipencap) 1822 { 1823 int hlen; 1824 1825 memset(&t->encap, 0, sizeof(t->encap)); 1826 1827 hlen = ip6_encap_hlen(ipencap); 1828 if (hlen < 0) 1829 return hlen; 1830 1831 t->encap.type = ipencap->type; 1832 t->encap.sport = ipencap->sport; 1833 t->encap.dport = ipencap->dport; 1834 t->encap.flags = ipencap->flags; 1835 1836 t->encap_hlen = hlen; 1837 t->hlen = t->encap_hlen + t->tun_hlen; 1838 1839 return 0; 1840 } 1841 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup); 1842 1843 static int ip6_tnl_fill_forward_path(struct net_device_path_ctx *ctx, 1844 struct net_device_path *path) 1845 { 1846 struct ip6_tnl *t = netdev_priv(ctx->dev); 1847 struct flowi6 fl6 = { 1848 .daddr = t->parms.raddr, 1849 }; 1850 struct dst_entry *dst; 1851 int err; 1852 1853 dst = ip6_route_output(dev_net(ctx->dev), NULL, &fl6); 1854 if (!dst->error) { 1855 path->type = DEV_PATH_TUN; 1856 path->tun.src_v6 = t->parms.laddr; 1857 path->tun.dst_v6 = t->parms.raddr; 1858 path->tun.l3_proto = IPPROTO_IPV6; 1859 path->dev = ctx->dev; 1860 ctx->dev = dst->dev; 1861 } 1862 1863 err = dst->error; 1864 dst_release(dst); 1865 1866 return err; 1867 } 1868 1869 static const struct net_device_ops ip6_tnl_netdev_ops = { 1870 .ndo_init = ip6_tnl_dev_init, 1871 .ndo_uninit = ip6_tnl_dev_uninit, 1872 .ndo_start_xmit = ip6_tnl_start_xmit, 1873 .ndo_siocdevprivate = ip6_tnl_siocdevprivate, 1874 .ndo_change_mtu = ip6_tnl_change_mtu, 1875 .ndo_get_stats64 = dev_get_tstats64, 1876 .ndo_get_iflink = ip6_tnl_get_iflink, 1877 .ndo_fill_forward_path = ip6_tnl_fill_forward_path, 1878 }; 1879 1880 #define IPXIPX_FEATURES (NETIF_F_SG | \ 1881 NETIF_F_FRAGLIST | \ 1882 NETIF_F_HIGHDMA | \ 1883 NETIF_F_GSO_SOFTWARE | \ 1884 NETIF_F_HW_CSUM) 1885 1886 /** 1887 * ip6_tnl_dev_setup - setup virtual tunnel device 1888 * @dev: virtual device associated with tunnel 1889 * 1890 * Description: 1891 * Initialize function pointers and device parameters 1892 **/ 1893 1894 static void ip6_tnl_dev_setup(struct net_device *dev) 1895 { 1896 dev->netdev_ops = &ip6_tnl_netdev_ops; 1897 dev->header_ops = &ip_tunnel_header_ops; 1898 dev->needs_free_netdev = true; 1899 dev->priv_destructor = ip6_dev_free; 1900 1901 dev->type = ARPHRD_TUNNEL6; 1902 dev->flags |= IFF_NOARP; 1903 dev->addr_len = sizeof(struct in6_addr); 1904 dev->lltx = true; 1905 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 1906 netif_keep_dst(dev); 1907 1908 dev->features |= IPXIPX_FEATURES; 1909 
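	/* Advertise the same feature set in hw_features below so these
	 * offloads remain user-toggleable via ethtool.
	 */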
dev->hw_features |= IPXIPX_FEATURES; 1910 1911 /* This perm addr will be used as interface identifier by IPv6 */ 1912 dev->addr_assign_type = NET_ADDR_RANDOM; 1913 eth_random_addr(dev->perm_addr); 1914 } 1915 1916 1917 /** 1918 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices 1919 * @dev: virtual device associated with tunnel 1920 **/ 1921 1922 static inline int 1923 ip6_tnl_dev_init_gen(struct net_device *dev) 1924 { 1925 struct ip6_tnl *t = netdev_priv(dev); 1926 int ret; 1927 int t_hlen; 1928 1929 t->dev = dev; 1930 1931 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL); 1932 if (ret) 1933 return ret; 1934 1935 ret = gro_cells_init(&t->gro_cells, dev); 1936 if (ret) 1937 goto destroy_dst; 1938 1939 t->tun_hlen = 0; 1940 t->hlen = t->encap_hlen + t->tun_hlen; 1941 t_hlen = t->hlen + sizeof(struct ipv6hdr); 1942 1943 dev->type = ARPHRD_TUNNEL6; 1944 dev->mtu = ETH_DATA_LEN - t_hlen; 1945 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1946 dev->mtu -= 8; 1947 dev->min_mtu = ETH_MIN_MTU; 1948 dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen; 1949 1950 netdev_hold(dev, &t->dev_tracker, GFP_KERNEL); 1951 netdev_lockdep_set_classes(dev); 1952 return 0; 1953 1954 destroy_dst: 1955 dst_cache_destroy(&t->dst_cache); 1956 1957 return ret; 1958 } 1959 1960 /** 1961 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices 1962 * @dev: virtual device associated with tunnel 1963 **/ 1964 1965 static int ip6_tnl_dev_init(struct net_device *dev) 1966 { 1967 struct ip6_tnl *t = netdev_priv(dev); 1968 int err = ip6_tnl_dev_init_gen(dev); 1969 1970 if (err) 1971 return err; 1972 ip6_tnl_link_config(t); 1973 if (t->parms.collect_md) 1974 netif_keep_dst(dev); 1975 return 0; 1976 } 1977 1978 /** 1979 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device 1980 * @dev: fallback device 1981 * 1982 * Return: 0 1983 **/ 1984 1985 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) 1986 { 1987 struct ip6_tnl *t = netdev_priv(dev); 1988 struct net *net = dev_net(dev); 1989 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1990 1991 t->net = net; 1992 t->parms.proto = IPPROTO_IPV6; 1993 1994 rcu_assign_pointer(ip6n->tnls_wc[0], t); 1995 return 0; 1996 } 1997 1998 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[], 1999 struct netlink_ext_ack *extack) 2000 { 2001 u8 proto; 2002 2003 if (!data || !data[IFLA_IPTUN_PROTO]) 2004 return 0; 2005 2006 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); 2007 if (proto != IPPROTO_IPV6 && 2008 proto != IPPROTO_IPIP && 2009 proto != 0) 2010 return -EINVAL; 2011 2012 return 0; 2013 } 2014 2015 static void ip6_tnl_netlink_parms(struct nlattr *data[], 2016 struct __ip6_tnl_parm *parms) 2017 { 2018 memset(parms, 0, sizeof(*parms)); 2019 2020 if (!data) 2021 return; 2022 2023 if (data[IFLA_IPTUN_LINK]) 2024 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); 2025 2026 if (data[IFLA_IPTUN_LOCAL]) 2027 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]); 2028 2029 if (data[IFLA_IPTUN_REMOTE]) 2030 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]); 2031 2032 if (data[IFLA_IPTUN_TTL]) 2033 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]); 2034 2035 if (data[IFLA_IPTUN_ENCAP_LIMIT]) 2036 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]); 2037 2038 if (data[IFLA_IPTUN_FLOWINFO]) 2039 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]); 2040 2041 if (data[IFLA_IPTUN_FLAGS]) 2042 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]); 2043 2044 if (data[IFLA_IPTUN_PROTO]) 2045 
parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); 2046 2047 if (data[IFLA_IPTUN_COLLECT_METADATA]) 2048 parms->collect_md = true; 2049 2050 if (data[IFLA_IPTUN_FWMARK]) 2051 parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); 2052 } 2053 2054 static int ip6_tnl_newlink(struct net_device *dev, 2055 struct rtnl_newlink_params *params, 2056 struct netlink_ext_ack *extack) 2057 { 2058 struct nlattr **data = params->data; 2059 struct nlattr **tb = params->tb; 2060 struct ip_tunnel_encap ipencap; 2061 struct ip6_tnl_net *ip6n; 2062 struct ip6_tnl *nt, *t; 2063 struct net *net; 2064 int err; 2065 2066 net = params->link_net ? : dev_net(dev); 2067 ip6n = net_generic(net, ip6_tnl_net_id); 2068 nt = netdev_priv(dev); 2069 nt->net = net; 2070 2071 if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { 2072 err = ip6_tnl_encap_setup(nt, &ipencap); 2073 if (err < 0) 2074 return err; 2075 } 2076 2077 ip6_tnl_netlink_parms(data, &nt->parms); 2078 2079 if (nt->parms.collect_md) { 2080 if (rtnl_dereference(ip6n->collect_md_tun)) 2081 return -EEXIST; 2082 } else { 2083 t = ip6_tnl_locate(net, &nt->parms, 0); 2084 if (!IS_ERR(t)) 2085 return -EEXIST; 2086 } 2087 2088 err = ip6_tnl_create2(dev); 2089 if (!err && tb[IFLA_MTU]) 2090 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); 2091 2092 return err; 2093 } 2094 2095 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], 2096 struct nlattr *data[], 2097 struct netlink_ext_ack *extack) 2098 { 2099 struct ip6_tnl *t = netdev_priv(dev); 2100 struct __ip6_tnl_parm p; 2101 struct net *net = t->net; 2102 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 2103 struct ip_tunnel_encap ipencap; 2104 2105 if (dev == ip6n->fb_tnl_dev) { 2106 if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { 2107 /* iproute2 always sets TUNNEL_ENCAP_FLAG_CSUM6, so 2108 * let's ignore this flag. 
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev) {
		if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
			/* iproute2 always sets TUNNEL_ENCAP_FLAG_CSUM6, so
			 * let's ignore this flag.
			 */
			ipencap.flags &= ~TUNNEL_ENCAP_FLAG_CSUM6;
			if (memchr_inv(&ipencap, 0, sizeof(ipencap))) {
				NL_SET_ERR_MSG(extack,
					       "Only protocol can be changed for fallback tunnel, not encap params");
				return -EINVAL;
			}
		}

		ip6_tnl_netlink_parms(data, &p);
		if (ip6_tnl0_update(t, &p, true) < 0) {
			NL_SET_ERR_MSG(extack,
				       "Only protocol can be changed for fallback tunnel");
			return -EINVAL;
		}

		return 0;
	}

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	ip6_tnl_update(t, &p);
	return 0;
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
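/* Note: ip6_tnl_get_size() and ip6_tnl_fill_info() must stay in sync;
 * every attribute accounted for in the size estimate is emitted by the
 * fill routine, otherwise the netlink link dump could overrun the
 * buffer reserved for it.
 */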
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return READ_ONCE(tunnel->net);
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind = "ip6tnl",
	.maxtype = IFLA_IPTUN_MAX,
	.policy = ip6_tnl_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6_tnl_dev_setup,
	.validate = ip6_tnl_validate,
	.newlink = ip6_tnl_newlink,
	.changelink = ip6_tnl_changelink,
	.dellink = ip6_tnl_dellink,
	.get_size = ip6_tnl_get_size,
	.fill_info = ip6_tnl_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler = ip4ip6_rcv,
	.err_handler = ip4ip6_err,
	.priority = 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler = ip6ip6_rcv,
	.err_handler = ip6ip6_err,
	.priority = 1,
};

static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
	.handler = mplsip6_rcv,
	.err_handler = mplsip6_err,
	.priority = 1,
};
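/* The three xfrm6_tunnel handlers above terminate IPv4-in-IPv6,
 * IPv6-in-IPv6 and (when CONFIG_MPLS is enabled) MPLS-in-IPv6 packets.
 * They are attached to their respective address families in
 * ip6_tunnel_init() below via xfrm6_tunnel_register().
 */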
static void __net_exit ip6_tnl_exit_rtnl_net(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_net_dereference(net, ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);

			t = rtnl_net_dereference(net, t->next);
		}
	}

	t = rtnl_net_dereference(net, ip6n->tnls_wc[0]);
	while (t) {
		/* If dev is in the same netns, it has already
		 * been added to the list by the previous loop.
		 */
		if (!net_eq(dev_net(t->dev), net))
			unregister_netdevice_queue(t->dev, list);

		t = rtnl_net_dereference(net, t->next);
	}
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	if (!net_has_fallback_tunnels(net))
		return 0;
	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->netns_immutable = true;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_rtnl = ip6_tnl_exit_rtnl_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};
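/* Per-netns hooks: .init creates the "ip6tnl0" fallback device (unless
 * fallback tunnels are disabled for the namespace), while .exit_rtnl
 * runs with RTNL held and queues every tunnel device tied to the dying
 * namespace for batched unregistration.
 */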
mplsip6\n", __func__); 2440 unregister_pernet_device(&ip6_tnl_net_ops); 2441 } 2442 2443 module_init(ip6_tunnel_init); 2444 module_exit(ip6_tunnel_cleanup); 2445