// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 tunneling device
 * Linux INET6 implementation
 *
 * Authors:
 * Ville Nuorvala <vnuorval@tcs.hut.fi>
 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
 *
 * Based on:
 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 * RFC 2473
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netdev_lock.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_MAX_DEST_TLVS 8

#define IP6_TUNNEL_HASH_SIZE_SHIFT 5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static inline int ip6_tnl_mpls_supported(void)
{
	return IS_ENABLED(CONFIG_MPLS);
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 * @net: network namespace
 * @link: ifindex of underlying interface
 * @remote: the address of the tunnel exit-point
 * @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, int link,
	       const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip_tunnel_rcu(t, ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else
			cand = t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip_tunnel_rcu(t, ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_any(&t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = HASH(remote, &any);
	for_each_ip_tunnel_rcu(t, ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !ipv6_addr_any(&t->parms.laddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (cand)
		return cand;

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
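
/*
 * Illustrative note (ours, not kernel-doc): given two UP tunnels with
 * identical end-points, one bound to the underlying device (parms.link ==
 * the @link ifindex) and one wildcard (parms.link == 0), the loops above
 * prefer the exact link match and keep the wildcard only as a candidate.
 * Likewise, fully-specified end-points beat local-only matches, which in
 * turn beat remote-only matches, because later loops fill @cand only when
 * no earlier, more specific candidate was found.
 */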

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 * @ip6n: the private data for ip6_tnl in the netns
 * @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}
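
/*
 * Example (ours, for illustration): a tunnel with both end-points unset
 * lands in the single wildcard bucket (&ip6n->tnls[0][0], i.e. tnls_wc);
 * any tunnel with at least one end-point set is hashed into one of the
 * IP6_TUNNEL_HASH_SIZE buckets of tnls[1] (tnls_r_l), e.g.
 *
 *	p->laddr = 2001:db8::1, p->raddr = 2001:db8::2
 *	=> &ip6n->tnls[1][HASH(&p->raddr, &p->laddr)]
 */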

/**
 * ip6_tnl_link - add tunnel to hash table
 * @ip6n: the private data for ip6_tnl in the netns
 * @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 * @ip6n: the private data for ip6_tnl in the netns
 * @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6_tnl_net *ip6n = net_generic(t->net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 * @net: network namespace
 * @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -E2BIG;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strscpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6tnl%%d");
	}
	err = -ENOMEM;
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 * @net: network namespace
 * @p: tunnel parameters
 * @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    p->link == t->parms.link) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 * @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	netdev_put(dev, &t->dev_tracker);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 * @skb: received socket buffer
 * @raw: the ICMPv6 error message data
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = ipv6_authlen(hdr);
		} else {
			optlen = ipv6_optlen(hdr);
		}

		if (!pskb_may_pull(skb, off + optlen))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
		}
		if (nexthdr == NEXTHDR_DEST) {
			int tlv_cnt = 0;
			u16 i = 2;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				if (unlikely(tlv_cnt++ >= IP6_TUNNEL_MAX_DEST_TLVS))
					break;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
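
/*
 * Worked example (ours): for a packet whose first extension header is a
 * destination options header carrying only the tunnel encapsulation limit
 * option, the bytes following the IPv6 header look like
 *
 *	octet 0: next header		octet 1: hdr ext len (0 => 8 octets)
 *	octet 2: type 4 (IPV6_TLV_TNL_ENCAP_LIMIT)
 *	octet 3: length 1		octet 4: encap limit value
 *	octets 5-7: PadN padding
 *
 * so the scan above starts at i = 2 within the options area and returns
 * i + off - nhoff, i.e. 42 (40-byte IPv6 header + 2) for this minimal
 * layout, the offset of the TLV relative to @raw.
 */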

/* ip6_tnl_err() should handle errors in the tunnel according to the
 * specifications in RFC 2473.
 */
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	}
	case ICMPV6_PKT_TOOBIG: {
		__u32 mtu;

		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	/* Remove debris left by IPv6 stack. */
	memset(IPCB(skb2), 0, sizeof(*IPCB(skb2)));

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);
	if (eiph->version != 4 || eiph->ihl < 5)
		goto out;

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP,
				   eiph->tos & INET_DSCP_MASK, 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;
	ip_rt_put(rt);

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP,
					   eiph->tos & INET_DSCP_MASK, 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr,
				   ip4h_dscp(eiph), skb2->dev) ||
		    skb_dst_dev(skb2)->type != ARPHRD_TUNNEL6)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst6_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu_no_confirm(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, skb2, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int
mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	    u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	return err;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}
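
/*
 * Reference note (ours, per RFC 6040 normal-mode decapsulation, which
 * IP6_ECN_decapsulate() implements): if the outer header carries CE and
 * the inner packet is ECN-capable, CE is propagated to the inner header;
 * if the inner packet is Not-ECT, the frame is dropped (the err > 1
 * return handled in __ip6_tnl_rcv() below). A Not-ECT outer header never
 * modifies the inner ECN field.
 */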

static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					       const struct ipv6hdr *ipv6h,
					       struct sk_buff *skb)
{
	/* ECN is not supported in AF_MPLS */
	return 0;
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
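
/*
 * Example (ours): a tunnel configured with a global unicast local address
 * and an unspecified (::) remote gets IP6_TNL_F_CAP_PER_PACKET, so the
 * capability is re-evaluated per packet; a pair of global unicast
 * end-points gets both IP6_TNL_F_CAP_XMIT and IP6_TNL_F_CAP_RCV; and
 * link-local end-points qualify only when the tunnel is bound to an
 * underlying device (p->link != 0).
 */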

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						    0, IFA_F_TENTATIVE))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
						     0, IFA_F_TENTATIVE))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	const struct ipv6hdr *ipv6h;
	int nh, err;

	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
	    test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
		DEV_STATS_INC(tunnel->dev, rx_errors);
		goto drop;
	}

	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
		if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
			DEV_STATS_INC(tunnel->dev, rx_errors);
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			DEV_STATS_INC(tunnel->dev, rx_length_errors);
			DEV_STATS_INC(tunnel->dev, rx_errors);
			goto drop;
		}

		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
		skb_reset_mac_header(skb);
	}

	/* Save offset of outer header relative to skb->head,
	 * because we are going to reset the network header to the inner header
	 * and might change skb->head.
	 */
	nh = skb_network_header(skb) - skb->head;

	skb_reset_network_header(skb);

	if (skb_vlan_inet_prepare(skb, true)) {
		DEV_STATS_INC(tunnel->dev, rx_length_errors);
		DEV_STATS_INC(tunnel->dev, rx_errors);
		goto drop;
	}

	/* Get the outer header. */
	ipv6h = (struct ipv6hdr *)(skb->head + nh);

	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
			DEV_STATS_INC(tunnel->dev, rx_errors);
			goto drop;
		}
	}

	dev_sw_netstats_rx_add(tunnel->dev, skb->len);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
				    const struct ipv6hdr *ipv6h,
				    struct sk_buff *skb);

	dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
	if (tpi->proto == htons(ETH_P_IP))
		dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;

	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static const struct tnl_ptk_info tpi_mpls = {
	/* no tunnel info required for mplsip6. */
	.proto = htons(ETH_P_MPLS_UC),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		ipv6h = ipv6_hdr(skb);
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			IP_TUNNEL_DECLARE_FLAGS(flags) = { };

			tun_dst = ipv6_tun_rx_dst(skb, flags, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

static int mplsip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls,
			  mplsip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
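
/*
 * Illustrative byte map (ours) of the destination options header built by
 * init_tel_txopt(); byte 0 (next header) is filled in when the option is
 * pushed onto the packet, and byte 1 (hdr ext len) stays 0 from the
 * memset, which encodes an 8-octet header:
 *
 *	dst_opt[2] = 4	(IPV6_TLV_TNL_ENCAP_LIMIT, RFC 2473)
 *	dst_opt[3] = 1	(option data length)
 *	dst_opt[4] = encap_limit
 *	dst_opt[5] = 1	(IPV6_TLV_PADN)
 *	dst_opt[6] = 1	(one byte of padding follows)
 *	dst_opt[7] = 0	(the padding byte itself)
 */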

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if (t->parms.collect_md)
		return 1;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						      0, IFA_F_TENTATIVE)))
			pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
					    p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
							  true, 0, IFA_F_TENTATIVE)))
			pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
					    p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 * @skb: the outgoing socket buffer
 * @dev: the outgoing tunnel device
 * @dsfield: dscp code for outer header
 * @fl6: flow of tunneled packet
 * @encap_limit: encapsulation limit
 * @pmtu: Path MTU is stored if packet is too big
 * @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 on failure
 *   %-EMSGSIZE if the packet is too big (*pmtu is set in that case)
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	__be16 payload_protocol;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	payload_protocol = skb_protocol(skb, true);

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (payload_protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		} else if (payload_protocol == htons(ETH_P_IP)) {
			const struct rtable *rt = skb_rtable(skb);

			if (!rt)
				goto tx_err_link_failure;

			if (rt->rt_gw_family == AF_INET6)
				memcpy(&fl6->daddr, &rt->rt_gw6, sizeof(fl6->daddr));
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst_dev(dst);

	if (tdev == dev) {
		DEV_STATS_INC(dev, collisions);
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst6_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
		       IPV6_MIN_MTU : IPV4_MIN_MTU);

	skb_dst_update_pmtu_no_confirm(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (hop_limit == 0) {
		if (payload_protocol == htons(ETH_P_IP))
			hop_limit = ip_hdr(skb)->ttl;
		else if (payload_protocol == htons(ETH_P_IPV6))
			hop_limit = ipv6_hdr(skb)->hop_limit;
		else
			hop_limit = ip6_dst_hoplimit(dst);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	ip_tunnel_adj_headroom(dev, max_headroom);

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		proto = ipv6_push_frag_opts(skb, &opt.ops, proto);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev, 0);
	return 0;
tx_err_link_failure:
	DEV_STATS_INC(dev, tx_carrier_errors);
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
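
/*
 * Worked example (ours): for an ip6ip6 tunnel over a 1500-byte link with
 * no extra encapsulation (encap_hlen == tun_hlen == 0) and a finite
 * encapsulation limit, ip6_tnl_xmit() above computes
 *
 *	mtu = 1500 - 0 (eth_hlen) - 40 (outer IPv6 header) - 0 (tun_hlen)
 *	    - 8 (destination options header carrying the encap limit)
 *	    = 1452
 *
 * so any non-GSO inner packet larger than 1452 bytes fails with
 * -EMSGSIZE and *pmtu set for the caller to relay in an ICMP error.
 */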

static inline int
ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
		u8 protocol)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield, orig_dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != protocol && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = protocol;
		fl6.saddr = key->u.ipv6.src;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
		switch (protocol) {
		case IPPROTO_IPIP:
			iph = ip_hdr(skb);
			orig_dsfield = ipv4_get_dsfield(iph);
			break;
		case IPPROTO_IPV6:
			ipv6h = ipv6_hdr(skb);
			orig_dsfield = ipv6_get_dsfield(ipv6h);
			break;
		default:
			orig_dsfield = dsfield;
			break;
		}
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;
		if (protocol == IPPROTO_IPV6) {
			offset = ip6_tnl_parse_tlv_enc_lim(skb,
						skb_network_header(skb));
			/* ip6_tnl_parse_tlv_enc_lim() might have
			 * reallocated skb->head
			 */
			if (offset > 0) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				tel = (void *)&skb_network_header(skb)[offset];
				if (tel->encap_limit == 0) {
					icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
							ICMPV6_HDR_FIELD, offset + 2);
					return -1;
				}
				encap_limit = tel->encap_limit - 1;
			}
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = protocol;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
		switch (protocol) {
		case IPPROTO_IPIP:
			iph = ip_hdr(skb);
			orig_dsfield = ipv4_get_dsfield(iph);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
				dsfield = orig_dsfield;
			else
				dsfield = ip6_tclass(t->parms.flowinfo);
			break;
		case IPPROTO_IPV6:
			ipv6h = ipv6_hdr(skb);
			orig_dsfield = ipv6_get_dsfield(ipv6h);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
				dsfield = orig_dsfield;
			else
				dsfield = ip6_tclass(t->parms.flowinfo);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
				fl6.flowlabel |= ip6_flowlabel(ipv6h);
			break;
		default:
			orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo);
			break;
		}
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
	dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, protocol);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			switch (protocol) {
			case IPPROTO_IPIP:
				icmp_ndo_send(skb, ICMP_DEST_UNREACH,
					      ICMP_FRAG_NEEDED, htonl(mtu));
				break;
			case IPPROTO_IPV6:
				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
				break;
			default:
				break;
			}
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	u8 ipproto;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
	case htons(ETH_P_IPV6):
		if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb)))
			goto tx_err;
		ipproto = IPPROTO_IPV6;
		break;
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
	default:
		goto tx_err;
	}

	ret = ipxip6_tnl_xmit(skb, dev, ipproto);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	DEV_STATS_INC(dev, tx_errors);
	DEV_STATS_INC(dev, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct net_device *tdev = NULL;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;
	int mtu;

	__dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);
		if (rt) {
			tdev = rt->dst.dev;
			ip6_rt_put(rt);
		}

		if (!tdev && p->link)
			tdev = __dev_get_by_index(t->net, p->link);

		if (tdev) {
			dev->needed_headroom = tdev->hard_header_len +
				tdev->needed_headroom + t_hlen;
			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);

			mtu = mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				mtu -= 8;

			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;
			WRITE_ONCE(dev->mtu, mtu);
		}
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 * @t: tunnel to be changed
 * @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static void
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
}

static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
			   bool strict)
{
	/* For the default ip6tnl0 device, allow changing only the protocol
	 * (the IP6_TNL_F_CAP_PER_PACKET flag is set on ip6tnl0, and all other
	 * parameters are 0).
	 */
	if (strict &&
	    (!ipv6_addr_any(&p->laddr) || !ipv6_addr_any(&p->raddr) ||
	     p->flags != t->parms.flags || p->hop_limit || p->encap_limit ||
	     p->flowinfo || p->link || p->fwmark || p->collect_md))
		return -EINVAL;

	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}
1578 */ 1579 if (strict && 1580 (!ipv6_addr_any(&p->laddr) || !ipv6_addr_any(&p->raddr) || 1581 p->flags != t->parms.flags || p->hop_limit || p->encap_limit || 1582 p->flowinfo || p->link || p->fwmark || p->collect_md)) 1583 return -EINVAL; 1584 1585 t->parms.proto = p->proto; 1586 netdev_state_change(t->dev); 1587 return 0; 1588 } 1589 1590 static void 1591 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) 1592 { 1593 p->laddr = u->laddr; 1594 p->raddr = u->raddr; 1595 p->flags = u->flags; 1596 p->hop_limit = u->hop_limit; 1597 p->encap_limit = u->encap_limit; 1598 p->flowinfo = u->flowinfo; 1599 p->link = u->link; 1600 p->proto = u->proto; 1601 memcpy(p->name, u->name, sizeof(u->name)); 1602 } 1603 1604 static void 1605 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p) 1606 { 1607 u->laddr = p->laddr; 1608 u->raddr = p->raddr; 1609 u->flags = p->flags; 1610 u->hop_limit = p->hop_limit; 1611 u->encap_limit = p->encap_limit; 1612 u->flowinfo = p->flowinfo; 1613 u->link = p->link; 1614 u->proto = p->proto; 1615 memcpy(u->name, p->name, sizeof(u->name)); 1616 } 1617 1618 /** 1619 * ip6_tnl_siocdevprivate - configure ipv6 tunnels from userspace 1620 * @dev: virtual device associated with tunnel 1621 * @ifr: unused 1622 * @data: parameters passed from userspace 1623 * @cmd: command to be performed 1624 * 1625 * Description: 1626 * ip6_tnl_ioctl() is used for managing IPv6 tunnels 1627 * from userspace. 1628 * 1629 * The possible commands are the following: 1630 * %SIOCGETTUNNEL: get tunnel parameters for device 1631 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters 1632 * %SIOCCHGTUNNEL: change tunnel parameters to those given 1633 * %SIOCDELTUNNEL: delete tunnel 1634 * 1635 * The fallback device "ip6tnl0", created during module 1636 * initialization, can be used for creating other tunnel devices. 
1637 * 1638 * Return: 1639 * 0 on success, 1640 * %-EFAULT if unable to copy data to or from userspace, 1641 * %-EPERM if current process hasn't %CAP_NET_ADMIN set 1642 * %-EINVAL if passed tunnel parameters are invalid, 1643 * %-EEXIST if changing a tunnel's parameters would cause a conflict 1644 * %-ENODEV if attempting to change or delete a nonexisting device 1645 **/ 1646 1647 static int 1648 ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr, 1649 void __user *data, int cmd) 1650 { 1651 int err = 0; 1652 struct ip6_tnl_parm p; 1653 struct __ip6_tnl_parm p1; 1654 struct ip6_tnl *t = netdev_priv(dev); 1655 struct net *net = t->net; 1656 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1657 1658 memset(&p1, 0, sizeof(p1)); 1659 1660 switch (cmd) { 1661 case SIOCGETTUNNEL: 1662 if (dev == ip6n->fb_tnl_dev) { 1663 if (copy_from_user(&p, data, sizeof(p))) { 1664 err = -EFAULT; 1665 break; 1666 } 1667 ip6_tnl_parm_from_user(&p1, &p); 1668 t = ip6_tnl_locate(net, &p1, 0); 1669 if (IS_ERR(t)) 1670 t = netdev_priv(dev); 1671 } else { 1672 memset(&p, 0, sizeof(p)); 1673 } 1674 ip6_tnl_parm_to_user(&p, &t->parms); 1675 if (copy_to_user(data, &p, sizeof(p))) 1676 err = -EFAULT; 1677 break; 1678 case SIOCADDTUNNEL: 1679 case SIOCCHGTUNNEL: 1680 err = -EPERM; 1681 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1682 break; 1683 err = -EFAULT; 1684 if (copy_from_user(&p, data, sizeof(p))) 1685 break; 1686 err = -EINVAL; 1687 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && 1688 p.proto != 0) 1689 break; 1690 ip6_tnl_parm_from_user(&p1, &p); 1691 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); 1692 if (cmd == SIOCCHGTUNNEL) { 1693 if (!IS_ERR(t)) { 1694 if (t->dev != dev) { 1695 err = -EEXIST; 1696 break; 1697 } 1698 } else 1699 t = netdev_priv(dev); 1700 if (dev == ip6n->fb_tnl_dev) 1701 ip6_tnl0_update(t, &p1, false); 1702 else 1703 ip6_tnl_update(t, &p1); 1704 } 1705 if (!IS_ERR(t)) { 1706 err = 0; 1707 ip6_tnl_parm_to_user(&p, &t->parms); 1708 if (copy_to_user(data, &p, sizeof(p))) 1709 err = -EFAULT; 1710 1711 } else { 1712 err = PTR_ERR(t); 1713 } 1714 break; 1715 case SIOCDELTUNNEL: 1716 err = -EPERM; 1717 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1718 break; 1719 1720 if (dev == ip6n->fb_tnl_dev) { 1721 err = -EFAULT; 1722 if (copy_from_user(&p, data, sizeof(p))) 1723 break; 1724 err = -ENOENT; 1725 ip6_tnl_parm_from_user(&p1, &p); 1726 t = ip6_tnl_locate(net, &p1, 0); 1727 if (IS_ERR(t)) 1728 break; 1729 err = -EPERM; 1730 if (t->dev == ip6n->fb_tnl_dev) 1731 break; 1732 dev = t->dev; 1733 } 1734 err = 0; 1735 unregister_netdevice(dev); 1736 break; 1737 default: 1738 err = -EINVAL; 1739 } 1740 return err; 1741 } 1742 1743 /** 1744 * ip6_tnl_change_mtu - change mtu manually for tunnel device 1745 * @dev: virtual device associated with tunnel 1746 * @new_mtu: the new mtu 1747 * 1748 * Return: 1749 * 0 on success, 1750 * %-EINVAL if mtu too small 1751 **/ 1752 1753 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) 1754 { 1755 struct ip6_tnl *tnl = netdev_priv(dev); 1756 int t_hlen; 1757 1758 t_hlen = tnl->hlen + sizeof(struct ipv6hdr); 1759 if (tnl->parms.proto == IPPROTO_IPV6) { 1760 if (new_mtu < IPV6_MIN_MTU) 1761 return -EINVAL; 1762 } else { 1763 if (new_mtu < ETH_MIN_MTU) 1764 return -EINVAL; 1765 } 1766 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { 1767 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen) 1768 return -EINVAL; 1769 } else { 1770 if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen) 1771 return 

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 * @dev: virtual device associated with tunnel
 * @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);
	int t_hlen;

	t_hlen = tnl->hlen + sizeof(struct ipv6hdr);
	if (tnl->parms.proto == IPPROTO_IPV6) {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	}
	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
			return -EINVAL;
	} else {
		if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen)
			return -EINVAL;
	}
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return READ_ONCE(t->parms.link);
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
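
/*
 * Registration sketch (ours): a UDP-style encapsulation module would
 * typically pin one of the MAX_IPTUN_ENCAP_OPS slots at init time and
 * release it on exit. The ops members below are the real fields of
 * struct ip6_tnl_encap_ops (include/net/ip6_tunnel.h); the my_*
 * callbacks are hypothetical:
 *
 *	static const struct ip6_tnl_encap_ops my_encap_ops = {
 *		.encap_hlen	= my_encap_hlen,
 *		.build_header	= my_build_header,
 *		.err_handler	= my_err_handler,
 *	};
 *
 *	err = ip6_tnl_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip6_tnl_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 */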

static int ip6_tnl_fill_forward_path(struct net_device_path_ctx *ctx,
				     struct net_device_path *path)
{
	struct ip6_tnl *t = netdev_priv(ctx->dev);
	struct flowi6 fl6 = {
		.daddr = t->parms.raddr,
	};
	struct dst_entry *dst;
	int err;

	dst = ip6_route_output(dev_net(ctx->dev), NULL, &fl6);
	if (!dst->error) {
		path->type = DEV_PATH_TUN;
		path->tun.src_v6 = t->parms.laddr;
		path->tun.dst_v6 = t->parms.raddr;
		path->tun.l3_proto = IPPROTO_IPV6;
		path->dev = ctx->dev;
		ctx->dev = dst->dev;
	}

	err = dst->error;
	dst_release(dst);

	return err;
}

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init		= ip6_tnl_dev_init,
	.ndo_uninit		= ip6_tnl_dev_uninit,
	.ndo_start_xmit		= ip6_tnl_start_xmit,
	.ndo_siocdevprivate	= ip6_tnl_siocdevprivate,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
	.ndo_fill_forward_path	= ip6_tnl_fill_forward_path,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 * @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->lltx = true;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	netif_keep_dst(dev);

	dev->features |= IPXIPX_FEATURES;
	dev->hw_features |= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 * @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		return ret;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen;

	netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
	netdev_lockdep_set_classes(dev);
	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 * @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md)
		netif_keep_dst(dev);
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 * @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->net = net;
	t->parms.proto = IPPROTO_IPV6;

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

static int ip6_tnl_newlink(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct ip_tunnel_encap ipencap;
	struct ip6_tnl_net *ip6n;
	struct ip6_tnl *nt, *t;
	struct net *net;
	int err;

	net = params->link_net ? : dev_net(dev);
	ip6n = net_generic(net, ip6_tnl_net_id);
	nt = netdev_priv(dev);
	nt->net = net;

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	err = ip6_tnl_create2(dev);
	if (!err && tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}
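
/*
 * Usage sketch (ours): the attributes parsed above map onto iproute2's
 * ip6tnl keywords, e.g. (addresses are documentation prefixes):
 *
 *	ip link add name tun1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 \
 *		encaplimit 4 tclass inherit
 *
 *	ip link add name tun2 type ip6tnl external	(collect_md mode)
 */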

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev) {
		if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
			/* iproute2 always sets TUNNEL_ENCAP_FLAG_CSUM6, so
			 * let's ignore this flag.
			 */
			ipencap.flags &= ~TUNNEL_ENCAP_FLAG_CSUM6;
			if (memchr_inv(&ipencap, 0, sizeof(ipencap))) {
				NL_SET_ERR_MSG(extack,
					       "Only protocol can be changed for fallback tunnel, not encap params");
				return -EINVAL;
			}
		}

		ip6_tnl_netlink_parms(data, &p);
		if (ip6_tnl0_update(t, &p, true) < 0) {
			NL_SET_ERR_MSG(extack,
				       "Only protocol can be changed for fallback tunnel");
			return -EINVAL;
		}

		return 0;
	}

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	ip6_tnl_update(t, &p);
	return 0;
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
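
/* Note (not in the original source): every attribute emitted by
 * ip6_tnl_fill_info() must be accounted for in ip6_tnl_get_size(),
 * otherwise the dump can overrun the space reserved for this link.
 * E.g. a hypothetical new u8 attribute ATTR would need both:
 *
 *	nla_total_size(1) +			// in ip6_tnl_get_size()
 *	nla_put_u8(skb, ATTR, val)		// in ip6_tnl_fill_info()
 */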
	return READ_ONCE(tunnel->net);
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
	.handler	= mplsip6_rcv,
	.err_handler	= mplsip6_err,
	.priority	= 1,
};
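
/* A minimal sketch (hypothetical names, mirroring the handlers above) of
 * how a payload family is wired up; this is the same pattern that
 * ip6_tunnel_init() below uses for AF_INET, AF_INET6 and AF_MPLS:
 *
 *	static struct xfrm6_tunnel example_handler __read_mostly = {
 *		.handler	= example_rcv,	// hypothetical rcv hook
 *		.err_handler	= example_err,	// hypothetical ICMPv6 hook
 *		.priority	= 1,
 *	};
 *	err = xfrm6_tunnel_register(&example_handler, AF_INET);
 */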

static void __net_exit ip6_tnl_exit_rtnl_net(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_net_dereference(net, ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);

			t = rtnl_net_dereference(net, t->next);
		}
	}

	t = rtnl_net_dereference(net, ip6n->tnls_wc[0]);
	while (t) {
		/* If dev is in the same netns, it has already
		 * been added to the list by the previous loop.
		 */
		if (!net_eq(dev_net(t->dev), net))
			unregister_netdevice_queue(t->dev, list);

		t = rtnl_net_dereference(net, t->next);
	}
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	if (!net_has_fallback_tunnels(net))
		return 0;
	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->netns_immutable = true;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_rtnl = ip6_tnl_exit_rtnl_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};
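
/* Illustrative lifecycle (not in the original source): .init runs once
 * per new network namespace, so creating a namespace allocates a fresh
 * struct ip6_tnl_net (plus an ip6tnl0 fallback device where fallback
 * tunnels are enabled), and tearing it down batches all tunnel devices
 * onto the unregistration list under RTNL via .exit_rtnl:
 *
 *	ip netns add blue	# triggers ip6_tnl_init_net() for "blue"
 *	ip netns del blue	# queues devices in ip6_tnl_exit_rtnl_net()
 */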

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}

	if (ip6_tnl_mpls_supported()) {
		err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
		if (err < 0) {
			pr_err("%s: can't register mplsip6\n", __func__);
			goto out_mplsip6;
		}
	}

	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	if (ip6_tnl_mpls_supported())
		xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
out_mplsip6:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	if (ip6_tnl_mpls_supported() &&
	    xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
		pr_info("%s: can't deregister mplsip6\n", __func__);
	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);
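
/* Usage note (not part of the original source): loading the module, e.g.
 * with "modprobe ip6_tunnel", registers the pernet operations, the
 * per-family xfrm6 tunnel handlers and the rtnl_link_ops, and creates
 * the ip6tnl0 fallback device in each namespace where fallback tunnels
 * are enabled; additional tunnels are then managed over rtnetlink as
 * shown above.  Note the error unwinding in ip6_tunnel_init() tears
 * registrations down in the reverse of registration order.
 */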