/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};
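/* The pernet init code (outside this excerpt) points tnls[0] at tnls_wc
 * and tnls[1] at tnls_r_l, so ip6_tnl_bucket() below can pick either the
 * single wildcard slot or a hash bucket with one tnls[prio][h] lookup.
 */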
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace in which to look the tunnel up
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else any tunnel matching one end-point with the other one
 *   being a wildcard,
 *   else the collect_md tunnel if one is registered and up,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the per-namespace tunnel state
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @ip6n: the per-namespace tunnel state
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @ip6n: the per-namespace tunnel state
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace in which to create the tunnel
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -E2BIG;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6tnl%%d");
	}
	err = -ENOMEM;
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace in which to look
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p.  If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/
static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: pointer to the outer IPv6 header within @skb
 *
 * Return:
 *   0 if none was found,
 *   else offset (from @raw) of the encapsulation limit option
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember : hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
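/* Worked example (illustrative; not upstream documentation): for an
 * ip6ip6 packet whose outer IPv6 header is followed by the 8-byte
 * destination options header built by init_tel_txopt() further below,
 * the bytes after the outer header are
 *
 *	29 00 04 01 xx 01 01 00
 *
 * i.e. nexthdr 0x29 (IPPROTO_IPV6), hdrlen 0, option type 4
 * (IPV6_TLV_TNL_ENCAP_LIMIT), option length 1, limit value xx, then a
 * PadN option.  For such a packet ip6_tnl_parse_tlv_enc_lim() returns
 * 42: the 40-byte outer IPv6 header plus 2 bytes of option header,
 * i.e. the offset of the option type byte from @raw.
 */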
/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu, teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet; keep the reference on rt until its
	 * rt_flags have been examined
	 */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, skb2, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						    0, IFA_F_TENTATIVE))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
						     0, IFA_F_TENTATIVE))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						const struct ipv6hdr *ipv6h,
						struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
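/* How dst_opt[] maps onto the wire (descriptive note; the field roles
 * are inferred from the push path rather than spelled out upstream):
 * bytes 0-1 are the destination options header's nexthdr and hdrlen
 * fields; nexthdr is filled in by ipv6_push_frag_opts() when the
 * header is pushed, and hdrlen stays 0 from the memset, which is
 * correct for an 8-octet header.  Bytes 2-4 carry the encapsulation
 * limit TLV (type, length, value) and bytes 5-7 a PadN option that
 * pads the header out to 8 octets.
 */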
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if (t->parms.collect_md)
		return 1;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						      0, IFA_F_TENTATIVE)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
							  true, 0, IFA_F_TENTATIVE)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before
 *   sending it.
 *
 * Return:
 *   0 on success
 *   -1 on failure
 *   %-EMSGSIZE if the message is too big; the MTU is returned via @pmtu
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
		       IPV6_MIN_MTU : IPV4_MIN_MTU);

	skb_dst_update_pmtu(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}
	hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv4_get_dsfield(iph);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
		ipv6h = ipv6_hdr(skb);
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv6_get_dsfield(ipv6h);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process does not have %CAP_NET_ADMIN,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
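/* Illustrative userspace sketch (assumed usage, not code from this
 * file): adding a tunnel through the fallback device with the ioctl
 * interface above.  Error handling is omitted and "fd" is any open
 * socket descriptor.
 *
 *	struct ip6_tnl_parm p = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	p.proto = IPPROTO_IPV6;
 *	strcpy(p.name, "mytun");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */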
/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPV6) {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	}
	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
			return -EINVAL;
	} else {
		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
			return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
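/* Sketch of how an encapsulation module hooks in here (hedged: fou6.c
 * in the tree does essentially this, but the ops table shown is
 * illustrative, not copied from it):
 *
 *	static const struct ip6_tnl_encap_ops fou_ops = {
 *		.encap_hlen	= fou_encap_hlen,
 *		.build_header	= fou6_build_header,
 *	};
 *
 *	err = ip6_tnl_encap_add_ops(&fou_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip6_tnl_encap_del_ops(&fou_ops, TUNNEL_ENCAP_FOU);
 */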
static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
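/* The attributes parsed above are what iproute2 sends for a command
 * like (illustrative):
 *
 *	ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 encaplimit 4
 *
 * IFLA_IPTUN_LOCAL/REMOTE carry the addresses, IFLA_IPTUN_PROTO the
 * mode (IPPROTO_IPV6 for ip6ip6) and IFLA_IPTUN_ENCAP_LIMIT the limit.
 */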
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;
	struct ip6_tnl *nt, *t;
	int err;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	err = ip6_tnl_create2(dev);
	if (!err && tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};
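/*
 * Illustrative example (editorial, not part of the original source): the
 * rtnl_link_ops registered above are exercised from userspace through the
 * generic RTM_NEWLINK/RTM_GETLINK/RTM_DELLINK paths, e.g.:
 *
 *	ip -d link show dev ip6tnl1	# dumped via ip6_tnl_fill_info()
 *	ip link set dev ip6tnl1 mtu 1452	# generic IFLA_MTU handling
 *	ip link del dev ip6tnl1		# torn down via ip6_tnl_dellink()
 */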
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	if (!net_has_fallback_tunnels(net))
		return 0;
	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6_tnl_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_batch = ip6_tnl_exit_batch_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}
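/*
 * Illustrative note (editorial, not part of the original source): loading
 * the module (e.g. "modprobe ip6_tunnel") runs ip6_tunnel_init(), which
 * registers the per-netns operations and thereby creates the "ip6tnl0"
 * fallback device in every namespace that permits fallback tunnels. The
 * error labels above unwind strictly in reverse registration order, the
 * usual kernel pattern for multi-step init.
 */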
module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);