/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}
/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *	@remote: the address of the tunnel exit-point
 *	@local: the address of the tunnel entry-point
 *
 * Return:
 *	tunnel matching given end-points if found,
 *	else fallback tunnel if its device is up,
 *	else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
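/*
 * Illustrative sketch (not part of the original driver): the lookup walks
 * RCU-protected hash chains, so a caller must hold rcu_read_lock() for as
 * long as it uses the returned tunnel; ipxip6_rcv() below follows exactly
 * this pattern.  Guarded out so the module builds unchanged.
 */
#if 0	/* example only */
static bool example_is_for_us(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ip6_tnl *t;
	bool ours;

	rcu_read_lock();
	/* remote = source of the packet, local = our own tunnel endpoint */
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
	ours = t && (t->dev->flags & IFF_UP);
	rcu_read_unlock();

	return ours;
}
#endif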
/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *	@p: parameters containing tunnel end-points
 *
 * Description:
 *	ip6_tnl_bucket() returns the head of the list matching the
 *	&struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *	@t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *	@t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
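/*
 * Note on the ordering above: ip6_tnl_link() must publish the tunnel only
 * after t->next is valid, otherwise a concurrent RCU reader walking the
 * chain via for_each_ip6_tunnel_rcu() could fall off the end.  Writing
 * t->next first and then *tp, both with rcu_assign_pointer(), provides the
 * required ordering.  Conversely, ip6_tnl_unlink() only unlinks; the memory
 * is not freed until an RCU grace period has elapsed on the netdev
 * unregister path.
 */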
static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *	@net: network namespace for the new tunnel
 *	@p: tunnel parameters
 *
 * Description:
 *	Create tunnel matching given parameters.
 *
 * Return:
 *	created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	ip6_dev_free(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *	@net: network namespace to look in
 *	@p: tunnel parameters
 *	@create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *	ip6_tnl_locate() first tries to locate an existing tunnel
 *	based on @p. If this is unsuccessful, but @create is set a new
 *	tunnel device is created and registered for use.
 *
 * Return:
 *	matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *	@dev: the device to be destroyed
 *
 * Description:
 *	ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *	@skb: received socket buffer
 *	@raw: pointer to the start of the IPv6 header within @skb
 *
 * Return:
 *	0 if none was found,
 *	else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	__u8 nexthdr = ipv6h->nexthdr;
	__u16 off = sizeof(*ipv6h);

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		__u16 optlen = 0;
		struct ipv6_opt_hdr *hdr;

		if (raw + off + sizeof(*hdr) > skb->data &&
		    !pskb_may_pull(skb, raw - skb->data + off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(raw + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		if (nexthdr == NEXTHDR_DEST) {
			__u16 i = off + 2;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > off + optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)&raw[i];
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
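/*
 * Wire layout that ip6_tnl_parse_tlv_enc_lim() searches for (RFC 2473):
 * the tunnel encapsulation limit is a one-byte TLV inside a destination
 * options header following the outer IPv6 header, e.g.:
 *
 *	offset 0: next header			(e.g. IPPROTO_IPV6)
 *	offset 1: hdr ext len			(0 => 8 bytes total)
 *	offset 2: IPV6_TLV_TNL_ENCAP_LIMIT	(type 4)
 *	offset 3: length			(1)
 *	offset 4: tunnel encap limit		(nestings still allowed)
 *	offset 5: IPV6_TLV_PADN, len 1, one pad byte to fill 8 octets
 *
 * The function returns the offset of the TLV type byte ("offset 2" above,
 * relative to @raw), or 0 when no limit option is present.
 */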
459 **/ 460 461 static int 462 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, 463 u8 *type, u8 *code, int *msg, __u32 *info, int offset) 464 { 465 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data; 466 struct ip6_tnl *t; 467 int rel_msg = 0; 468 u8 rel_type = ICMPV6_DEST_UNREACH; 469 u8 rel_code = ICMPV6_ADDR_UNREACH; 470 u8 tproto; 471 __u32 rel_info = 0; 472 __u16 len; 473 int err = -ENOENT; 474 475 /* If the packet doesn't contain the original IPv6 header we are 476 in trouble since we might need the source address for further 477 processing of the error. */ 478 479 rcu_read_lock(); 480 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr); 481 if (!t) 482 goto out; 483 484 tproto = ACCESS_ONCE(t->parms.proto); 485 if (tproto != ipproto && tproto != 0) 486 goto out; 487 488 err = 0; 489 490 switch (*type) { 491 __u32 teli; 492 struct ipv6_tlv_tnl_enc_lim *tel; 493 __u32 mtu; 494 case ICMPV6_DEST_UNREACH: 495 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 496 t->parms.name); 497 rel_msg = 1; 498 break; 499 case ICMPV6_TIME_EXCEED: 500 if ((*code) == ICMPV6_EXC_HOPLIMIT) { 501 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 502 t->parms.name); 503 rel_msg = 1; 504 } 505 break; 506 case ICMPV6_PARAMPROB: 507 teli = 0; 508 if ((*code) == ICMPV6_HDR_FIELD) 509 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 510 511 if (teli && teli == *info - 2) { 512 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 513 if (tel->encap_limit == 0) { 514 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 515 t->parms.name); 516 rel_msg = 1; 517 } 518 } else { 519 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 520 t->parms.name); 521 } 522 break; 523 case ICMPV6_PKT_TOOBIG: 524 mtu = *info - offset; 525 if (mtu < IPV6_MIN_MTU) 526 mtu = IPV6_MIN_MTU; 527 t->dev->mtu = mtu; 528 529 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len); 530 if (len > mtu) { 531 rel_type = ICMPV6_PKT_TOOBIG; 532 rel_code = 0; 533 rel_info = mtu; 534 rel_msg = 1; 535 } 536 break; 537 } 538 539 *type = rel_type; 540 *code = rel_code; 541 *info = rel_info; 542 *msg = rel_msg; 543 544 out: 545 rcu_read_unlock(); 546 return err; 547 } 548 549 static int 550 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 551 u8 type, u8 code, int offset, __be32 info) 552 { 553 int rel_msg = 0; 554 u8 rel_type = type; 555 u8 rel_code = code; 556 __u32 rel_info = ntohl(info); 557 int err; 558 struct sk_buff *skb2; 559 const struct iphdr *eiph; 560 struct rtable *rt; 561 struct flowi4 fl4; 562 563 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, 564 &rel_msg, &rel_info, offset); 565 if (err < 0) 566 return err; 567 568 if (rel_msg == 0) 569 return 0; 570 571 switch (rel_type) { 572 case ICMPV6_DEST_UNREACH: 573 if (rel_code != ICMPV6_ADDR_UNREACH) 574 return 0; 575 rel_type = ICMP_DEST_UNREACH; 576 rel_code = ICMP_HOST_UNREACH; 577 break; 578 case ICMPV6_PKT_TOOBIG: 579 if (rel_code != 0) 580 return 0; 581 rel_type = ICMP_DEST_UNREACH; 582 rel_code = ICMP_FRAG_NEEDED; 583 break; 584 case NDISC_REDIRECT: 585 rel_type = ICMP_REDIRECT; 586 rel_code = ICMP_REDIR_HOST; 587 default: 588 return 0; 589 } 590 591 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr))) 592 return 0; 593 594 skb2 = skb_clone(skb, GFP_ATOMIC); 595 if (!skb2) 596 return 0; 597 598 skb_dst_drop(skb2); 599 600 skb_pull(skb2, offset); 601 skb_reset_network_header(skb2); 602 eiph = 
static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
		/* fall through to the default: ip6_tnl_err() never sets
		 * rel_msg for NDISC_REDIRECT, so this case is effectively
		 * unreachable and nothing is relayed for redirects.
		 */
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST | IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST | IPV6_ADDR_MULTICAST) &&
		   !((ltype | rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype | rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype & IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype & IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
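/*
 * ip6_tnl_get_cap() in a nutshell: with a wildcard (any) address on either
 * end, the decision is deferred to per-packet checks via
 * IP6_TNL_F_CAP_PER_PACKET.  Otherwise, for sane unicast/multicast,
 * non-loopback combinations (link-local only when bound to a device via
 * p->link):
 *
 *	local unicast  -> may transmit (IP6_TNL_F_CAP_XMIT)
 *	remote unicast -> may receive  (IP6_TNL_F_CAP_RCV)
 *
 * A multicast local address therefore yields a receive-only tunnel, since
 * packets cannot be sourced from a multicast address.
 */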
/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	/* forward the caller's metadata dst instead of silently
	 * dropping it on the floor
	 */
	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};
static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = ACCESS_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;	/* don't leak the rcu lock or the skb */
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *)opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
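/*
 * Byte-for-byte, init_tel_txopt() builds this 8-byte destination options
 * header in opt->dst_opt, matching the layout parsed by
 * ip6_tnl_parse_tlv_enc_lim() above:
 *
 *	{ 0x00, 0x00, 0x04, 0x01, <limit>, 0x01, 0x01, 0x00 }
 *	  nexthdr len  TEL   len   value    PadN  len  pad
 *
 * The nexthdr byte is filled in later when ipv6_push_nfrag_opts() pushes
 * the option; opt_nflen = 8 accounts for it when ip6_tnl_xmit() sizes the
 * headroom and the path MTU.
 */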
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *	@t: the outgoing tunnel device
 *	@hdr: IPv6 header from the incoming packet
 *
 * Description:
 *	Avoid trivial tunneling loop by checking that tunnel exit-point
 *	doesn't match source of incoming packet.
 *
 * Return:
 *	1 if conflict,
 *	0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *	@skb: the outgoing socket buffer
 *	@dev: the outgoing tunnel device
 *	@dsfield: dscp code for outer header
 *	@fl6: flow of tunneled packet
 *	@encap_limit: encapsulation limit
 *	@pmtu: Path MTU is stored if packet is too big
 *	@proto: next header value
 *
 * Description:
 *	Build new header and do some sanity checks on the packet before
 *	sending it.
 *
 * Return:
 *	0 on success
 *	-1 fail
 *	%-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		struct in6_addr *addr6;
		struct neighbour *neigh;
		int addr_type;

		if (!skb_dst(skb))
			goto tx_err_link_failure;

		neigh = dst_neigh_lookup(skb_dst(skb),
					 &ipv6_hdr(skb)->daddr);
		if (!neigh)
			goto tx_err_link_failure;

		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY)
			addr6 = &ipv6_hdr(skb)->daddr;

		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
		neigh_release(neigh);
	} else if (!(t->parms.flags &
		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if the routing decision does
		 * not depend on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - psh_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
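	/* Worked example of the arithmetic above: with a 1500-byte route MTU
	 * and no extra encapsulation, psh_hlen = 40 (the outer IPv6 header),
	 * so mtu = 1460; carrying an encapsulation limit option costs another
	 * 8 bytes of destination options, leaving 1452 for the inner packet.
	 * The result is never allowed below IPV6_MIN_MTU (1280).
	 */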
	if (skb_dst(skb) && !t->parms.collect_md)
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					 & IPV6_TCLASS_MASK;
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
	}

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = ACCESS_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	dsfield = ipv6_get_dsfield(ipv6h);

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
	}

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
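/*
 * How the IP6_TNL_F_USE_ORIG_TCLASS copy above works at the bit level:
 * fl6.flowlabel holds the whole big-endian flow-info word, i.e.
 * (tclass << 20) | flowlabel.  For an inner IPv4 tos of 0xb8:
 *
 *	htonl(0xb8 << IPV6_TCLASS_SHIFT) & IPV6_TCLASS_MASK
 *		== htonl(0x0b800000)
 *
 * The inner traffic class thus ends up in the flow-info word of the
 * flowi6, where policy routing and xfrm lookups can match on it.
 */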
static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
		      IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *	@t: tunnel to be changed
 *	@p: tunnel configuration parameters
 *
 * Description:
 *	ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}
static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *	@dev: virtual device associated with tunnel
 *	@ifr: parameters passed from userspace
 *	@cmd: command to be performed
 *
 * Description:
 *	ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *	from userspace.
 *
 *	The possible commands are the following:
 *	%SIOCGETTUNNEL: get tunnel parameters for device
 *	%SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *	%SIOCCHGTUNNEL: change tunnel parameters to those given
 *	%SIOCDELTUNNEL: delete tunnel
 *
 *	The fallback device "ip6tnl0", created during module
 *	initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *	0 on success,
 *	%-EFAULT if unable to copy data to or from userspace,
 *	%-EPERM if current process lacks %CAP_NET_ADMIN,
 *	%-EINVAL if passed tunnel parameters are invalid,
 *	%-EEXIST if changing a tunnel's parameters would cause a conflict,
 *	%-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
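/*
 * Userspace view (illustrative, not part of this file): the ioctls above
 * are what iproute2's "ip -6 tunnel" commands use.  struct ip6_tnl_parm
 * comes from the uapi header <linux/ip6_tunnel.h>, and a minimal reader
 * looks roughly like:
 *
 *	struct ip6_tnl_parm p = { 0 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	if (ioctl(fd, SIOCGETTUNNEL, &ifr) == 0)
 *		;	/* p now holds the tunnel parameters */
 *
 * SIOCADDTUNNEL/SIOCCHGTUNNEL/SIOCDELTUNNEL follow the same pattern but
 * require CAP_NET_ADMIN, as enforced above.
 */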
/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *	@dev: virtual device associated with tunnel
 *	@new_mtu: the new mtu
 *
 * Return:
 *	0 on success,
 *	%-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPIP) {
		/* 68 is the IPv4 minimum link MTU (RFC 791) */
		if (new_mtu < 68)
			return -EINVAL;
	} else {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	}
	/* 0xFFF8 = 65528, the largest 8-byte-aligned payload below 64 KiB */
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
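/*
 * Illustrative sketch of an encapsulation provider (hypothetical names;
 * field names assumed per struct ip6_tnl_encap_ops in
 * include/net/ip6_tunnel.h, whose real in-tree user is FOU, see
 * net/ipv4/fou.c).  A provider supplies the extra header length and a
 * header builder, then claims one TUNNEL_ENCAP_* slot.  Guarded out so
 * the module builds unchanged.
 */
#if 0	/* example only */
static size_t example_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);	/* e.g. a UDP-based encapsulation */
}

static int example_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
				u8 *protocol, struct flowi6 *fl6)
{
	/* prepend the extra header here and update *protocol */
	return 0;
}

static const struct ip6_tnl_encap_ops example_encap_ops = {
	.encap_hlen	= example_encap_hlen,
	.build_header	= example_build_header,
};

/* typically called from the provider's module_init() */
static int example_register(void)
{
	return ip6_tnl_encap_add_ops(&example_encap_ops, TUNNEL_ENCAP_FOU);
}
#endif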
static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *	@dev: virtual device associated with tunnel
 *
 * Description:
 *	Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *	@dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *	@dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *	@dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}
static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;
}

static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *nt, *t;
	struct ip_tunnel_encap ipencap;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	return ip6_tnl_create2(dev);
}

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[])
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}
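/*
 * Userspace counterpart of the netlink ops above (illustrative; exact
 * syntax depends on the installed iproute2 version):
 *
 *	# plain IPv6-in-IPv6 tunnel
 *	ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2
 *
 *	# lightweight-tunnel mode backed by collect_md, one per netns
 *	ip link add ip6tnl2 type ip6tnl external
 *
 * "mode any" corresponds to proto == 0, accepting both IPIP and IPv6
 * payloads, as validated by ip6_tnl_validate() above.
 */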
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, &list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	unregister_netdevice_many(&list);
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	rtnl_lock();
	ip6_tnl_destroy_tunnels(net);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);