/*
 * IPv6 virtual tunneling interface
 *
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Based on:
 * net/ipv6/ip6_tunnel.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>

#define IP6_VTI_HASH_SIZE_SHIFT 5
#define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_VTI_HASH_SIZE_SHIFT);
}

static int vti6_dev_init(struct net_device *dev);
static void vti6_dev_setup(struct net_device *dev);
static struct rtnl_link_ops vti6_link_ops __read_mostly;

static unsigned int vti6_net_id __read_mostly;
struct vti6_net {
	/* the vti6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_VTI_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
};

#define for_each_vti6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/**
 * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/
static struct ip6_tnl *
vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
		const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	struct in6_addr any;

	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * vti6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   vti6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/
static struct ip6_tnl __rcu **
vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

static void
vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void
vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = vti6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void vti6_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static int vti6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err;

	dev->rtnl_link_ops = &vti6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	vti6_tnl_link(ip6n, t);

	return 0;

out:
	return err;
}

static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6_vti%%d");
	}

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);

	err = vti6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return NULL;
}

/**
 * vti6_locate - find or create tunnel matching given parameters
 *   @net: network namespace
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   vti6_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or NULL
 **/
static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
				   int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	for (tp = vti6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return NULL;

			return t;
		}
	}
	if (!create)
		return NULL;
	return vti6_tnl_create(net, p);
}

/**
 * vti6_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   vti6_dev_uninit() removes tunnel from its list
 **/
static void vti6_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct vti6_net *ip6n = net_generic(t->net, vti6_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		vti6_tnl_unlink(ip6n, t);
	dev_put(dev);
}

static int vti6_rcv(struct sk_buff *skb)
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

	rcu_read_lock();
	t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
	if (t) {
		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
			rcu_read_unlock();
			goto discard;
		}

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			rcu_read_unlock();
			return 0;
		}

		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
			t->dev->stats.rx_dropped++;
			rcu_read_unlock();
			goto discard;
		}

		rcu_read_unlock();

		return xfrm6_rcv_tnl(skb, t);
	}
	rcu_read_unlock();
	return -EINVAL;
discard:
	kfree_skb(skb);
	return 0;
}

static int vti6_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct pcpu_sw_netstats *tstats;
	struct xfrm_state *x;
	struct xfrm_mode *inner_mode;
	struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
	u32 orig_mark = skb->mark;
	int ret;

	if (!t)
		return 1;

	dev = t->dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	x = xfrm_input_state(skb);

	inner_mode = x->inner_mode;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);
			return -EINVAL;
		}
	}

	family = inner_mode->afinfo->family;

	skb->mark = be32_to_cpu(t->parms.i_key);
	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
	skb->mark = orig_mark;

	if (!ret)
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
	skb->dev = dev;

	tstats = this_cpu_ptr(dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

/**
 * vti6_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool
vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static bool vti6_state_check(const struct xfrm_state *x,
			     const struct in6_addr *dst,
			     const struct in6_addr *src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)dst;
	xfrm_address_t *saddr = (xfrm_address_t *)src;

	/* If there is no transform, or the xfrm is not in tunnel mode,
	 * this tunnel is not functional.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET6)
		return false;

	if (ipv6_addr_any(dst))
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET6);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET6))
		return false;

	return true;
}

/**
 * vti6_xmit - send a packet
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @fl: the flow information for the xfrm_lookup
 **/
static int
vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;
	struct xfrm_state *x;
	int pkt_len = skb->len;
	int err = -1;
	int mtu;

	if (!dst)
		goto tx_err_link_failure;

	dst_hold(dst);
	dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
		goto tx_err_link_failure;

	if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr,
			      (const struct in6_addr *)&x->id.daddr))
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst);
	if (!skb->ignore_df && skb->len > mtu) {
		skb_dst_update_pmtu(skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		} else {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		}

		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(t->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}

static netdev_tx_t
vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		ipv6h = ipv6_hdr(skb);

		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
		    vti6_addr_conflict(t, ipv6h))
			goto tx_err;

		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	default:
		goto tx_err;
	}

	/* override mark with tunnel output key */
	fl.flowi_mark = be32_to_cpu(t->parms.o_key);

	ret = vti6_xmit(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	__be32 spi;
	__u32 mark;
	struct xfrm_state *x;
	struct ip6_tnl *t;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	int protocol = iph->nexthdr;

	t = vti6_tnl_lookup(dev_net(skb->dev), &iph->daddr, &iph->saddr);
	if (!t)
		return -1;

	mark = be32_to_cpu(t->parms.o_key);

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *tdev = NULL;
	int mtu;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
		      IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	if (keep_mtu && dev->mtu) {
		dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu);
		return;
	}

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (rt)
			tdev = rt->dst.dev;
		ip6_rt_put(rt);
	}

	if (!tdev && p->link)
		tdev = __dev_get_by_index(t->net, p->link);

	if (tdev)
		mtu = tdev->mtu - sizeof(struct ipv6hdr);
	else
		mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr);

	dev->mtu = max_t(int, mtu, IPV4_MIN_MTU);
}

/**
 * vti6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *   @keep_mtu: MTU was set from userspace, don't re-compute it
 *
 * Description:
 *   vti6_tnl_change() updates the tunnel parameters
 **/
static int
vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
		bool keep_mtu)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.link = p->link;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	vti6_link_config(t, keep_mtu);
	return 0;
}

static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
		       bool keep_mtu)
{
	struct net *net = dev_net(t->dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err;

	vti6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = vti6_tnl_change(t, p, keep_mtu);
	vti6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static void
vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->proto = u->proto;

	memcpy(p->name, u->name, sizeof(u->name));
}

static void
vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	if (u->i_key)
		u->i_flags |= GRE_KEY;
	if (u->o_key)
		u->o_flags |= GRE_KEY;
	u->proto = p->proto;

	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * vti6_ioctl - configure vti6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   vti6_ioctl() is used for managing vti6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6_vti0", created during module
 *   initialization, can be used for creating other tunnel devices.
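 *
 *   For illustration only (not part of the original documentation): a
 *   userspace sketch of adding a tunnel through the fallback device. The
 *   device name and addresses below are made up; the needed headers are
 *   <linux/ip6_tunnel.h>, <linux/if_tunnel.h>, <net/if.h>, <sys/ioctl.h>,
 *   <sys/socket.h> and <arpa/inet.h>.
 *
 *	struct ip6_tnl_parm2 p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strncpy(p.name, "vti6_test", IFNAMSIZ - 1);
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strncpy(ifr.ifr_name, "ip6_vti0", IFNAMSIZ - 1);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
 *		perror("SIOCADDTUNNEL");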
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/
static int
vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = NULL;
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			vti6_parm_from_user(&p1, &p);
			t = vti6_locate(net, &p1, 0);
		} else {
			memset(&p, 0, sizeof(p));
		}
		if (!t)
			t = netdev_priv(dev);
		vti6_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != 0)
			break;
		vti6_parm_from_user(&p1, &p);
		t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);

			err = vti6_update(t, &p1, false);
		}
		if (t) {
			err = 0;
			vti6_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			vti6_parm_from_user(&p1, &p);
			t = vti6_locate(net, &p1, 0);
			if (!t)
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

static const struct net_device_ops vti6_netdev_ops = {
	.ndo_init	= vti6_dev_init,
	.ndo_uninit	= vti6_dev_uninit,
	.ndo_start_xmit = vti6_tnl_xmit,
	.ndo_do_ioctl	= vti6_ioctl,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

/**
 * vti6_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/
static void vti6_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &vti6_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = vti6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->min_mtu = IPV4_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * vti6_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/
static inline int vti6_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

/**
 * vti6_dev_init - initializer for all non-fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/
static int vti6_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = vti6_dev_init_gen(dev);

	if (err)
		return err;
	vti6_link_config(t, true);
	return 0;
}

/**
 * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/
static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int vti6_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	return 0;
}

static void vti6_netlink_parms(struct nlattr *data[],
			       struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_VTI_REMOTE]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

static int vti6_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *nt;

	nt = netdev_priv(dev);
	vti6_netlink_parms(data, &nt->parms);

	nt->parms.proto = IPPROTO_IPV6;

	if (vti6_locate(net, &nt->parms, 0))
		return -EEXIST;

	return vti6_tnl_create2(dev);
}

static void vti6_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t;
	struct __ip6_tnl_parm p;
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	vti6_netlink_parms(data, &p);

	t = vti6_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return vti6_update(t, &p, tb && tb[IFLA_MTU]);
}

static size_t vti6_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_FWMARK */
		nla_total_size(4) +
		0;
}

static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key) ||
	    nla_put_u32(skb, IFLA_VTI_FWMARK, parm->fwmark))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VTI_REMOTE]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_FWMARK]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops vti6_link_ops __read_mostly = {
	.kind		= "vti6",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti6_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= vti6_dev_setup,
	.validate	= vti6_validate,
	.newlink	= vti6_newlink,
	.dellink	= vti6_dellink,
	.changelink	= vti6_changelink,
	.get_size	= vti6_get_size,
	.fill_info	= vti6_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
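
/*
 * With these link ops registered under the kind "vti6", tunnels can also
 * be created and reconfigured over rtnetlink, e.g. with iproute2:
 * "ip link add <name> type vti6 local <addr> remote <addr> key <key>",
 * in addition to the ioctl interface above. The command line is only an
 * illustration, not taken from this file.
 */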

static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
					    struct list_head *list)
{
	int h;
	struct ip6_tnl *t;

	for (h = 0; h < IP6_VTI_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	unregister_netdevice_queue(t->dev, list);
}

static int __net_init vti6_init_net(struct net *net)
{
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
					NET_NAME_UNKNOWN, vti6_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;

	err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
{
	struct vti6_net *ip6n;
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		ip6n = net_generic(net, vti6_net_id);
		vti6_destroy_tunnels(ip6n, &list);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vti6_net_ops = {
	.init = vti6_init_net,
	.exit_batch = vti6_exit_batch_net,
	.id   = &vti6_net_id,
	.size = sizeof(struct vti6_net),
};

static struct xfrm6_protocol vti_esp6_protocol __read_mostly = {
	.handler	= vti6_rcv,
	.cb_handler	= vti6_rcv_cb,
	.err_handler	= vti6_err,
	.priority	= 100,
};

static struct xfrm6_protocol vti_ah6_protocol __read_mostly = {
	.handler	= vti6_rcv,
	.cb_handler	= vti6_rcv_cb,
	.err_handler	= vti6_err,
	.priority	= 100,
};

static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
	.handler	= vti6_rcv,
	.cb_handler	= vti6_rcv_cb,
	.err_handler	= vti6_err,
	.priority	= 100,
};

/**
 * vti6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/
static int __init vti6_tunnel_init(void)
{
	const char *msg;
	int err;

	msg = "tunnel device";
	err = register_pernet_device(&vti6_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "tunnel protocols";
	err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&vti6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	unregister_pernet_device(&vti6_net_ops);
pernet_dev_failed:
	pr_err("vti6 init: failed to register %s\n", msg);
	return err;
}

/**
 * vti6_tunnel_cleanup - free resources and unregister protocol
 **/
static void __exit vti6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&vti6_link_ops);
	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
	unregister_pernet_device(&vti6_net_ops);
}

module_init(vti6_tunnel_init);
module_exit(vti6_tunnel_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti6");
MODULE_ALIAS_NETDEV("ip6_vti0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("IPv6 virtual tunnel interface");