/*
 * IPv6 virtual tunneling interface
 *
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Based on:
 * net/ipv6/ip6_tunnel.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>

#define IP6_VTI_HASH_SIZE_SHIFT 5
#define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_VTI_HASH_SIZE_SHIFT);
}

static int vti6_dev_init(struct net_device *dev);
static void vti6_dev_setup(struct net_device *dev);
static struct rtnl_link_ops vti6_link_ops __read_mostly;

static unsigned int vti6_net_id __read_mostly;
struct vti6_net {
	/* the vti6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_VTI_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
};

#define for_each_vti6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/**
 * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
 * @net: network namespace
 * @remote: the address of the tunnel exit-point
 * @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/
static struct ip6_tnl *
vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
		const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	struct in6_addr any;

	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * vti6_tnl_bucket - get head of list matching given tunnel parameters
 * @ip6n: the vti6 per-net private data
 * @p: parameters containing tunnel end-points
 *
 * Description:
 *   vti6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/
static struct ip6_tnl __rcu **
vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

static void
vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void
vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = vti6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void vti6_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static int vti6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err;

	dev->rtnl_link_ops = &vti6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	vti6_tnl_link(ip6n, t);

	return 0;

out:
	return err;
}

static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6_vti%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);

	err = vti6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return NULL;
}

/**
 * vti6_locate - find or create tunnel matching given parameters
 * @net: network namespace
 * @p: tunnel parameters
 * @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   vti6_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or NULL
 **/
static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
				   int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	for (tp = vti6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return NULL;

			return t;
		}
	}
	if (!create)
		return NULL;
	return vti6_tnl_create(net, p);
}

/**
 * vti6_dev_uninit - tunnel device uninitializer
 * @dev: the device to be destroyed
 *
 * Description:
 *   vti6_dev_uninit() removes tunnel from its list
 **/
static void vti6_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct vti6_net *ip6n = net_generic(t->net, vti6_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		vti6_tnl_unlink(ip6n, t);
	dev_put(dev);
}

/* Receive side: look up the vti device matching the outer addresses and
 * hand the packet to the xfrm layer for decapsulation.
 */
static int vti6_rcv(struct sk_buff *skb)
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

	rcu_read_lock();
	t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
	if (t) {
		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
			rcu_read_unlock();
			goto discard;
		}

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			rcu_read_unlock();
			return 0;
		}

		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
			t->dev->stats.rx_dropped++;
			rcu_read_unlock();
			goto discard;
		}

		rcu_read_unlock();

		return xfrm6_rcv_tnl(skb, t);
	}
	rcu_read_unlock();
	return -EINVAL;
discard:
	kfree_skb(skb);
	return 0;
}

/* Post-decapsulation callback: check the inbound policy against the
 * tunnel's i_key mark, rescope the packet to the vti device and update
 * its receive statistics.
 */
static int vti6_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct pcpu_sw_netstats *tstats;
	struct xfrm_state *x;
	struct xfrm_mode *inner_mode;
	struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
	u32 orig_mark = skb->mark;
	int ret;

	if (!t)
		return 1;

	dev = t->dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	x = xfrm_input_state(skb);

	inner_mode = x->inner_mode;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);
			return -EINVAL;
		}
	}

	family = inner_mode->afinfo->family;

	skb->mark = be32_to_cpu(t->parms.i_key);
	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
	skb->mark = orig_mark;

	if (!ret)
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
	skb->dev = dev;

	tstats = this_cpu_ptr(dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

/**
 * vti6_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool
vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static bool vti6_state_check(const struct xfrm_state *x,
			     const struct in6_addr *dst,
			     const struct in6_addr *src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)dst;
	xfrm_address_t *saddr = (xfrm_address_t *)src;

	/* if there is no transform then this tunnel is not functional.
	 * Or if the xfrm is not mode tunnel.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET6)
		return false;

	if (ipv6_addr_any(dst))
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET6);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET6))
		return false;

	return true;
}

/**
 * vti6_xmit - send a packet
 * @skb: the outgoing socket buffer
 * @dev: the outgoing tunnel device
 * @fl: the flow information for the xfrm_lookup
 **/
static int
vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;
	struct xfrm_state *x;
	int pkt_len = skb->len;
	int err = -1;
	int mtu;

	if (!dst)
		goto tx_err_link_failure;

	dst_hold(dst);
	dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
		goto tx_err_link_failure;

	if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr,
			      (const struct in6_addr *)&x->id.daddr))
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	mtu = dst_mtu(dst);
	if (!skb->ignore_df && skb->len > mtu) {
		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		} else {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		}

		return -EMSGSIZE;
	}

	err = dst_output(t->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}

/* Transmit entry point: decode the xfrm flow from the inner packet,
 * override the flow mark with the tunnel's o_key and pass the packet
 * to vti6_xmit().
 */
static netdev_tx_t
vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		ipv6h = ipv6_hdr(skb);

		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
		    vti6_addr_conflict(t, ipv6h))
			goto tx_err;

		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	default:
		goto tx_err;
	}

	/* override mark with tunnel output key */
	fl.flowi_mark = be32_to_cpu(t->parms.o_key);

	ret = vti6_xmit(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* ICMPv6 error handler: for PMTU and redirect messages that belong to a
 * vti device, look up the matching xfrm state and update the route.
 */
static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	__be32 spi;
	__u32 mark;
	struct xfrm_state *x;
	struct ip6_tnl *t;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	int protocol = iph->nexthdr;

	t = vti6_tnl_lookup(dev_net(skb->dev), &iph->daddr, &iph->saddr);
	if (!t)
		return -1;

	mark = be32_to_cpu(t->parms.o_key);

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

/* Derive device addressing and the point-to-point flag from the tunnel's
 * local and remote addresses.
 */
static void vti6_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
		      IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}

/**
 * vti6_tnl_change - update the tunnel parameters
 * @t: tunnel to be changed
 * @p: tunnel configuration parameters
 *
 * Description:
 *   vti6_tnl_change() updates the tunnel parameters
 **/
static int
vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.link = p->link;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	vti6_link_config(t);
	return 0;
}

/* Re-hash the tunnel after a parameter change: unlink, update, re-link. */
static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = dev_net(t->dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err;

	vti6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = vti6_tnl_change(t, p);
	vti6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static void
vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->proto = u->proto;

	memcpy(p->name, u->name, sizeof(u->name));
}

static void
vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	if (u->i_key)
		u->i_flags |= GRE_KEY;
	if (u->o_key)
		u->o_flags |= GRE_KEY;
	u->proto = p->proto;

	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * vti6_ioctl - configure vti6 tunnels from userspace
 * @dev: virtual device associated with tunnel
 * @ifr: parameters passed from userspace
 * @cmd: command to be performed
 *
 * Description:
 *   vti6_ioctl() is used for managing vti6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6_vti0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/
static int
vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = NULL;
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			vti6_parm_from_user(&p1, &p);
			t = vti6_locate(net, &p1, 0);
		} else {
			memset(&p, 0, sizeof(p));
		}
		if (!t)
			t = netdev_priv(dev);
		vti6_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != 0)
			break;
		vti6_parm_from_user(&p1, &p);
		t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);

			err = vti6_update(t, &p1);
		}
		if (t) {
			err = 0;
			vti6_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			vti6_parm_from_user(&p1, &p);
			t = vti6_locate(net, &p1, 0);
			if (!t)
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

static const struct net_device_ops vti6_netdev_ops = {
	.ndo_init = vti6_dev_init,
	.ndo_uninit = vti6_dev_uninit,
	.ndo_start_xmit = vti6_tnl_xmit,
	.ndo_do_ioctl = vti6_ioctl,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

/**
 * vti6_dev_setup - setup virtual tunnel device
 * @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/
static void vti6_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &vti6_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = vti6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * vti6_dev_init_gen - general initializer for all tunnel devices
 * @dev: virtual device associated with tunnel
 **/
static inline int vti6_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

/**
 * vti6_dev_init - initializer for all non fallback tunnel devices
 * @dev: virtual device associated with tunnel
 **/
static int vti6_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = vti6_dev_init_gen(dev);

	if (err)
		return err;
	vti6_link_config(t);
	return 0;
}

/**
 * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
 * @dev: fallback device
 *
 * Return: 0
 **/
static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int vti6_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	return 0;
}

static void vti6_netlink_parms(struct nlattr *data[],
			       struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_VTI_REMOTE]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

static int vti6_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *nt;

	nt = netdev_priv(dev);
	vti6_netlink_parms(data, &nt->parms);

	nt->parms.proto = IPPROTO_IPV6;

	if (vti6_locate(net, &nt->parms, 0))
		return -EEXIST;

	return vti6_tnl_create2(dev);
}

static void vti6_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t;
	struct __ip6_tnl_parm p;
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	vti6_netlink_parms(data, &p);

	t = vti6_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return vti6_update(t, &p);
}

static size_t vti6_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_FWMARK */
		nla_total_size(4) +
		0;
}

static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key) ||
	    nla_put_u32(skb, IFLA_VTI_FWMARK, parm->fwmark))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK] = { .type = NLA_U32 },
	[IFLA_VTI_LOCAL] = { .len = sizeof(struct in6_addr) },
	[IFLA_VTI_REMOTE] = { .len = sizeof(struct in6_addr) },
	[IFLA_VTI_IKEY] = { .type = NLA_U32 },
	[IFLA_VTI_OKEY] = { .type = NLA_U32 },
	[IFLA_VTI_FWMARK] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vti6_link_ops __read_mostly = {
	.kind = "vti6",
	.maxtype = IFLA_VTI_MAX,
	.policy = vti6_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = vti6_dev_setup,
	.validate = vti6_validate,
	.newlink = vti6_newlink,
	.dellink = vti6_dellink,
	.changelink = vti6_changelink,
	.get_size = vti6_get_size,
	.fill_info = vti6_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};

static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
					    struct list_head *list)
{
	int h;
	struct ip6_tnl *t;

	for (h = 0; h < IP6_VTI_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	unregister_netdevice_queue(t->dev, list);
}

static int __net_init vti6_init_net(struct net *net)
{
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
					NET_NAME_UNKNOWN, vti6_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;

	err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
{
	struct vti6_net *ip6n;
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		ip6n = net_generic(net, vti6_net_id);
		vti6_destroy_tunnels(ip6n, &list);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vti6_net_ops = {
	.init = vti6_init_net,
	.exit_batch = vti6_exit_batch_net,
	.id = &vti6_net_id,
	.size = sizeof(struct vti6_net),
};

static struct xfrm6_protocol vti_esp6_protocol __read_mostly = {
	.handler = vti6_rcv,
	.cb_handler = vti6_rcv_cb,
	.err_handler = vti6_err,
	.priority = 100,
};

static struct xfrm6_protocol vti_ah6_protocol __read_mostly = {
	.handler = vti6_rcv,
	.cb_handler = vti6_rcv_cb,
	.err_handler = vti6_err,
	.priority = 100,
};

static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
	.handler = vti6_rcv,
	.cb_handler = vti6_rcv_cb,
	.err_handler = vti6_err,
	.priority = 100,
};

/**
 * vti6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/
static int __init vti6_tunnel_init(void)
{
	const char *msg;
	int err;

	msg = "tunnel device";
	err = register_pernet_device(&vti6_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "tunnel protocols";
	err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&vti6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	unregister_pernet_device(&vti6_net_ops);
pernet_dev_failed:
	pr_err("vti6 init: failed to register %s\n", msg);
	return err;
}

/**
 * vti6_tunnel_cleanup - free resources and unregister protocol
 **/
static void __exit vti6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&vti6_link_ops);
	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
	unregister_pernet_device(&vti6_net_ops);
}

module_init(vti6_tunnel_init);
module_exit(vti6_tunnel_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti6");
MODULE_ALIAS_NETDEV("ip6_vti0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("IPv6 virtual tunnel interface");