/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETDEV("ip6tnl0");

#ifdef IP6_TNL_DEBUG
#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif

#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20

#define HASH_SIZE_SHIFT  5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);

static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
};

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
} __attribute__((aligned(4*sizeof(unsigned long))));

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}

/*
 * Locking: hash tables are protected by RCU and RTNL
 */

static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
	struct dst_entry *dst = t->dst_cache;

	if (dst && dst->obsolete &&
	    dst->ops->check(dst, t->dst_cookie) == NULL) {
		t->dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
	dst_release(t->dst_cache);
	t->dst_cache = NULL;
}

static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
	dst_release(t->dst_cache);
	t->dst_cache = dst;
}
/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
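/*
 * Bucket layout, summarized from the code above: tnls[0] points at the
 * single-entry wildcard list (tnls_wc), used only by the fallback
 * device whose end-points are both unspecified, while tnls[1] points
 * at the HASH_SIZE hashed buckets of tnls_r_l.  ip6_tnl_bucket()
 * selects prio 0 only when both end-points are wildcards, so every
 * explicitly addressed tunnel lands in a bucket keyed by HASH() of
 * its two end-point addresses.
 */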
static void ip6_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace in which to create the tunnel
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or NULL
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, ip6_tnl_dev_setup);
	if (dev == NULL)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	err = ip6_tnl_dev_init(dev);
	if (err < 0)
		goto failed_free;

	err = register_netdevice(dev);
	if (err < 0)
		goto failed_free;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return t;

failed_free:
	ip6_dev_free(dev);
failed:
	return NULL;
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace to look in
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or NULL
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr))
			return t;
	}
	if (!create)
		return NULL;
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	ip6_tnl_dst_reset(t);
	dev_put(dev);
}

/**
 * parse_tlv_tnl_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: the start of the IPv6 header to parse
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

static __u16
parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
	__u8 nexthdr = ipv6h->nexthdr;
	__u16 off = sizeof(*ipv6h);

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		__u16 optlen = 0;
		struct ipv6_opt_hdr *hdr;

		if (raw + off + sizeof(*hdr) > skb->data &&
		    !pskb_may_pull(skb, raw - skb->data + off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *) (raw + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		if (nexthdr == NEXTHDR_DEST) {
			__u16 i = off + 2;
			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > off + optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}
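/*
 * Reading aid, drawn from RFC 2473, section 4.1.1, not from the
 * original source: the encapsulation limit is a one-octet value
 * carried as a TLV inside a destination options header.
 * parse_tlv_tnl_enc_lim() returns the offset of the TLV's type octet
 * within the packet, so the limit value itself sits at "offset + 2"
 * (type, length, then value); this is the octet that
 * ip6ip6_tnl_xmit() reads, and it is why the ICMPV6_HDR_FIELD
 * pointer comparison in ip6_tnl_err() tests "teli == *info - 2".
 */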
/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
				&ipv6h->saddr)) == NULL)
		goto out;

	if (t->parms.proto != ipproto && t->parms.proto != 0)
		goto out;

	err = 0;

	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
				     t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					     t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = parse_tlv_tnl_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						     t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		if ((len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
		/* fall through */
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}
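/*
 * Summary of the translation performed by ip4ip6_err() above (drawn
 * from its switch statement, not from the original comments):
 *
 *	ICMPV6_DEST_UNREACH / ADDR_UNREACH -> ICMP_DEST_UNREACH / HOST_UNREACH
 *	ICMPV6_PKT_TOOBIG   / code 0       -> ICMP_DEST_UNREACH / FRAG_NEEDED
 *	anything else                      -> no ICMPv4 error is generated
 *
 * The forged ICMPv4 error is routed as if it had arrived on the
 * interface facing the original (inner) IPv4 sender, hence the
 * reverse route lookups on the embedded header.
 */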
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		if (rt)
			dst_release(&rt->dst);

		kfree_skb(skb2);
	}

	return 0;
}

static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					const struct ipv6hdr *ipv6h,
					struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	/* test the unmasked field: dsfield has the ECN bits cleared, so
	 * checking it for CE would never fire */
	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
		IP_ECN_set_ce(ip_hdr(skb));
}

static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					const struct ipv6hdr *ipv6h,
					struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}
static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
			     const struct in6_addr *laddr,
			     const struct in6_addr *raddr)
{
	struct ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}

/* called with rcu_read_lock() */
static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
				  const struct in6_addr *laddr,
				  const struct in6_addr *raddr)
{
	struct ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = dev_net(t->dev);

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}

/**
 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
 *   @skb: received socket buffer
 *   @protocol: ethernet protocol ID
 *   @ipproto: IP protocol number carried inside the tunnel
 *   @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
 *
 * Return: 0
 **/

static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
		       __u8 ipproto,
		       void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						    const struct ipv6hdr *ipv6h,
						    struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

	rcu_read_lock();

	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
				&ipv6h->daddr)) != NULL) {
		struct pcpu_tstats *tstats;

		if (t->parms.proto != ipproto && t->parms.proto != 0) {
			rcu_read_unlock();
			goto discard;
		}

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			rcu_read_unlock();
			goto discard;
		}

		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
			t->dev->stats.rx_dropped++;
			rcu_read_unlock();
			goto discard;
		}
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->protocol = htons(protocol);
		skb->pkt_type = PACKET_HOST;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

		tstats = this_cpu_ptr(t->dev->tstats);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;

		__skb_tunnel_rx(skb, t->dev);

		dscp_ecn_decapsulate(t, ipv6h, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	return 1;

discard:
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
			   ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
			   ip6ip6_dscp_ecn_decapsulate);
}
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
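/*
 * Byte layout of the 8-octet destination options header built above
 * (a reading aid drawn from RFC 2473 and RFC 2460, not part of the
 * original source; indices refer to dst_opt[]):
 *
 *	[0] next header   - filled in later by ipv6_push_nfrag_opts()
 *	[1] hdr ext len   - 0, i.e. the header is 8 octets long
 *	[2] option type   - IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *	[3] option length - 1
 *	[4] encap limit   - the value passed in by the caller
 *	[5] option type   - IPV6_TLV_PADN (1)
 *	[6] option length - 1
 *	[7] padding       - 0 (cleared by the memset)
 */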
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
{
	struct ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = dev_net(t->dev);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(&p->raddr) &&
			 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}

/**
 * ip6_tnl_xmit2 - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success,
 *   -1 on failure,
 *   %-EMSGSIZE if the packet is too big (the path MTU is stored in @pmtu)
 **/

static int ip6_tnl_xmit2(struct sk_buff *skb,
			 struct net_device *dev,
			 __u8 dsfield,
			 struct flowi6 *fl6,
			 int encap_limit,
			 __u32 *pmtu)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err = -1;
	int pkt_len;

	if (!fl6->flowi6_mark)
		dst = ip6_tnl_dst_check(t);
	if (!dst) {
		ndst = ip6_route_output(net, NULL, fl6);

		if (ndst->error)
			goto tx_err_link_failure;
		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_err_link_failure;
		}
		dst = ndst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - sizeof(*ipv6h);
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len > mtu) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}
	skb_dst_drop(skb);
	if (fl6->flowi6_mark) {
		skb_dst_set(skb, dst);
		ndst = NULL;
	} else {
		skb_dst_set_noref(skb, dst);
	}
	skb->transport_header = skb->network_header;

	proto = fl6->flowi6_proto;
	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	*(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
	dsfield = INET_ECN_encapsulate(0, dsfield);
	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	nf_reset(skb);
	pkt_len = skb->len;
	err = ip6_local_out(skb);

	if (net_xmit_eval(err) == 0) {
		struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);

		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}
	if (ndst)
		ip6_tnl_dst_store(t, ndst);
	return 0;

tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(ndst);
	return err;
}
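/*
 * Worked example for the MTU arithmetic above (illustrative numbers,
 * not from the original source): with a 1500-byte route MTU on the
 * underlying device, the tunnel can carry 1500 - 40 = 1460 bytes of
 * inner packet (40 bytes for the outer IPv6 header), or 1452 bytes
 * when an encapsulation limit option is sent (8 more bytes for the
 * destination options header); the result is never allowed to drop
 * below IPV6_MIN_MTU (1280).
 */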
static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
	    !ip6_tnl_xmit_ctl(t))
		return -1;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPIP;

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
				 & IPV6_TCLASS_MASK;
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
	    !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPV6;

	dsfield = ipv6_get_dsfield(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(dev_net(dev),
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (rt == NULL)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				sizeof(struct ipv6hdr);

			dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr);
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		dst_release(&rt->dst);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	ip6_tnl_dst_reset(t);
	ip6_tnl_link_config(t);
	return 0;
}
/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process does not have %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct ip6_tnl *t = NULL;
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ip6_tnl_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);

			ip6_tnl_unlink(ip6n, t);
			synchronize_net();
			err = ip6_tnl_change(t, &p);
			ip6_tnl_link(ip6n, t);
			netdev_state_change(dev);
		}
		if (t) {
			err = 0;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
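/*
 * A minimal userspace sketch of driving the ioctl above (illustrative
 * only, not part of the original source; the tunnel name and addresses
 * are hypothetical and error handling is omitted).  The request goes
 * through the fallback device "ip6tnl0", with ifr_ifru.ifru_data
 * pointing at a struct ip6_tnl_parm from <linux/ip6_tunnel.h>:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "mytun");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *) &p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);     (requires CAP_NET_ADMIN)
 *
 * iproute2 wraps the same interface, e.g. "ip -6 tunnel add mytun
 * mode ip6ip6 remote 2001:db8::2 local 2001:db8::1".
 */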
/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

static int
ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
};

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	struct ip6_tnl *t;

	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr);
	t = netdev_priv(dev);
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	t->dev = dev;
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	ip6_tnl_link_config(t);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for (h = 0; h < HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t != NULL) {
			unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	unregister_netdevice_queue(t->dev, &list);
	unregister_netdevice_many(&list);
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	rtnl_lock();
	ip6_tnl_destroy_tunnels(ip6n);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}

	return 0;

out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);