/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETDEV("ip6tnl0");

#ifdef IP6_TNL_DEBUG
#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif

#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20

#define HASH_SIZE  32

#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
		     (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
		    (HASH_SIZE - 1))
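
/*
 * HASH() xor-folds the four 32-bit words of one IPv6 address and masks
 * the result down to HASH_SIZE - 1, yielding a 5-bit bucket index.
 * Tunnels keyed by both end-points are placed in bucket
 * HASH(remote) ^ HASH(local); see ip6_tnl_lookup() and ip6_tnl_bucket()
 * below.
 */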

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);

static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
};

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
} __attribute__((aligned(4*sizeof(unsigned long))));

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes   += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes   += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}

/*
 * Locking : hash tables are protected by RCU and RTNL
 */

static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
	struct dst_entry *dst = t->dst_cache;

	if (dst && dst->obsolete &&
	    dst->ops->check(dst, t->dst_cookie) == NULL) {
		t->dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
	dst_release(t->dst_cache);
	t->dst_cache = NULL;
}

static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
	dst_release(t->dst_cache);
	t->dst_cache = dst;
}
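
/*
 * The cached dst above is validated lazily: ip6_tnl_dst_store() saves
 * the fib6 node's serial number as a cookie, and ip6_tnl_dst_check()
 * drops the cache as soon as dst->ops->check() declares the entry
 * stale for that cookie.
 */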

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote,
	       const struct in6_addr *local)
{
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote) ^ HASH(local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
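
/*
 * ip6_tnl_link()/ip6_tnl_unlink() follow the usual RCU publish pattern:
 * updates run under RTNL (hence rtnl_dereference()), new entries are
 * published with rcu_assign_pointer(), and readers traverse the chains
 * under rcu_read_lock() via for_each_ip6_tunnel_rcu().
 */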

static void ip6_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

/**
 * ip6_tnl_create() - create a new tunnel
 *   @net: network namespace
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or NULL
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
	if (dev == NULL)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	err = ip6_tnl_dev_init(dev);
	if (err < 0)
		goto failed_free;

	if ((err = register_netdevice(dev)) < 0)
		goto failed_free;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return t;

failed_free:
	ip6_dev_free(dev);
failed:
	return NULL;
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or NULL
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr))
			return t;
	}
	if (!create)
		return NULL;
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	ip6_tnl_dst_reset(t);
	dev_put(dev);
}
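
/*
 * Per RFC 2473 the tunnel encapsulation limit travels as a TLV inside
 * a destination options header:
 *
 *	octet 0: option type (IPV6_TLV_TNL_ENCAP_LIMIT)
 *	octet 1: option data length (1)
 *	octet 2: remaining encapsulation limit
 *
 * parse_tlv_tnl_enc_lim() below walks the extension header chain
 * looking for exactly this option and returns its offset.
 */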

/**
 * parse_tlv_tnl_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

static __u16
parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
	__u8 nexthdr = ipv6h->nexthdr;
	__u16 off = sizeof (*ipv6h);

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		__u16 optlen = 0;
		struct ipv6_opt_hdr *hdr;
		if (raw + off + sizeof (*hdr) > skb->data &&
		    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *) (raw + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		if (nexthdr == NEXTHDR_DEST) {
			__u16 i = off + 2;
			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof (*tel) > off + optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}

/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
				&ipv6h->saddr)) == NULL)
		goto out;

	if (t->parms.proto != ipproto && t->parms.proto != 0)
		goto out;

	err = 0;

	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
				     t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					     t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = parse_tlv_tnl_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						     t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}
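
/*
 * ip4ip6_err() below relays errors seen on the IPv6 path back to the
 * IPv4 sender of the inner packet, translating the ICMPv6 error:
 *
 *	ICMPV6_DEST_UNREACH / ICMPV6_ADDR_UNREACH
 *		-> ICMP_DEST_UNREACH / ICMP_HOST_UNREACH
 *	ICMPV6_PKT_TOOBIG (code 0)
 *		-> ICMP_DEST_UNREACH / ICMP_FRAG_NEEDED
 *
 * All other error types are absorbed without relaying anything.
 */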

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		if (rt)
			dst_release(&rt->dst);

		kfree_skb(skb2);
	}

	return 0;
}

static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					const struct ipv6hdr *ipv6h,
					struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	/* check the unmasked field: dsfield has the ECN bits cleared,
	 * so testing it for CE could never succeed */
	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
		IP_ECN_set_ce(ip_hdr(skb));
}
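
/*
 * On decapsulation the outer traffic class is folded back into the
 * inner header: the DSCP bits are copied when IP6_TNL_F_RCV_DSCP_COPY
 * is set, and an outer CE mark is propagated so congestion signalling
 * survives the tunnel (cf. RFC 6040 for the full ECN tunneling rules).
 */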

static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					const struct ipv6hdr *ipv6h,
					struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}

/* called with rcu_read_lock() */
static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
{
	struct ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = dev_net(t->dev);

	if (p->flags & IP6_TNL_F_CAP_RCV) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(&p->laddr) ||
		     likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
			ret = 1;

	}
	return ret;
}

/**
 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
 *   @skb: received socket buffer
 *   @protocol: ethernet protocol ID
 *   @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
 *
 * Return: 0
 **/

static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
		       __u8 ipproto,
		       void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						    const struct ipv6hdr *ipv6h,
						    struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

	rcu_read_lock();

	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
				&ipv6h->daddr)) != NULL) {
		struct pcpu_tstats *tstats;

		if (t->parms.proto != ipproto && t->parms.proto != 0) {
			rcu_read_unlock();
			goto discard;
		}

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			rcu_read_unlock();
			goto discard;
		}

		if (!ip6_tnl_rcv_ctl(t)) {
			t->dev->stats.rx_dropped++;
			rcu_read_unlock();
			goto discard;
		}
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->protocol = htons(protocol);
		skb->pkt_type = PACKET_HOST;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

		tstats = this_cpu_ptr(t->dev->tstats);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;

		__skb_tunnel_rx(skb, t->dev);

		dscp_ecn_decapsulate(t, ipv6h, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	return 1;

discard:
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
			   ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
			   ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
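
/*
 * Layout of the 8-byte destination options header built by
 * init_tel_txopt() above (octets 0-1 are the generic ipv6_opt_hdr,
 * filled in when the header is pushed):
 *
 *	dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT	option type
 *	dst_opt[3] = 1				option data length
 *	dst_opt[4] = encap_limit		remaining limit
 *	dst_opt[5..7]				PadN option: type, length 1,
 *						one zero data byte
 */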

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
{
	struct ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = dev_net(t->dev);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(&p->raddr) &&
			 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}

/**
 * ip6_tnl_xmit2 - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before
 *   sending it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

static int ip6_tnl_xmit2(struct sk_buff *skb,
			 struct net_device *dev,
			 __u8 dsfield,
			 struct flowi6 *fl6,
			 int encap_limit,
			 __u32 *pmtu)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err = -1;
	int pkt_len;

	if (!fl6->flowi6_mark)
		dst = ip6_tnl_dst_check(t);
	if (!dst) {
		ndst = ip6_route_output(net, NULL, fl6);

		if (ndst->error)
			goto tx_err_link_failure;
		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_err_link_failure;
		}
		dst = ndst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - sizeof (*ipv6h);
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
	if (skb->len > mtu) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}
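
	/*
	 * MTU bookkeeping above: the path MTU of the outer route is cut
	 * by the 40-byte IPv6 header, and by another 8 bytes when a
	 * tunnel encapsulation limit option must be inserted, but it is
	 * never allowed to drop below IPV6_MIN_MTU (1280).
	 */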

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}
	skb_dst_drop(skb);
	if (fl6->flowi6_mark) {
		skb_dst_set(skb, dst);
		ndst = NULL;
	} else {
		skb_dst_set_noref(skb, dst);
	}
	skb->transport_header = skb->network_header;

	proto = fl6->flowi6_proto;
	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	*(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
	dsfield = INET_ECN_encapsulate(0, dsfield);
	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	nf_reset(skb);
	pkt_len = skb->len;
	err = ip6_local_out(skb);

	if (net_xmit_eval(err) == 0) {
		struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);

		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}
	if (ndst)
		ip6_tnl_dst_store(t, ndst);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(ndst);
	return err;
}
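
/*
 * Note on dst handling in ip6_tnl_xmit2() above: lookups for packets
 * carrying a firewall mark take a real reference and are not cached,
 * since the mark may select a different route per packet; only
 * mark-less routes are saved back through ip6_tnl_dst_store().
 */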

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
	    !ip6_tnl_xmit_ctl(t))
		return -1;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
	fl6.flowi6_proto = IPPROTO_IPIP;

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					  & IPV6_TCLASS_MASK;
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
	    !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
	fl6.flowi6_proto = IPPROTO_IPV6;

	dsfield = ipv6_get_dsfield(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_set_cap(struct ip6_tnl *t)
{
	struct ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(&p->laddr);
	int rtype = ipv6_addr_type(&p->raddr);

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);

	if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
	    rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
	    !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
	    (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			p->flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			p->flags |= IP6_TNL_F_CAP_RCV;
	}
}
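
/*
 * ip6_tnl_set_cap() above derives IP6_TNL_F_CAP_XMIT/IP6_TNL_F_CAP_RCV
 * from the configured end-points: both addresses must be unicast or
 * multicast, neither may be loopback, and link-local addresses are
 * only accepted when an explicit link (p->link) disambiguates them.
 */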

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	ip6_tnl_set_cap(t);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(dev_net(dev),
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (rt == NULL)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				sizeof (struct ipv6hdr);

			dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		dst_release(&rt->dst);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	ip6_tnl_dst_reset(t);
	ip6_tnl_link_config(t);
	return 0;
}
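
/*
 * Rough userspace sketch (not part of this module) of driving the
 * ioctl interface below, assuming fd is an open AF_INET6 socket; the
 * UAPI names are real, the values are illustrative only:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 *
 * This creates a new tunnel device through the fallback device.
 */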

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *   %SIOCGETTUNNEL: get tunnel parameters for device
 *   %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *   %SIOCCHGTUNNEL: change tunnel parameters to those given
 *   %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct ip6_tnl *t = NULL;
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
				err = -EFAULT;
				break;
			}
			t = ip6_tnl_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof (p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);

			ip6_tnl_unlink(ip6n, t);
			synchronize_net();
			err = ip6_tnl_change(t, &p);
			ip6_tnl_link(ip6n, t);
			netdev_state_change(dev);
		}
		if (t) {
			err = 0;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
				err = -EFAULT;

		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
				break;
			err = -ENOENT;
			if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

static int
ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
};

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	struct ip6_tnl *t;

	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
	dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
	t = netdev_priv(dev);
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	t->dev = dev;
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);
	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}
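
/*
 * The two xfrm6_tunnel handlers below hook IPPROTO_IPIP and
 * IPPROTO_IPV6 payloads arriving over IPv6.  The tunnel demux tries
 * handlers for the same protocol in priority order; when
 * ip4ip6_rcv()/ip6ip6_rcv() return nonzero (no matching tunnel) the
 * packet is offered to the next registered handler.
 */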

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for (h = 0; h < HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t != NULL) {
			unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	unregister_netdevice_queue(t->dev, &list);
	unregister_netdevice_many(&list);
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	rtnl_lock();
	ip6_tnl_destroy_tunnels(ip6n);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}

	return 0;

out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);