// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>


static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)

static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]

/* Given src, dst and key, find the appropriate tunnel for the incoming packet. */
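/*
 * Worked example (editorial illustration, not part of the original source):
 * a tunnel configured with both a remote and a local address and an input
 * key lives in the most specific table, tunnels[3] (tunnels_r_l), in bucket
 * HASH_ADDR(remote) ^ HASH_KEY(i_key); a tunnel with only a local address
 * and no key lives in tunnels[1] (tunnels_l) in bucket HASH_KEY(0) == 0.
 * With IP6_GRE_HASH_SIZE_SHIFT == 5 each table has 32 buckets, so both
 * hash values are always masked down to the range 0..31.
 */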
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;
	struct net_device *ndev;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(ign->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}
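/*
 * Editorial note on the lookup above (not part of the original source):
 * an exact match on link and device type returns immediately (score 0).
 * Otherwise the mismatches are scored, where a wrong link costs 1 and a
 * wrong device type costs 2, and the lowest-scoring candidate found while
 * walking the four tables wins.  For example, a packet arriving on
 * ifindex 7 for a tunnel bound to ifindex 3 but with matching addresses
 * and key is kept as a candidate with score 1 and is only returned if no
 * score-0 match exists in any table.
 */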
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}

static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}

static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t && create)
		return NULL;
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strscpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	netdev_put(dev, &t->dev_tracker);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	if (ign->fb_tunnel_dev == dev)
		WRITE_ONCE(ign->fb_tunnel_dev, NULL);
	dst_cache_reset(&t->dst_cache);
	netdev_put(dev, &t->dev_tracker);
}


static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return -EINVAL;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return -ENOENT;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return 0;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return 0;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return 0;
	}
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return 0;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return 0;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			IP_TUNNEL_DECLARE_FLAGS(flags);
			struct metadata_dst *tun_dst;
			__be64 tun_id;

			ip_tunnel_flags_copy(flags, tpi->flags);
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

static int ip6erspan_rcv(struct sk_buff *skb,
			 struct tnl_ptk_info *tpi,
			 int gre_hdr_len)
{
	struct erspan_base_hdr *ershdr;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
		return PACKET_REJECT;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct erspan_metadata *pkt_md, *md;
			IP_TUNNEL_DECLARE_FLAGS(flags);
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;

			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
			ip_tunnel_flags_copy(flags, tpi->flags);
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
					    ERSPAN_V2_MDSIZE);
			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
				  info->key.tun_flags);
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);

		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}

static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
	ipv6h = ipv6_hdr(skb);

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
					ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}

static int prepare_ip6gre_xmit_other(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = 0;
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}

static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb)
{
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX)))
		return ERR_PTR(-EINVAL);

	return tun_info;
}
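/*
 * Editorial example (not part of the original source): the GRE header
 * pushed by __gre6_xmit() below is 4 bytes of base header plus 4 bytes
 * for each of the CSUM, KEY and SEQ flags that are set.  A tunnel with
 * only an output key therefore gets gre_calc_hlen() == 8, while one with
 * key, checksum and sequence numbers gets 16 bytes; that is the tun_hlen
 * for which skb_cow_head() has to guarantee headroom before the header
 * is built.
 */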
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		int tun_hlen;

		tun_info = skb_tunnel_info_txcheck(skb);
		if (IS_ERR(tun_info) ||
		    unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
		fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id);

		dsfield = key->tos;
		ip_tunnel_flags_zero(flags);
		__set_bit(IP_TUNNEL_CSUM_BIT, flags);
		__set_bit(IP_TUNNEL_KEY_BIT, flags);
		__set_bit(IP_TUNNEL_SEQ_BIT, flags);
		ip_tunnel_flags_and(flags, flags, key->tun_flags);
		tun_hlen = gre_calc_hlen(flags);

		if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
			return -ENOMEM;

		gre_build_header(skb, tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
				 htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
				 0);

	} else {
		if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
			return -ENOMEM;

		ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);

		gre_build_header(skb, tunnel->tun_hlen, flags,
				 protocol, tunnel->parms.o_key,
				 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
				 htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
				 0);
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}

static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
						t->parms.o_flags));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				      htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      t->parms.o_flags)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
						t->parms.o_flags));
	if (err)
		return err;
	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}

static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	__be16 payload_protocol;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	payload_protocol = skb_protocol(skb, true);
	switch (payload_protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
		DEV_STATS_INC(dev, tx_errors);
	DEV_STATS_INC(dev, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip_tunnel_info *tun_info = NULL;
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = false;
	struct flowi6 fl6;
	int err = -EINVAL;
	__be16 proto;
	__u32 mtu;
	int nhoff;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto tx_err;
		truncate = true;
	}

	nhoff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		int thoff;

		if (skb_transport_header_was_set(skb))
			thoff = skb_transport_offset(skb);
		else
			thoff = nhoff + sizeof(struct ipv6hdr);
		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
			truncate = true;
	}

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	__clear_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info_txcheck(skb);
		if (IS_ERR(tun_info) ||
		    unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
			goto tx_err;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
		fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id);

		dsfield = key->tos;
		if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
			      tun_info->key.tun_flags))
			goto tx_err;
		if (tun_info->options_len < sizeof(*md))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb,
					    ntohl(tun_id),
					    ntohl(md->u.index), truncate,
					    false);
			proto = htons(ETH_P_ERSPAN);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb,
					       ntohl(tun_id),
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
			proto = htons(ETH_P_ERSPAN2);
		} else {
			goto tx_err;
		}
	} else {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1) {
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
			proto = htons(ETH_P_ERSPAN);
		} else if (t->parms.erspan_ver == 2) {
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
			proto = htons(ETH_P_ERSPAN2);
		} else {
			goto tx_err;
		}

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	gre_build_header(skb, 8, flags, proto, 0,
			 htonl(atomic_fetch_inc(&t->o_seqno)));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_ndo_send(skb, ICMP_DEST_UNREACH,
					      ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		goto tx_err;
	}
	return NETDEV_TX_OK;

tx_err:
	if (!IS_ERR(tun_info))
		DEV_STATS_INC(dev, tx_errors);
	DEV_STATS_INC(dev, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		__dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;
	fl6->fl6_gre_key = t->parms.o_key;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}

static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			unsigned short dst_len = rt->dst.dev->hard_header_len +
						 t_hlen;

			if (t->dev->header_ops)
				dev->hard_header_len = dst_len;
			else
				dev->needed_headroom = dst_len;

			if (set_mtu) {
				int mtu = rt->dst.dev->mtu - t_hlen;

				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					mtu -= ETH_HLEN;

				if (mtu < IPV6_MIN_MTU)
					mtu = IPV6_MIN_MTU;
				WRITE_ONCE(dev->mtu, mtu);
			}
		}
		ip6_rt_put(rt);
	}
}

static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);

	if (tunnel->dev->header_ops)
		tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	else
		tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;

	return t_hlen;
}

static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}
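/*
 * Editorial example (not from the original source): for a plain ip6gre
 * device whose o_flags carry only a key, ip6gre_calc_hlen() gives
 * tun_hlen = 8 and t_hlen = 8 + 40 (IPv6 header) = 48.  If the route to
 * the peer goes over a 1500-byte Ethernet link, ip6gre_tnl_link_config_route()
 * sets the tunnel MTU to 1500 - 48 = 1452, and subtracts a further 8 bytes
 * (giving 1444) when the destination option carrying the tunnel
 * encapsulation limit is in use.
 */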
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
	t->parms.fwmark = p->fwmark;
	t->parms.erspan_ver = p->erspan_ver;
	t->parms.index = p->index;
	t->parms.dir = p->dir;
	t->parms.hwid = p->hwid;
	dst_cache_reset(&t->dst_cache);
}

static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}

static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
				      const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	gre_flags_to_tnl_flags(p->i_flags, u->i_flags);
	gre_flags_to_tnl_flags(p->o_flags, u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
				    const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}

static int ip6gre_tunnel_siocdevprivate(struct net_device *dev,
					struct ifreq *ifr, void __user *data,
					int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = ip_tunnel_flags_to_be16(t->parms.o_flags);
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}

static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};

static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_siocdevprivate	= ip6gre_tunnel_siocdevprivate,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
}

static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)

static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features |= GRE6_FEATURES | NETIF_F_LLTX;
	dev->hw_features |= GRE6_FEATURES;

	/* TCP offload with GRE SEQ is not supported, nor can we support 2
	 * levels of outer headers requiring an update.
	 */
	if (test_bit(IP_TUNNEL_SEQ_BIT, nt->parms.o_flags))
		return;
	if (test_bit(IP_TUNNEL_CSUM_BIT, nt->parms.o_flags) &&
	    nt->encap.type != TUNNEL_ENCAP_NONE)
		return;

	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}

static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		return ret;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
	netdev_lockdep_set_classes(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
	return ret;
}

static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	__dev_addr_set(dev, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}

static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen = sizeof(struct ipv6hdr) + 4;
}

static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_FINAL,
};

static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *ndev;
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
			    NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
	if (!ndev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	ign->fb_tunnel_dev = ndev;
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;


	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ndev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list,
					      struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, dev_to_kill);
}

static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch_rtnl = ip6gre_exit_batch_rtnl,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};

static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}
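/*
 * Editorial note (not part of the original source): the ERSPAN session
 * ID reuses the GRE key attribute but is only 10 bits wide, so a key of
 * 100 passes the range check in ip6erspan_tap_validate() below while
 * 0x400 (1024) is rejected with -EINVAL.  Likewise the v1 index is
 * limited to 20 bits, the v2 direction to a single bit and the v2
 * hardware ID to 6 bits.
 */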
static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits wide.  Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

static void ip6erspan_set_version(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	if (!data)
		return;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}

static void ip6gre_netlink_parms(struct nlattr *data[],
				 struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		gre_flags_to_tnl_flags(parms->i_flags,
				       nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		gre_flags_to_tnl_flags(parms->o_flags,
				       nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;
}

static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init = ip6gre_tap_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->parms.erspan_ver);

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static int ip6erspan_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int t_hlen;
	int ret;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		return ret;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6erspan_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6erspan_tnl_link_config(tunnel, 1);

	netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
	netdev_lockdep_set_classes(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
	return ret;
}

static const struct net_device_ops ip6erspan_netdev_ops = {
	.ndo_init = ip6erspan_tap_init,
	.ndo_uninit = ip6erspan_tunnel_uninit,
	.ndo_start_xmit = ip6erspan_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

out:
	return err;
}

static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}

static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}
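/*
 * Editorial usage sketch (assumed iproute2 syntax, not from the original
 * source): the netlink path above is what e.g.
 *
 *   ip link add ip6gre1 type ip6gre local 2001:db8::1 remote 2001:db8::2 key 10
 *   ip link set ip6gre1 type ip6gre ttl 64
 *
 * exercises via ip6gre_newlink() and ip6gre_changelink(), while
 * "ip link del ip6gre1" ends up in ip6gre_dellink() above.
 */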
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}

static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;
	IP_TUNNEL_DECLARE_FLAGS(o_flags);

	ip_tunnel_flags_copy(o_flags, p->o_flags);

	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
		if (!p->collect_md)
			__set_bit(IP_TUNNEL_KEY_BIT, o_flags);

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
			goto nla_put_failure;

		if (p->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
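/*
 * Netlink attribute validation policy shared by the ip6gre, ip6gretap and
 * ip6erspan link types registered further down.
 */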
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]			= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]		= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]		= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]			= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]			= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]		= { .len = sizeof_field(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]		= { .len = sizeof_field(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]			= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT]		= { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]		= { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]		= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]		= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]		= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK]		= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]		= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]		= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]		= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]		= { .type = NLA_U16 },
};

static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ip6erspan_set_version(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}

static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6erspan_set_version(data, &p);
	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
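/*
 * rtnetlink glue for the three link kinds registered below.  For
 * illustration only (iproute2 syntax, not part of this file), such
 * devices are typically created along the lines of:
 *
 *   ip link add grev6-example type ip6gre \
 *           local 2001:db8::1 remote 2001:db8::2
 *   ip link add erspanv6-example type ip6erspan \
 *           local 2001:db8::1 remote 2001:db8::2 \
 *           seq key 100 erspan_ver 1 erspan 123
 *
 * The device names and addresses above are made-up examples.
 */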
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind		= "ip6erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6erspan_tap_setup,
	.validate	= ip6erspan_tap_validate,
	.newlink	= ip6erspan_newlink,
	.changelink	= ip6erspan_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

/*
 *	And now the module code and kernel interface.
 */

static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

out:
	return err;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}

static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");