1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * GRE over IPv6 protocol decoder. 4 * 5 * Authors: Dmitry Kozlov (xeb@mail.ru) 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/capability.h> 11 #include <linux/module.h> 12 #include <linux/types.h> 13 #include <linux/kernel.h> 14 #include <linux/slab.h> 15 #include <linux/uaccess.h> 16 #include <linux/skbuff.h> 17 #include <linux/netdevice.h> 18 #include <linux/in.h> 19 #include <linux/tcp.h> 20 #include <linux/udp.h> 21 #include <linux/if_arp.h> 22 #include <linux/init.h> 23 #include <linux/in6.h> 24 #include <linux/inetdevice.h> 25 #include <linux/igmp.h> 26 #include <linux/netfilter_ipv4.h> 27 #include <linux/etherdevice.h> 28 #include <linux/if_ether.h> 29 #include <linux/hash.h> 30 #include <linux/if_tunnel.h> 31 #include <linux/ip6_tunnel.h> 32 33 #include <net/sock.h> 34 #include <net/ip.h> 35 #include <net/ip_tunnels.h> 36 #include <net/icmp.h> 37 #include <net/protocol.h> 38 #include <net/addrconf.h> 39 #include <net/arp.h> 40 #include <net/checksum.h> 41 #include <net/dsfield.h> 42 #include <net/inet_ecn.h> 43 #include <net/xfrm.h> 44 #include <net/net_namespace.h> 45 #include <net/netns/generic.h> 46 #include <net/netdev_lock.h> 47 #include <net/rtnetlink.h> 48 49 #include <net/ipv6.h> 50 #include <net/ip6_fib.h> 51 #include <net/ip6_route.h> 52 #include <net/ip6_tunnel.h> 53 #include <net/gre.h> 54 #include <net/erspan.h> 55 #include <net/dst_metadata.h> 56 57 58 static bool log_ecn_error = true; 59 module_param(log_ecn_error, bool, 0644); 60 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 61 62 #define IP6_GRE_HASH_SIZE_SHIFT 5 63 #define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT) 64 65 static unsigned int ip6gre_net_id __read_mostly; 66 struct ip6gre_net { 67 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE]; 68 69 struct ip6_tnl __rcu *collect_md_tun; 70 struct ip6_tnl __rcu *collect_md_tun_erspan; 71 struct net_device *fb_tunnel_dev; 72 }; 73 74 static struct rtnl_link_ops ip6gre_link_ops __read_mostly; 75 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly; 76 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly; 77 static int ip6gre_tunnel_init(struct net_device *dev); 78 static void ip6gre_tunnel_setup(struct net_device *dev); 79 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); 80 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu); 81 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu); 82 83 /* Tunnel hash table */ 84 85 /* 86 4 hash tables: 87 88 3: (remote,local) 89 2: (remote,*) 90 1: (*,local) 91 0: (*,*) 92 93 We require exact key match i.e. if a key is present in packet 94 it will match only tunnel with the same key; if it is not present, 95 it will match only keyless tunnel. 96 97 All keysless packets, if not matched configured keyless tunnels 98 will match fallback tunnel. 99 */ 100 101 #define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1)) 102 static u32 HASH_ADDR(const struct in6_addr *addr) 103 { 104 u32 hash = ipv6_addr_hash(addr); 105 106 return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT); 107 } 108 109 #define tunnels_r_l tunnels[3] 110 #define tunnels_r tunnels[2] 111 #define tunnels_l tunnels[1] 112 #define tunnels_wc tunnels[0] 113 114 static bool ip6gre_tunnel_match(struct ip6_tnl *t, int dev_type, int link, 115 int *cand_score, struct ip6_tnl **ret) 116 { 117 int score = 0; 118 119 if (t->dev->type != ARPHRD_IP6GRE && 120 t->dev->type != dev_type) 121 return false; 122 123 if (t->parms.link != link) 124 score |= 1; 125 if (t->dev->type != dev_type) 126 score |= 2; 127 if (score == 0) { 128 *ret = t; 129 return true; 130 } 131 132 if (score < *cand_score) { 133 *ret = t; 134 *cand_score = score; 135 } 136 return false; 137 } 138 139 /* Given src, dst and key, find appropriate for input tunnel. */ 140 static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, 141 const struct in6_addr *remote, const struct in6_addr *local, 142 __be32 key, __be16 gre_proto) 143 { 144 struct net *net = dev_net(dev); 145 int link = dev->ifindex; 146 unsigned int h0 = HASH_ADDR(remote); 147 unsigned int h1 = HASH_KEY(key); 148 struct ip6_tnl *t, *cand = NULL; 149 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 150 int dev_type = (gre_proto == htons(ETH_P_TEB) || 151 gre_proto == htons(ETH_P_ERSPAN) || 152 gre_proto == htons(ETH_P_ERSPAN2)) ? 153 ARPHRD_ETHER : ARPHRD_IP6GRE; 154 struct net_device *ndev; 155 int cand_score = 4; 156 157 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { 158 if (!ipv6_addr_equal(local, &t->parms.laddr) || 159 !ipv6_addr_equal(remote, &t->parms.raddr) || 160 key != t->parms.i_key || 161 !(t->dev->flags & IFF_UP)) 162 continue; 163 164 if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand)) 165 return cand; 166 } 167 168 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) { 169 if (!ipv6_addr_equal(remote, &t->parms.raddr) || 170 key != t->parms.i_key || 171 !(t->dev->flags & IFF_UP)) 172 continue; 173 174 if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand)) 175 return cand; 176 } 177 178 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) { 179 if ((!ipv6_addr_equal(local, &t->parms.laddr) && 180 (!ipv6_addr_equal(local, &t->parms.raddr) || 181 !ipv6_addr_is_multicast(local))) || 182 key != t->parms.i_key || 183 !(t->dev->flags & IFF_UP)) 184 continue; 185 186 if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand)) 187 return cand; 188 } 189 190 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) { 191 if (t->parms.i_key != key || 192 !(t->dev->flags & IFF_UP)) 193 continue; 194 195 if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand)) 196 return cand; 197 } 198 199 if (cand) 200 return cand; 201 202 if (gre_proto == htons(ETH_P_ERSPAN) || 203 gre_proto == htons(ETH_P_ERSPAN2)) 204 t = rcu_dereference(ign->collect_md_tun_erspan); 205 else 206 t = rcu_dereference(ign->collect_md_tun); 207 208 if (t && t->dev->flags & IFF_UP) 209 return t; 210 211 ndev = READ_ONCE(ign->fb_tunnel_dev); 212 if (ndev && ndev->flags & IFF_UP) 213 return netdev_priv(ndev); 214 215 return NULL; 216 } 217 218 static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign, 219 const struct __ip6_tnl_parm *p) 220 { 221 const struct in6_addr *remote = &p->raddr; 222 const struct in6_addr *local = &p->laddr; 223 unsigned int h = HASH_KEY(p->i_key); 224 int prio = 0; 225 226 if (!ipv6_addr_any(local)) 227 prio |= 1; 228 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) { 229 prio |= 2; 230 h ^= HASH_ADDR(remote); 231 } 232 233 return &ign->tunnels[prio][h]; 234 } 235 236 static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t) 237 { 238 if (t->parms.collect_md) 239 rcu_assign_pointer(ign->collect_md_tun, t); 240 } 241 242 static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t) 243 { 244 if (t->parms.collect_md) 245 rcu_assign_pointer(ign->collect_md_tun_erspan, t); 246 } 247 248 static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t) 249 { 250 if (t->parms.collect_md) 251 rcu_assign_pointer(ign->collect_md_tun, NULL); 252 } 253 254 static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign, 255 struct ip6_tnl *t) 256 { 257 if (t->parms.collect_md) 258 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL); 259 } 260 261 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign, 262 const struct ip6_tnl *t) 263 { 264 return __ip6gre_bucket(ign, &t->parms); 265 } 266 267 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t) 268 { 269 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t); 270 271 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 272 rcu_assign_pointer(*tp, t); 273 } 274 275 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t) 276 { 277 struct ip6_tnl __rcu **tp; 278 struct ip6_tnl *iter; 279 280 for (tp = ip6gre_bucket(ign, t); 281 (iter = rtnl_dereference(*tp)) != NULL; 282 tp = &iter->next) { 283 if (t == iter) { 284 rcu_assign_pointer(*tp, t->next); 285 break; 286 } 287 } 288 } 289 290 static struct ip6_tnl *ip6gre_tunnel_find(struct net *net, 291 const struct __ip6_tnl_parm *parms, 292 int type) 293 { 294 const struct in6_addr *remote = &parms->raddr; 295 const struct in6_addr *local = &parms->laddr; 296 __be32 key = parms->i_key; 297 int link = parms->link; 298 struct ip6_tnl *t; 299 struct ip6_tnl __rcu **tp; 300 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 301 302 for (tp = __ip6gre_bucket(ign, parms); 303 (t = rtnl_dereference(*tp)) != NULL; 304 tp = &t->next) 305 if (ipv6_addr_equal(local, &t->parms.laddr) && 306 ipv6_addr_equal(remote, &t->parms.raddr) && 307 key == t->parms.i_key && 308 link == t->parms.link && 309 type == t->dev->type) 310 break; 311 312 return t; 313 } 314 315 static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, 316 const struct __ip6_tnl_parm *parms, int create) 317 { 318 struct ip6_tnl *t, *nt; 319 struct net_device *dev; 320 char name[IFNAMSIZ]; 321 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 322 323 t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE); 324 if (t && create) 325 return NULL; 326 if (t || !create) 327 return t; 328 329 if (parms->name[0]) { 330 if (!dev_valid_name(parms->name)) 331 return NULL; 332 strscpy(name, parms->name); 333 } else { 334 strscpy(name, "ip6gre%d"); 335 } 336 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 337 ip6gre_tunnel_setup); 338 if (!dev) 339 return NULL; 340 341 dev_net_set(dev, net); 342 343 nt = netdev_priv(dev); 344 nt->parms = *parms; 345 dev->rtnl_link_ops = &ip6gre_link_ops; 346 347 nt->dev = dev; 348 nt->net = dev_net(dev); 349 350 if (register_netdevice(dev) < 0) 351 goto failed_free; 352 353 ip6gre_tnl_link_config(nt, 1); 354 ip6gre_tunnel_link(ign, nt); 355 return nt; 356 357 failed_free: 358 free_netdev(dev); 359 return NULL; 360 } 361 362 static void ip6erspan_tunnel_uninit(struct net_device *dev) 363 { 364 struct ip6_tnl *t = netdev_priv(dev); 365 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 366 367 ip6erspan_tunnel_unlink_md(ign, t); 368 ip6gre_tunnel_unlink(ign, t); 369 dst_cache_reset(&t->dst_cache); 370 netdev_put(dev, &t->dev_tracker); 371 } 372 373 static void ip6gre_tunnel_uninit(struct net_device *dev) 374 { 375 struct ip6_tnl *t = netdev_priv(dev); 376 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 377 378 ip6gre_tunnel_unlink_md(ign, t); 379 ip6gre_tunnel_unlink(ign, t); 380 if (ign->fb_tunnel_dev == dev) 381 WRITE_ONCE(ign->fb_tunnel_dev, NULL); 382 dst_cache_reset(&t->dst_cache); 383 netdev_put(dev, &t->dev_tracker); 384 } 385 386 387 static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 388 u8 type, u8 code, int offset, __be32 info) 389 { 390 struct net *net = dev_net(skb->dev); 391 const struct ipv6hdr *ipv6h; 392 struct tnl_ptk_info tpi; 393 struct ip6_tnl *t; 394 395 if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6), 396 offset) < 0) 397 return -EINVAL; 398 399 ipv6h = (const struct ipv6hdr *)skb->data; 400 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, 401 tpi.key, tpi.proto); 402 if (!t) 403 return -ENOENT; 404 405 switch (type) { 406 case ICMPV6_DEST_UNREACH: 407 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 408 t->parms.name); 409 if (code != ICMPV6_PORT_UNREACH) 410 break; 411 return 0; 412 case ICMPV6_TIME_EXCEED: 413 if (code == ICMPV6_EXC_HOPLIMIT) { 414 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 415 t->parms.name); 416 break; 417 } 418 return 0; 419 case ICMPV6_PARAMPROB: { 420 struct ipv6_tlv_tnl_enc_lim *tel; 421 __u32 teli; 422 423 teli = 0; 424 if (code == ICMPV6_HDR_FIELD) 425 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 426 427 if (teli && teli == be32_to_cpu(info) - 2) { 428 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 429 if (tel->encap_limit == 0) { 430 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 431 t->parms.name); 432 } 433 } else { 434 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 435 t->parms.name); 436 } 437 return 0; 438 } 439 case ICMPV6_PKT_TOOBIG: 440 ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL)); 441 return 0; 442 case NDISC_REDIRECT: 443 ip6_redirect(skb, net, skb->dev->ifindex, 0, 444 sock_net_uid(net, NULL)); 445 return 0; 446 } 447 448 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) 449 t->err_count++; 450 else 451 t->err_count = 1; 452 t->err_time = jiffies; 453 454 return 0; 455 } 456 457 static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) 458 { 459 const struct ipv6hdr *ipv6h; 460 struct ip6_tnl *tunnel; 461 462 ipv6h = ipv6_hdr(skb); 463 tunnel = ip6gre_tunnel_lookup(skb->dev, 464 &ipv6h->saddr, &ipv6h->daddr, tpi->key, 465 tpi->proto); 466 if (tunnel) { 467 if (tunnel->parms.collect_md) { 468 IP_TUNNEL_DECLARE_FLAGS(flags); 469 struct metadata_dst *tun_dst; 470 __be64 tun_id; 471 472 ip_tunnel_flags_copy(flags, tpi->flags); 473 tun_id = key32_to_tunnel_id(tpi->key); 474 475 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0); 476 if (!tun_dst) 477 return PACKET_REJECT; 478 479 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 480 } else { 481 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error); 482 } 483 484 return PACKET_RCVD; 485 } 486 487 return PACKET_REJECT; 488 } 489 490 static int ip6erspan_rcv(struct sk_buff *skb, 491 struct tnl_ptk_info *tpi, 492 int gre_hdr_len) 493 { 494 struct erspan_base_hdr *ershdr; 495 const struct ipv6hdr *ipv6h; 496 struct erspan_md2 *md2; 497 struct ip6_tnl *tunnel; 498 u8 ver; 499 500 if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr)))) 501 return PACKET_REJECT; 502 503 ipv6h = ipv6_hdr(skb); 504 ershdr = (struct erspan_base_hdr *)skb->data; 505 ver = ershdr->ver; 506 507 tunnel = ip6gre_tunnel_lookup(skb->dev, 508 &ipv6h->saddr, &ipv6h->daddr, tpi->key, 509 tpi->proto); 510 if (tunnel) { 511 int len = erspan_hdr_len(ver); 512 513 if (unlikely(!pskb_may_pull(skb, len))) 514 return PACKET_REJECT; 515 516 if (__iptunnel_pull_header(skb, len, 517 htons(ETH_P_TEB), 518 false, false) < 0) 519 return PACKET_REJECT; 520 521 if (tunnel->parms.collect_md) { 522 struct erspan_metadata *pkt_md, *md; 523 IP_TUNNEL_DECLARE_FLAGS(flags); 524 struct metadata_dst *tun_dst; 525 struct ip_tunnel_info *info; 526 unsigned char *gh; 527 __be64 tun_id; 528 529 __set_bit(IP_TUNNEL_KEY_BIT, tpi->flags); 530 ip_tunnel_flags_copy(flags, tpi->flags); 531 tun_id = key32_to_tunnel_id(tpi->key); 532 533 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 534 sizeof(*md)); 535 if (!tun_dst) 536 return PACKET_REJECT; 537 538 /* MUST set options_len before referencing options */ 539 info = &tun_dst->u.tun_info; 540 info->options_len = sizeof(*md); 541 542 /* skb can be uncloned in __iptunnel_pull_header, so 543 * old pkt_md is no longer valid and we need to reset 544 * it 545 */ 546 gh = skb_network_header(skb) + 547 skb_network_header_len(skb); 548 pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + 549 sizeof(*ershdr)); 550 md = ip_tunnel_info_opts(info); 551 md->version = ver; 552 md2 = &md->u.md2; 553 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE : 554 ERSPAN_V2_MDSIZE); 555 __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 556 info->key.tun_flags); 557 558 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 559 560 } else { 561 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error); 562 } 563 564 return PACKET_RCVD; 565 } 566 567 return PACKET_REJECT; 568 } 569 570 static int gre_rcv(struct sk_buff *skb) 571 { 572 struct tnl_ptk_info tpi; 573 bool csum_err = false; 574 int hdr_len; 575 576 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0); 577 if (hdr_len < 0) 578 goto drop; 579 580 if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) 581 goto drop; 582 583 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || 584 tpi.proto == htons(ETH_P_ERSPAN2))) { 585 if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) 586 return 0; 587 goto out; 588 } 589 590 if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD) 591 return 0; 592 593 out: 594 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 595 drop: 596 dev_core_stats_rx_dropped_inc(skb->dev); 597 kfree_skb(skb); 598 return 0; 599 } 600 601 static int gre_handle_offloads(struct sk_buff *skb, bool csum) 602 { 603 return iptunnel_handle_offloads(skb, 604 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); 605 } 606 607 static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb, 608 struct net_device *dev, 609 struct flowi6 *fl6, __u8 *dsfield, 610 int *encap_limit) 611 { 612 const struct iphdr *iph = ip_hdr(skb); 613 struct ip6_tnl *t = netdev_priv(dev); 614 615 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 616 *encap_limit = t->parms.encap_limit; 617 618 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); 619 620 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 621 *dsfield = ipv4_get_dsfield(iph); 622 else 623 *dsfield = ip6_tclass(t->parms.flowinfo); 624 625 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 626 fl6->flowi6_mark = skb->mark; 627 else 628 fl6->flowi6_mark = t->parms.fwmark; 629 630 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 631 } 632 633 static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb, 634 struct net_device *dev, 635 struct flowi6 *fl6, __u8 *dsfield, 636 int *encap_limit) 637 { 638 struct ipv6hdr *ipv6h; 639 struct ip6_tnl *t = netdev_priv(dev); 640 __u16 offset; 641 642 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 643 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 644 ipv6h = ipv6_hdr(skb); 645 646 if (offset > 0) { 647 struct ipv6_tlv_tnl_enc_lim *tel; 648 649 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 650 if (tel->encap_limit == 0) { 651 icmpv6_ndo_send(skb, ICMPV6_PARAMPROB, 652 ICMPV6_HDR_FIELD, offset + 2); 653 return -1; 654 } 655 *encap_limit = tel->encap_limit - 1; 656 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) { 657 *encap_limit = t->parms.encap_limit; 658 } 659 660 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); 661 662 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 663 *dsfield = ipv6_get_dsfield(ipv6h); 664 else 665 *dsfield = ip6_tclass(t->parms.flowinfo); 666 667 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) 668 fl6->flowlabel |= ip6_flowlabel(ipv6h); 669 670 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 671 fl6->flowi6_mark = skb->mark; 672 else 673 fl6->flowi6_mark = t->parms.fwmark; 674 675 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 676 677 return 0; 678 } 679 680 static int prepare_ip6gre_xmit_other(struct sk_buff *skb, 681 struct net_device *dev, 682 struct flowi6 *fl6, __u8 *dsfield, 683 int *encap_limit) 684 { 685 struct ip6_tnl *t = netdev_priv(dev); 686 687 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 688 *encap_limit = t->parms.encap_limit; 689 690 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); 691 692 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 693 *dsfield = 0; 694 else 695 *dsfield = ip6_tclass(t->parms.flowinfo); 696 697 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 698 fl6->flowi6_mark = skb->mark; 699 else 700 fl6->flowi6_mark = t->parms.fwmark; 701 702 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 703 704 return 0; 705 } 706 707 static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb) 708 { 709 struct ip_tunnel_info *tun_info; 710 711 tun_info = skb_tunnel_info(skb); 712 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))) 713 return ERR_PTR(-EINVAL); 714 715 return tun_info; 716 } 717 718 static netdev_tx_t __gre6_xmit(struct sk_buff *skb, 719 struct net_device *dev, __u8 dsfield, 720 struct flowi6 *fl6, int encap_limit, 721 __u32 *pmtu, __be16 proto) 722 { 723 struct ip6_tnl *tunnel = netdev_priv(dev); 724 IP_TUNNEL_DECLARE_FLAGS(flags); 725 __be16 protocol; 726 727 if (dev->type == ARPHRD_ETHER) 728 IPCB(skb)->flags = 0; 729 730 if (dev->header_ops && dev->type == ARPHRD_IP6GRE) 731 fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr; 732 else 733 fl6->daddr = tunnel->parms.raddr; 734 735 /* Push GRE header. */ 736 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; 737 738 if (tunnel->parms.collect_md) { 739 struct ip_tunnel_info *tun_info; 740 const struct ip_tunnel_key *key; 741 int tun_hlen; 742 743 tun_info = skb_tunnel_info_txcheck(skb); 744 if (IS_ERR(tun_info) || 745 unlikely(ip_tunnel_info_af(tun_info) != AF_INET6)) 746 return -EINVAL; 747 748 key = &tun_info->key; 749 memset(fl6, 0, sizeof(*fl6)); 750 fl6->flowi6_proto = IPPROTO_GRE; 751 fl6->daddr = key->u.ipv6.dst; 752 fl6->flowlabel = key->label; 753 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 754 fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id); 755 756 dsfield = key->tos; 757 ip_tunnel_flags_zero(flags); 758 __set_bit(IP_TUNNEL_CSUM_BIT, flags); 759 __set_bit(IP_TUNNEL_KEY_BIT, flags); 760 __set_bit(IP_TUNNEL_SEQ_BIT, flags); 761 ip_tunnel_flags_and(flags, flags, key->tun_flags); 762 tun_hlen = gre_calc_hlen(flags); 763 764 if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen)) 765 return -ENOMEM; 766 767 gre_build_header(skb, tun_hlen, 768 flags, protocol, 769 tunnel_id_to_key32(tun_info->key.tun_id), 770 test_bit(IP_TUNNEL_SEQ_BIT, flags) ? 771 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 772 0); 773 774 } else { 775 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen)) 776 return -ENOMEM; 777 778 ip_tunnel_flags_copy(flags, tunnel->parms.o_flags); 779 780 gre_build_header(skb, tunnel->tun_hlen, flags, 781 protocol, tunnel->parms.o_key, 782 test_bit(IP_TUNNEL_SEQ_BIT, flags) ? 783 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 784 0); 785 } 786 787 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 788 NEXTHDR_GRE); 789 } 790 791 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) 792 { 793 struct ip6_tnl *t = netdev_priv(dev); 794 int encap_limit = -1; 795 struct flowi6 fl6; 796 __u8 dsfield = 0; 797 __u32 mtu; 798 int err; 799 800 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 801 802 if (!t->parms.collect_md) 803 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, 804 &dsfield, &encap_limit); 805 806 err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 807 t->parms.o_flags)); 808 if (err) 809 return -1; 810 811 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 812 skb->protocol); 813 if (err != 0) { 814 /* XXX: send ICMP error even if DF is not set. */ 815 if (err == -EMSGSIZE) 816 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 817 htonl(mtu)); 818 return -1; 819 } 820 821 return 0; 822 } 823 824 static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) 825 { 826 struct ip6_tnl *t = netdev_priv(dev); 827 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 828 int encap_limit = -1; 829 struct flowi6 fl6; 830 __u8 dsfield = 0; 831 __u32 mtu; 832 int err; 833 834 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr)) 835 return -1; 836 837 if (!t->parms.collect_md && 838 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit)) 839 return -1; 840 841 if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 842 t->parms.o_flags))) 843 return -1; 844 845 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, 846 &mtu, skb->protocol); 847 if (err != 0) { 848 if (err == -EMSGSIZE) 849 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 850 return -1; 851 } 852 853 return 0; 854 } 855 856 static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) 857 { 858 struct ip6_tnl *t = netdev_priv(dev); 859 int encap_limit = -1; 860 struct flowi6 fl6; 861 __u8 dsfield = 0; 862 __u32 mtu; 863 int err; 864 865 if (!t->parms.collect_md && 866 prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit)) 867 return -1; 868 869 err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 870 t->parms.o_flags)); 871 if (err) 872 return err; 873 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol); 874 875 return err; 876 } 877 878 static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, 879 struct net_device *dev) 880 { 881 struct ip6_tnl *t = netdev_priv(dev); 882 __be16 payload_protocol; 883 int ret; 884 885 if (!pskb_inet_may_pull(skb)) 886 goto tx_err; 887 888 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) 889 goto tx_err; 890 891 payload_protocol = skb_protocol(skb, true); 892 switch (payload_protocol) { 893 case htons(ETH_P_IP): 894 ret = ip6gre_xmit_ipv4(skb, dev); 895 break; 896 case htons(ETH_P_IPV6): 897 ret = ip6gre_xmit_ipv6(skb, dev); 898 break; 899 default: 900 ret = ip6gre_xmit_other(skb, dev); 901 break; 902 } 903 904 if (ret < 0) 905 goto tx_err; 906 907 return NETDEV_TX_OK; 908 909 tx_err: 910 if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb))) 911 DEV_STATS_INC(dev, tx_errors); 912 DEV_STATS_INC(dev, tx_dropped); 913 kfree_skb(skb); 914 return NETDEV_TX_OK; 915 } 916 917 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, 918 struct net_device *dev) 919 { 920 struct ip_tunnel_info *tun_info = NULL; 921 struct ip6_tnl *t = netdev_priv(dev); 922 struct dst_entry *dst = skb_dst(skb); 923 IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 924 bool truncate = false; 925 int encap_limit = -1; 926 __u8 dsfield = false; 927 struct flowi6 fl6; 928 int err = -EINVAL; 929 __be16 proto; 930 __u32 mtu; 931 int nhoff; 932 933 if (!pskb_inet_may_pull(skb)) 934 goto tx_err; 935 936 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) 937 goto tx_err; 938 939 if (gre_handle_offloads(skb, false)) 940 goto tx_err; 941 942 if (skb->len > dev->mtu + dev->hard_header_len) { 943 if (pskb_trim(skb, dev->mtu + dev->hard_header_len)) 944 goto tx_err; 945 truncate = true; 946 } 947 948 nhoff = skb_network_offset(skb); 949 if (skb->protocol == htons(ETH_P_IP) && 950 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) 951 truncate = true; 952 953 if (skb->protocol == htons(ETH_P_IPV6)) { 954 int thoff; 955 956 if (skb_transport_header_was_set(skb)) 957 thoff = skb_transport_offset(skb); 958 else 959 thoff = nhoff + sizeof(struct ipv6hdr); 960 if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) 961 truncate = true; 962 } 963 964 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) 965 goto tx_err; 966 967 __clear_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags); 968 IPCB(skb)->flags = 0; 969 970 /* For collect_md mode, derive fl6 from the tunnel key, 971 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}. 972 */ 973 if (t->parms.collect_md) { 974 const struct ip_tunnel_key *key; 975 struct erspan_metadata *md; 976 __be32 tun_id; 977 978 tun_info = skb_tunnel_info_txcheck(skb); 979 if (IS_ERR(tun_info) || 980 unlikely(ip_tunnel_info_af(tun_info) != AF_INET6)) 981 goto tx_err; 982 983 key = &tun_info->key; 984 memset(&fl6, 0, sizeof(fl6)); 985 fl6.flowi6_proto = IPPROTO_GRE; 986 fl6.daddr = key->u.ipv6.dst; 987 fl6.flowlabel = key->label; 988 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 989 fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id); 990 991 dsfield = key->tos; 992 if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 993 tun_info->key.tun_flags)) 994 goto tx_err; 995 if (tun_info->options_len < sizeof(*md)) 996 goto tx_err; 997 md = ip_tunnel_info_opts(tun_info); 998 999 tun_id = tunnel_id_to_key32(key->tun_id); 1000 if (md->version == 1) { 1001 erspan_build_header(skb, 1002 ntohl(tun_id), 1003 ntohl(md->u.index), truncate, 1004 false); 1005 proto = htons(ETH_P_ERSPAN); 1006 } else if (md->version == 2) { 1007 erspan_build_header_v2(skb, 1008 ntohl(tun_id), 1009 md->u.md2.dir, 1010 get_hwid(&md->u.md2), 1011 truncate, false); 1012 proto = htons(ETH_P_ERSPAN2); 1013 } else { 1014 goto tx_err; 1015 } 1016 } else { 1017 switch (skb->protocol) { 1018 case htons(ETH_P_IP): 1019 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1020 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, 1021 &dsfield, &encap_limit); 1022 break; 1023 case htons(ETH_P_IPV6): 1024 if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr)) 1025 goto tx_err; 1026 if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, 1027 &dsfield, &encap_limit)) 1028 goto tx_err; 1029 break; 1030 default: 1031 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1032 break; 1033 } 1034 1035 if (t->parms.erspan_ver == 1) { 1036 erspan_build_header(skb, ntohl(t->parms.o_key), 1037 t->parms.index, 1038 truncate, false); 1039 proto = htons(ETH_P_ERSPAN); 1040 } else if (t->parms.erspan_ver == 2) { 1041 erspan_build_header_v2(skb, ntohl(t->parms.o_key), 1042 t->parms.dir, 1043 t->parms.hwid, 1044 truncate, false); 1045 proto = htons(ETH_P_ERSPAN2); 1046 } else { 1047 goto tx_err; 1048 } 1049 1050 fl6.daddr = t->parms.raddr; 1051 } 1052 1053 /* Push GRE header. */ 1054 __set_bit(IP_TUNNEL_SEQ_BIT, flags); 1055 gre_build_header(skb, 8, flags, proto, 0, 1056 htonl(atomic_fetch_inc(&t->o_seqno))); 1057 1058 /* TooBig packet may have updated dst->dev's mtu */ 1059 if (!t->parms.collect_md && dst) { 1060 mtu = READ_ONCE(dst_dev(dst)->mtu); 1061 if (dst6_mtu(dst) > mtu) 1062 dst->ops->update_pmtu(dst, NULL, skb, mtu, false); 1063 } 1064 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1065 NEXTHDR_GRE); 1066 if (err != 0) { 1067 /* XXX: send ICMP error even if DF is not set. */ 1068 if (err == -EMSGSIZE) { 1069 if (skb->protocol == htons(ETH_P_IP)) 1070 icmp_ndo_send(skb, ICMP_DEST_UNREACH, 1071 ICMP_FRAG_NEEDED, htonl(mtu)); 1072 else 1073 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1074 } 1075 1076 goto tx_err; 1077 } 1078 return NETDEV_TX_OK; 1079 1080 tx_err: 1081 if (!IS_ERR(tun_info)) 1082 DEV_STATS_INC(dev, tx_errors); 1083 DEV_STATS_INC(dev, tx_dropped); 1084 kfree_skb(skb); 1085 return NETDEV_TX_OK; 1086 } 1087 1088 static void ip6gre_tnl_link_config_common(struct ip6_tnl *t) 1089 { 1090 struct net_device *dev = t->dev; 1091 struct __ip6_tnl_parm *p = &t->parms; 1092 struct flowi6 *fl6 = &t->fl.u.ip6; 1093 1094 if (dev->type != ARPHRD_ETHER) { 1095 __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr)); 1096 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); 1097 } 1098 1099 /* Set up flowi template */ 1100 fl6->saddr = p->laddr; 1101 fl6->daddr = p->raddr; 1102 fl6->flowi6_oif = p->link; 1103 fl6->flowlabel = 0; 1104 fl6->flowi6_proto = IPPROTO_GRE; 1105 fl6->fl6_gre_key = t->parms.o_key; 1106 1107 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) 1108 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; 1109 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) 1110 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; 1111 1112 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET); 1113 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr); 1114 1115 if (p->flags&IP6_TNL_F_CAP_XMIT && 1116 p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER) 1117 dev->flags |= IFF_POINTOPOINT; 1118 else 1119 dev->flags &= ~IFF_POINTOPOINT; 1120 } 1121 1122 static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, 1123 int t_hlen) 1124 { 1125 const struct __ip6_tnl_parm *p = &t->parms; 1126 struct net_device *dev = t->dev; 1127 1128 if (p->flags & IP6_TNL_F_CAP_XMIT) { 1129 int strict = (ipv6_addr_type(&p->raddr) & 1130 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 1131 1132 struct rt6_info *rt = rt6_lookup(t->net, 1133 &p->raddr, &p->laddr, 1134 p->link, NULL, strict); 1135 1136 if (!rt) 1137 return; 1138 1139 if (rt->dst.dev) { 1140 unsigned short dst_len = rt->dst.dev->hard_header_len + 1141 t_hlen; 1142 1143 if (t->dev->header_ops) 1144 dev->hard_header_len = dst_len; 1145 else 1146 dev->needed_headroom = dst_len; 1147 1148 if (set_mtu) { 1149 int mtu = rt->dst.dev->mtu - t_hlen; 1150 1151 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1152 mtu -= 8; 1153 if (dev->type == ARPHRD_ETHER) 1154 mtu -= ETH_HLEN; 1155 1156 if (mtu < IPV6_MIN_MTU) 1157 mtu = IPV6_MIN_MTU; 1158 WRITE_ONCE(dev->mtu, mtu); 1159 } 1160 } 1161 ip6_rt_put(rt); 1162 } 1163 } 1164 1165 static int ip6gre_calc_hlen(struct ip6_tnl *tunnel) 1166 { 1167 int t_hlen; 1168 1169 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); 1170 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; 1171 1172 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); 1173 1174 if (tunnel->dev->header_ops) 1175 tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; 1176 else 1177 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; 1178 1179 return t_hlen; 1180 } 1181 1182 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) 1183 { 1184 ip6gre_tnl_link_config_common(t); 1185 ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t)); 1186 } 1187 1188 static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, 1189 const struct __ip6_tnl_parm *p) 1190 { 1191 t->parms.laddr = p->laddr; 1192 t->parms.raddr = p->raddr; 1193 t->parms.flags = p->flags; 1194 t->parms.hop_limit = p->hop_limit; 1195 t->parms.encap_limit = p->encap_limit; 1196 t->parms.flowinfo = p->flowinfo; 1197 t->parms.link = p->link; 1198 t->parms.proto = p->proto; 1199 t->parms.i_key = p->i_key; 1200 t->parms.o_key = p->o_key; 1201 ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags); 1202 ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags); 1203 t->parms.fwmark = p->fwmark; 1204 t->parms.erspan_ver = p->erspan_ver; 1205 t->parms.index = p->index; 1206 t->parms.dir = p->dir; 1207 t->parms.hwid = p->hwid; 1208 dst_cache_reset(&t->dst_cache); 1209 } 1210 1211 static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p, 1212 int set_mtu) 1213 { 1214 ip6gre_tnl_copy_tnl_parm(t, p); 1215 ip6gre_tnl_link_config(t, set_mtu); 1216 return 0; 1217 } 1218 1219 static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p, 1220 const struct ip6_tnl_parm2 *u) 1221 { 1222 p->laddr = u->laddr; 1223 p->raddr = u->raddr; 1224 p->flags = u->flags; 1225 p->hop_limit = u->hop_limit; 1226 p->encap_limit = u->encap_limit; 1227 p->flowinfo = u->flowinfo; 1228 p->link = u->link; 1229 p->i_key = u->i_key; 1230 p->o_key = u->o_key; 1231 gre_flags_to_tnl_flags(p->i_flags, u->i_flags); 1232 gre_flags_to_tnl_flags(p->o_flags, u->o_flags); 1233 memcpy(p->name, u->name, sizeof(u->name)); 1234 } 1235 1236 static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u, 1237 const struct __ip6_tnl_parm *p) 1238 { 1239 u->proto = IPPROTO_GRE; 1240 u->laddr = p->laddr; 1241 u->raddr = p->raddr; 1242 u->flags = p->flags; 1243 u->hop_limit = p->hop_limit; 1244 u->encap_limit = p->encap_limit; 1245 u->flowinfo = p->flowinfo; 1246 u->link = p->link; 1247 u->i_key = p->i_key; 1248 u->o_key = p->o_key; 1249 u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags); 1250 u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags); 1251 memcpy(u->name, p->name, sizeof(u->name)); 1252 } 1253 1254 static int ip6gre_tunnel_siocdevprivate(struct net_device *dev, 1255 struct ifreq *ifr, void __user *data, 1256 int cmd) 1257 { 1258 int err = 0; 1259 struct ip6_tnl_parm2 p; 1260 struct __ip6_tnl_parm p1; 1261 struct ip6_tnl *t = netdev_priv(dev); 1262 struct net *net = t->net; 1263 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1264 1265 memset(&p1, 0, sizeof(p1)); 1266 1267 switch (cmd) { 1268 case SIOCGETTUNNEL: 1269 if (dev == ign->fb_tunnel_dev) { 1270 if (copy_from_user(&p, data, sizeof(p))) { 1271 err = -EFAULT; 1272 break; 1273 } 1274 ip6gre_tnl_parm_from_user(&p1, &p); 1275 t = ip6gre_tunnel_locate(net, &p1, 0); 1276 if (!t) 1277 t = netdev_priv(dev); 1278 } 1279 memset(&p, 0, sizeof(p)); 1280 ip6gre_tnl_parm_to_user(&p, &t->parms); 1281 if (copy_to_user(data, &p, sizeof(p))) 1282 err = -EFAULT; 1283 break; 1284 1285 case SIOCADDTUNNEL: 1286 case SIOCCHGTUNNEL: 1287 err = -EPERM; 1288 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1289 goto done; 1290 1291 err = -EFAULT; 1292 if (copy_from_user(&p, data, sizeof(p))) 1293 goto done; 1294 1295 err = -EINVAL; 1296 if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)) 1297 goto done; 1298 1299 if (!(p.i_flags&GRE_KEY)) 1300 p.i_key = 0; 1301 if (!(p.o_flags&GRE_KEY)) 1302 p.o_key = 0; 1303 1304 ip6gre_tnl_parm_from_user(&p1, &p); 1305 t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL); 1306 1307 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { 1308 if (t) { 1309 if (t->dev != dev) { 1310 err = -EEXIST; 1311 break; 1312 } 1313 } else { 1314 t = netdev_priv(dev); 1315 1316 ip6gre_tunnel_unlink(ign, t); 1317 synchronize_net(); 1318 ip6gre_tnl_change(t, &p1, 1); 1319 ip6gre_tunnel_link(ign, t); 1320 netdev_state_change(dev); 1321 } 1322 } 1323 1324 if (t) { 1325 err = 0; 1326 1327 memset(&p, 0, sizeof(p)); 1328 ip6gre_tnl_parm_to_user(&p, &t->parms); 1329 if (copy_to_user(data, &p, sizeof(p))) 1330 err = -EFAULT; 1331 } else 1332 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); 1333 break; 1334 1335 case SIOCDELTUNNEL: 1336 err = -EPERM; 1337 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1338 goto done; 1339 1340 if (dev == ign->fb_tunnel_dev) { 1341 err = -EFAULT; 1342 if (copy_from_user(&p, data, sizeof(p))) 1343 goto done; 1344 err = -ENOENT; 1345 ip6gre_tnl_parm_from_user(&p1, &p); 1346 t = ip6gre_tunnel_locate(net, &p1, 0); 1347 if (!t) 1348 goto done; 1349 err = -EPERM; 1350 if (t == netdev_priv(ign->fb_tunnel_dev)) 1351 goto done; 1352 dev = t->dev; 1353 } 1354 unregister_netdevice(dev); 1355 err = 0; 1356 break; 1357 1358 default: 1359 err = -EINVAL; 1360 } 1361 1362 done: 1363 return err; 1364 } 1365 1366 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev, 1367 unsigned short type, const void *daddr, 1368 const void *saddr, unsigned int len) 1369 { 1370 struct ip6_tnl *t = netdev_priv(dev); 1371 struct ipv6hdr *ipv6h; 1372 int needed; 1373 __be16 *p; 1374 1375 needed = t->hlen + sizeof(*ipv6h); 1376 if (skb_headroom(skb) < needed && 1377 pskb_expand_head(skb, HH_DATA_ALIGN(needed - skb_headroom(skb)), 1378 0, GFP_ATOMIC)) 1379 return -needed; 1380 1381 ipv6h = skb_push(skb, needed); 1382 ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb, 1383 t->fl.u.ip6.flowlabel, 1384 true, &t->fl.u.ip6)); 1385 ipv6h->hop_limit = t->parms.hop_limit; 1386 ipv6h->nexthdr = NEXTHDR_GRE; 1387 ipv6h->saddr = t->parms.laddr; 1388 ipv6h->daddr = t->parms.raddr; 1389 1390 p = (__be16 *)(ipv6h + 1); 1391 p[0] = ip_tunnel_flags_to_be16(t->parms.o_flags); 1392 p[1] = htons(type); 1393 1394 /* 1395 * Set the source hardware address. 1396 */ 1397 1398 if (saddr) 1399 memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr)); 1400 if (daddr) 1401 memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr)); 1402 if (!ipv6_addr_any(&ipv6h->daddr)) 1403 return t->hlen; 1404 1405 return -t->hlen; 1406 } 1407 1408 static const struct header_ops ip6gre_header_ops = { 1409 .create = ip6gre_header, 1410 }; 1411 1412 static const struct net_device_ops ip6gre_netdev_ops = { 1413 .ndo_init = ip6gre_tunnel_init, 1414 .ndo_uninit = ip6gre_tunnel_uninit, 1415 .ndo_start_xmit = ip6gre_tunnel_xmit, 1416 .ndo_siocdevprivate = ip6gre_tunnel_siocdevprivate, 1417 .ndo_change_mtu = ip6_tnl_change_mtu, 1418 .ndo_get_iflink = ip6_tnl_get_iflink, 1419 }; 1420 1421 static void ip6gre_dev_free(struct net_device *dev) 1422 { 1423 struct ip6_tnl *t = netdev_priv(dev); 1424 1425 gro_cells_destroy(&t->gro_cells); 1426 dst_cache_destroy(&t->dst_cache); 1427 } 1428 1429 static void ip6gre_tunnel_setup(struct net_device *dev) 1430 { 1431 dev->netdev_ops = &ip6gre_netdev_ops; 1432 dev->needs_free_netdev = true; 1433 dev->priv_destructor = ip6gre_dev_free; 1434 1435 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 1436 dev->type = ARPHRD_IP6GRE; 1437 1438 dev->flags |= IFF_NOARP; 1439 dev->addr_len = sizeof(struct in6_addr); 1440 netif_keep_dst(dev); 1441 /* This perm addr will be used as interface identifier by IPv6 */ 1442 dev->addr_assign_type = NET_ADDR_RANDOM; 1443 eth_random_addr(dev->perm_addr); 1444 } 1445 1446 #define GRE6_FEATURES (NETIF_F_SG | \ 1447 NETIF_F_FRAGLIST | \ 1448 NETIF_F_HIGHDMA | \ 1449 NETIF_F_HW_CSUM) 1450 1451 static void ip6gre_tnl_init_features(struct net_device *dev) 1452 { 1453 struct ip6_tnl *nt = netdev_priv(dev); 1454 1455 dev->features |= GRE6_FEATURES; 1456 dev->hw_features |= GRE6_FEATURES; 1457 1458 /* TCP offload with GRE SEQ is not supported, nor can we support 2 1459 * levels of outer headers requiring an update. 1460 */ 1461 if (test_bit(IP_TUNNEL_SEQ_BIT, nt->parms.o_flags)) 1462 return; 1463 if (test_bit(IP_TUNNEL_CSUM_BIT, nt->parms.o_flags) && 1464 nt->encap.type != TUNNEL_ENCAP_NONE) 1465 return; 1466 1467 dev->features |= NETIF_F_GSO_SOFTWARE; 1468 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1469 1470 dev->lltx = true; 1471 } 1472 1473 static int ip6gre_tunnel_init_common(struct net_device *dev) 1474 { 1475 struct ip6_tnl *tunnel; 1476 int ret; 1477 int t_hlen; 1478 1479 tunnel = netdev_priv(dev); 1480 1481 tunnel->dev = dev; 1482 strscpy(tunnel->parms.name, dev->name); 1483 1484 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); 1485 if (ret) 1486 return ret; 1487 1488 ret = gro_cells_init(&tunnel->gro_cells, dev); 1489 if (ret) 1490 goto cleanup_dst_cache_init; 1491 1492 t_hlen = ip6gre_calc_hlen(tunnel); 1493 dev->mtu = ETH_DATA_LEN - t_hlen; 1494 if (dev->type == ARPHRD_ETHER) 1495 dev->mtu -= ETH_HLEN; 1496 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1497 dev->mtu -= 8; 1498 1499 if (tunnel->parms.collect_md) { 1500 netif_keep_dst(dev); 1501 } 1502 ip6gre_tnl_init_features(dev); 1503 1504 netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL); 1505 netdev_lockdep_set_classes(dev); 1506 return 0; 1507 1508 cleanup_dst_cache_init: 1509 dst_cache_destroy(&tunnel->dst_cache); 1510 return ret; 1511 } 1512 1513 static int ip6gre_tunnel_init(struct net_device *dev) 1514 { 1515 struct ip6_tnl *tunnel; 1516 int ret; 1517 1518 ret = ip6gre_tunnel_init_common(dev); 1519 if (ret) 1520 return ret; 1521 1522 tunnel = netdev_priv(dev); 1523 1524 if (tunnel->parms.collect_md) 1525 return 0; 1526 1527 __dev_addr_set(dev, &tunnel->parms.laddr, sizeof(struct in6_addr)); 1528 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); 1529 1530 if (ipv6_addr_any(&tunnel->parms.raddr)) 1531 dev->header_ops = &ip6gre_header_ops; 1532 1533 return 0; 1534 } 1535 1536 static void ip6gre_fb_tunnel_init(struct net_device *dev) 1537 { 1538 struct ip6_tnl *tunnel = netdev_priv(dev); 1539 1540 tunnel->dev = dev; 1541 tunnel->net = dev_net(dev); 1542 strscpy(tunnel->parms.name, dev->name); 1543 1544 tunnel->hlen = sizeof(struct ipv6hdr) + 4; 1545 } 1546 1547 static struct inet6_protocol ip6gre_protocol __read_mostly = { 1548 .handler = gre_rcv, 1549 .err_handler = ip6gre_err, 1550 .flags = INET6_PROTO_FINAL, 1551 }; 1552 1553 static void __net_exit ip6gre_exit_rtnl_net(struct net *net, struct list_head *head) 1554 { 1555 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1556 struct net_device *dev, *aux; 1557 int prio; 1558 1559 for_each_netdev_safe(net, dev, aux) 1560 if (dev->rtnl_link_ops == &ip6gre_link_ops || 1561 dev->rtnl_link_ops == &ip6gre_tap_ops || 1562 dev->rtnl_link_ops == &ip6erspan_tap_ops) 1563 unregister_netdevice_queue(dev, head); 1564 1565 for (prio = 0; prio < 4; prio++) { 1566 int h; 1567 for (h = 0; h < IP6_GRE_HASH_SIZE; h++) { 1568 struct ip6_tnl *t; 1569 1570 t = rtnl_net_dereference(net, ign->tunnels[prio][h]); 1571 1572 while (t) { 1573 /* If dev is in the same netns, it has already 1574 * been added to the list by the previous loop. 1575 */ 1576 if (!net_eq(dev_net(t->dev), net)) 1577 unregister_netdevice_queue(t->dev, head); 1578 1579 t = rtnl_net_dereference(net, t->next); 1580 } 1581 } 1582 } 1583 } 1584 1585 static int __net_init ip6gre_init_net(struct net *net) 1586 { 1587 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1588 struct net_device *ndev; 1589 int err; 1590 1591 if (!net_has_fallback_tunnels(net)) 1592 return 0; 1593 ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", 1594 NET_NAME_UNKNOWN, ip6gre_tunnel_setup); 1595 if (!ndev) { 1596 err = -ENOMEM; 1597 goto err_alloc_dev; 1598 } 1599 ign->fb_tunnel_dev = ndev; 1600 dev_net_set(ign->fb_tunnel_dev, net); 1601 /* FB netdevice is special: we have one, and only one per netns. 1602 * Allowing to move it to another netns is clearly unsafe. 1603 */ 1604 ign->fb_tunnel_dev->netns_immutable = true; 1605 1606 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev); 1607 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops; 1608 1609 err = register_netdev(ign->fb_tunnel_dev); 1610 if (err) 1611 goto err_reg_dev; 1612 1613 rcu_assign_pointer(ign->tunnels_wc[0], 1614 netdev_priv(ign->fb_tunnel_dev)); 1615 return 0; 1616 1617 err_reg_dev: 1618 free_netdev(ndev); 1619 err_alloc_dev: 1620 return err; 1621 } 1622 1623 static struct pernet_operations ip6gre_net_ops = { 1624 .init = ip6gre_init_net, 1625 .exit_rtnl = ip6gre_exit_rtnl_net, 1626 .id = &ip6gre_net_id, 1627 .size = sizeof(struct ip6gre_net), 1628 }; 1629 1630 static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[], 1631 struct netlink_ext_ack *extack) 1632 { 1633 __be16 flags; 1634 1635 if (!data) 1636 return 0; 1637 1638 flags = 0; 1639 if (data[IFLA_GRE_IFLAGS]) 1640 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); 1641 if (data[IFLA_GRE_OFLAGS]) 1642 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); 1643 if (flags & (GRE_VERSION|GRE_ROUTING)) 1644 return -EINVAL; 1645 1646 return 0; 1647 } 1648 1649 static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[], 1650 struct netlink_ext_ack *extack) 1651 { 1652 struct in6_addr daddr; 1653 1654 if (tb[IFLA_ADDRESS]) { 1655 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) 1656 return -EINVAL; 1657 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) 1658 return -EADDRNOTAVAIL; 1659 } 1660 1661 if (!data) 1662 goto out; 1663 1664 if (data[IFLA_GRE_REMOTE]) { 1665 daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]); 1666 if (ipv6_addr_any(&daddr)) 1667 return -EINVAL; 1668 } 1669 1670 out: 1671 return ip6gre_tunnel_validate(tb, data, extack); 1672 } 1673 1674 static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], 1675 struct netlink_ext_ack *extack) 1676 { 1677 __be16 flags = 0; 1678 int ret, ver = 0; 1679 1680 if (!data) 1681 return 0; 1682 1683 ret = ip6gre_tap_validate(tb, data, extack); 1684 if (ret) 1685 return ret; 1686 1687 /* ERSPAN should only have GRE sequence and key flag */ 1688 if (data[IFLA_GRE_OFLAGS]) 1689 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); 1690 if (data[IFLA_GRE_IFLAGS]) 1691 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); 1692 if (!data[IFLA_GRE_COLLECT_METADATA] && 1693 flags != (GRE_SEQ | GRE_KEY)) 1694 return -EINVAL; 1695 1696 /* ERSPAN Session ID only has 10-bit. Since we reuse 1697 * 32-bit key field as ID, check it's range. 1698 */ 1699 if (data[IFLA_GRE_IKEY] && 1700 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK)) 1701 return -EINVAL; 1702 1703 if (data[IFLA_GRE_OKEY] && 1704 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK)) 1705 return -EINVAL; 1706 1707 if (data[IFLA_GRE_ERSPAN_VER]) { 1708 ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); 1709 if (ver != 1 && ver != 2) 1710 return -EINVAL; 1711 } 1712 1713 if (ver == 1) { 1714 if (data[IFLA_GRE_ERSPAN_INDEX]) { 1715 u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); 1716 1717 if (index & ~INDEX_MASK) 1718 return -EINVAL; 1719 } 1720 } else if (ver == 2) { 1721 if (data[IFLA_GRE_ERSPAN_DIR]) { 1722 u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); 1723 1724 if (dir & ~(DIR_MASK >> DIR_OFFSET)) 1725 return -EINVAL; 1726 } 1727 1728 if (data[IFLA_GRE_ERSPAN_HWID]) { 1729 u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); 1730 1731 if (hwid & ~(HWID_MASK >> HWID_OFFSET)) 1732 return -EINVAL; 1733 } 1734 } 1735 1736 return 0; 1737 } 1738 1739 static void ip6erspan_set_version(struct nlattr *data[], 1740 struct __ip6_tnl_parm *parms) 1741 { 1742 if (!data) 1743 return; 1744 1745 parms->erspan_ver = 1; 1746 if (data[IFLA_GRE_ERSPAN_VER]) 1747 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); 1748 1749 if (parms->erspan_ver == 1) { 1750 if (data[IFLA_GRE_ERSPAN_INDEX]) 1751 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); 1752 } else if (parms->erspan_ver == 2) { 1753 if (data[IFLA_GRE_ERSPAN_DIR]) 1754 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); 1755 if (data[IFLA_GRE_ERSPAN_HWID]) 1756 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); 1757 } 1758 } 1759 1760 static void ip6gre_netlink_parms(struct nlattr *data[], 1761 struct __ip6_tnl_parm *parms) 1762 { 1763 memset(parms, 0, sizeof(*parms)); 1764 1765 if (!data) 1766 return; 1767 1768 if (data[IFLA_GRE_LINK]) 1769 parms->link = nla_get_u32(data[IFLA_GRE_LINK]); 1770 1771 if (data[IFLA_GRE_IFLAGS]) 1772 gre_flags_to_tnl_flags(parms->i_flags, 1773 nla_get_be16(data[IFLA_GRE_IFLAGS])); 1774 1775 if (data[IFLA_GRE_OFLAGS]) 1776 gre_flags_to_tnl_flags(parms->o_flags, 1777 nla_get_be16(data[IFLA_GRE_OFLAGS])); 1778 1779 if (data[IFLA_GRE_IKEY]) 1780 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); 1781 1782 if (data[IFLA_GRE_OKEY]) 1783 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]); 1784 1785 if (data[IFLA_GRE_LOCAL]) 1786 parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]); 1787 1788 if (data[IFLA_GRE_REMOTE]) 1789 parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]); 1790 1791 if (data[IFLA_GRE_TTL]) 1792 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]); 1793 1794 if (data[IFLA_GRE_ENCAP_LIMIT]) 1795 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]); 1796 1797 if (data[IFLA_GRE_FLOWINFO]) 1798 parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]); 1799 1800 if (data[IFLA_GRE_FLAGS]) 1801 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]); 1802 1803 if (data[IFLA_GRE_FWMARK]) 1804 parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); 1805 1806 if (data[IFLA_GRE_COLLECT_METADATA]) 1807 parms->collect_md = true; 1808 } 1809 1810 static int ip6gre_tap_init(struct net_device *dev) 1811 { 1812 int ret; 1813 1814 ret = ip6gre_tunnel_init_common(dev); 1815 if (ret) 1816 return ret; 1817 1818 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1819 1820 return 0; 1821 } 1822 1823 static const struct net_device_ops ip6gre_tap_netdev_ops = { 1824 .ndo_init = ip6gre_tap_init, 1825 .ndo_uninit = ip6gre_tunnel_uninit, 1826 .ndo_start_xmit = ip6gre_tunnel_xmit, 1827 .ndo_set_mac_address = eth_mac_addr, 1828 .ndo_validate_addr = eth_validate_addr, 1829 .ndo_change_mtu = ip6_tnl_change_mtu, 1830 .ndo_get_iflink = ip6_tnl_get_iflink, 1831 }; 1832 1833 static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel) 1834 { 1835 int t_hlen; 1836 1837 tunnel->tun_hlen = 8; 1838 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + 1839 erspan_hdr_len(tunnel->parms.erspan_ver); 1840 1841 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); 1842 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; 1843 return t_hlen; 1844 } 1845 1846 static int ip6erspan_tap_init(struct net_device *dev) 1847 { 1848 struct ip6_tnl *tunnel; 1849 int t_hlen; 1850 int ret; 1851 1852 tunnel = netdev_priv(dev); 1853 1854 tunnel->dev = dev; 1855 strscpy(tunnel->parms.name, dev->name); 1856 1857 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); 1858 if (ret) 1859 return ret; 1860 1861 ret = gro_cells_init(&tunnel->gro_cells, dev); 1862 if (ret) 1863 goto cleanup_dst_cache_init; 1864 1865 t_hlen = ip6erspan_calc_hlen(tunnel); 1866 dev->mtu = ETH_DATA_LEN - t_hlen; 1867 if (dev->type == ARPHRD_ETHER) 1868 dev->mtu -= ETH_HLEN; 1869 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1870 dev->mtu -= 8; 1871 1872 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1873 ip6erspan_tnl_link_config(tunnel, 1); 1874 1875 netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL); 1876 netdev_lockdep_set_classes(dev); 1877 return 0; 1878 1879 cleanup_dst_cache_init: 1880 dst_cache_destroy(&tunnel->dst_cache); 1881 return ret; 1882 } 1883 1884 static const struct net_device_ops ip6erspan_netdev_ops = { 1885 .ndo_init = ip6erspan_tap_init, 1886 .ndo_uninit = ip6erspan_tunnel_uninit, 1887 .ndo_start_xmit = ip6erspan_tunnel_xmit, 1888 .ndo_set_mac_address = eth_mac_addr, 1889 .ndo_validate_addr = eth_validate_addr, 1890 .ndo_change_mtu = ip6_tnl_change_mtu, 1891 .ndo_get_iflink = ip6_tnl_get_iflink, 1892 }; 1893 1894 static void ip6gre_tap_setup(struct net_device *dev) 1895 { 1896 1897 ether_setup(dev); 1898 1899 dev->max_mtu = 0; 1900 dev->netdev_ops = &ip6gre_tap_netdev_ops; 1901 dev->needs_free_netdev = true; 1902 dev->priv_destructor = ip6gre_dev_free; 1903 1904 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 1905 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1906 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1907 netif_keep_dst(dev); 1908 } 1909 1910 static bool ip6gre_netlink_encap_parms(struct nlattr *data[], 1911 struct ip_tunnel_encap *ipencap) 1912 { 1913 bool ret = false; 1914 1915 memset(ipencap, 0, sizeof(*ipencap)); 1916 1917 if (!data) 1918 return ret; 1919 1920 if (data[IFLA_GRE_ENCAP_TYPE]) { 1921 ret = true; 1922 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]); 1923 } 1924 1925 if (data[IFLA_GRE_ENCAP_FLAGS]) { 1926 ret = true; 1927 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]); 1928 } 1929 1930 if (data[IFLA_GRE_ENCAP_SPORT]) { 1931 ret = true; 1932 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]); 1933 } 1934 1935 if (data[IFLA_GRE_ENCAP_DPORT]) { 1936 ret = true; 1937 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]); 1938 } 1939 1940 return ret; 1941 } 1942 1943 static int ip6gre_newlink_common(struct net *link_net, struct net_device *dev, 1944 struct nlattr *tb[], struct nlattr *data[], 1945 struct netlink_ext_ack *extack) 1946 { 1947 struct ip6_tnl *nt; 1948 struct ip_tunnel_encap ipencap; 1949 int err; 1950 1951 nt = netdev_priv(dev); 1952 1953 if (ip6gre_netlink_encap_parms(data, &ipencap)) { 1954 int err = ip6_tnl_encap_setup(nt, &ipencap); 1955 1956 if (err < 0) 1957 return err; 1958 } 1959 1960 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) 1961 eth_hw_addr_random(dev); 1962 1963 nt->dev = dev; 1964 nt->net = link_net; 1965 1966 err = register_netdevice(dev); 1967 if (err) 1968 goto out; 1969 1970 if (tb[IFLA_MTU]) 1971 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); 1972 1973 out: 1974 return err; 1975 } 1976 1977 static int ip6gre_newlink(struct net_device *dev, 1978 struct rtnl_newlink_params *params, 1979 struct netlink_ext_ack *extack) 1980 { 1981 struct net *net = params->link_net ? : dev_net(dev); 1982 struct ip6_tnl *nt = netdev_priv(dev); 1983 struct nlattr **data = params->data; 1984 struct nlattr **tb = params->tb; 1985 struct ip6gre_net *ign; 1986 int err; 1987 1988 ip6gre_netlink_parms(data, &nt->parms); 1989 ign = net_generic(net, ip6gre_net_id); 1990 1991 if (nt->parms.collect_md) { 1992 if (rtnl_dereference(ign->collect_md_tun)) 1993 return -EEXIST; 1994 } else { 1995 if (ip6gre_tunnel_find(net, &nt->parms, dev->type)) 1996 return -EEXIST; 1997 } 1998 1999 err = ip6gre_newlink_common(net, dev, tb, data, extack); 2000 if (!err) { 2001 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); 2002 ip6gre_tunnel_link_md(ign, nt); 2003 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt); 2004 } 2005 return err; 2006 } 2007 2008 static struct ip6_tnl * 2009 ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[], 2010 struct nlattr *data[], struct __ip6_tnl_parm *p_p, 2011 struct netlink_ext_ack *extack) 2012 { 2013 struct ip6_tnl *t, *nt = netdev_priv(dev); 2014 struct net *net = nt->net; 2015 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 2016 struct ip_tunnel_encap ipencap; 2017 2018 if (dev == ign->fb_tunnel_dev) 2019 return ERR_PTR(-EINVAL); 2020 2021 if (ip6gre_netlink_encap_parms(data, &ipencap)) { 2022 int err = ip6_tnl_encap_setup(nt, &ipencap); 2023 2024 if (err < 0) 2025 return ERR_PTR(err); 2026 } 2027 2028 ip6gre_netlink_parms(data, p_p); 2029 2030 t = ip6gre_tunnel_locate(net, p_p, 0); 2031 2032 if (t) { 2033 if (t->dev != dev) 2034 return ERR_PTR(-EEXIST); 2035 } else { 2036 t = nt; 2037 } 2038 2039 return t; 2040 } 2041 2042 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], 2043 struct nlattr *data[], 2044 struct netlink_ext_ack *extack) 2045 { 2046 struct ip6_tnl *t = netdev_priv(dev); 2047 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 2048 struct __ip6_tnl_parm p; 2049 2050 t = ip6gre_changelink_common(dev, tb, data, &p, extack); 2051 if (IS_ERR(t)) 2052 return PTR_ERR(t); 2053 2054 ip6gre_tunnel_unlink_md(ign, t); 2055 ip6gre_tunnel_unlink(ign, t); 2056 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); 2057 ip6gre_tunnel_link_md(ign, t); 2058 ip6gre_tunnel_link(ign, t); 2059 return 0; 2060 } 2061 2062 static void ip6gre_dellink(struct net_device *dev, struct list_head *head) 2063 { 2064 struct net *net = dev_net(dev); 2065 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 2066 2067 if (dev != ign->fb_tunnel_dev) 2068 unregister_netdevice_queue(dev, head); 2069 } 2070 2071 static size_t ip6gre_get_size(const struct net_device *dev) 2072 { 2073 return 2074 /* IFLA_GRE_LINK */ 2075 nla_total_size(4) + 2076 /* IFLA_GRE_IFLAGS */ 2077 nla_total_size(2) + 2078 /* IFLA_GRE_OFLAGS */ 2079 nla_total_size(2) + 2080 /* IFLA_GRE_IKEY */ 2081 nla_total_size(4) + 2082 /* IFLA_GRE_OKEY */ 2083 nla_total_size(4) + 2084 /* IFLA_GRE_LOCAL */ 2085 nla_total_size(sizeof(struct in6_addr)) + 2086 /* IFLA_GRE_REMOTE */ 2087 nla_total_size(sizeof(struct in6_addr)) + 2088 /* IFLA_GRE_TTL */ 2089 nla_total_size(1) + 2090 /* IFLA_GRE_ENCAP_LIMIT */ 2091 nla_total_size(1) + 2092 /* IFLA_GRE_FLOWINFO */ 2093 nla_total_size(4) + 2094 /* IFLA_GRE_FLAGS */ 2095 nla_total_size(4) + 2096 /* IFLA_GRE_ENCAP_TYPE */ 2097 nla_total_size(2) + 2098 /* IFLA_GRE_ENCAP_FLAGS */ 2099 nla_total_size(2) + 2100 /* IFLA_GRE_ENCAP_SPORT */ 2101 nla_total_size(2) + 2102 /* IFLA_GRE_ENCAP_DPORT */ 2103 nla_total_size(2) + 2104 /* IFLA_GRE_COLLECT_METADATA */ 2105 nla_total_size(0) + 2106 /* IFLA_GRE_FWMARK */ 2107 nla_total_size(4) + 2108 /* IFLA_GRE_ERSPAN_INDEX */ 2109 nla_total_size(4) + 2110 0; 2111 } 2112 2113 static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) 2114 { 2115 struct ip6_tnl *t = netdev_priv(dev); 2116 struct __ip6_tnl_parm *p = &t->parms; 2117 IP_TUNNEL_DECLARE_FLAGS(o_flags); 2118 2119 ip_tunnel_flags_copy(o_flags, p->o_flags); 2120 2121 if (p->erspan_ver == 1 || p->erspan_ver == 2) { 2122 if (!p->collect_md) 2123 __set_bit(IP_TUNNEL_KEY_BIT, o_flags); 2124 2125 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) 2126 goto nla_put_failure; 2127 2128 if (p->erspan_ver == 1) { 2129 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) 2130 goto nla_put_failure; 2131 } else { 2132 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) 2133 goto nla_put_failure; 2134 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) 2135 goto nla_put_failure; 2136 } 2137 } 2138 2139 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 2140 nla_put_be16(skb, IFLA_GRE_IFLAGS, 2141 gre_tnl_flags_to_gre_flags(p->i_flags)) || 2142 nla_put_be16(skb, IFLA_GRE_OFLAGS, 2143 gre_tnl_flags_to_gre_flags(o_flags)) || 2144 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 2145 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 2146 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || 2147 nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) || 2148 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || 2149 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || 2150 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || 2151 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || 2152 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark)) 2153 goto nla_put_failure; 2154 2155 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, 2156 t->encap.type) || 2157 nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT, 2158 t->encap.sport) || 2159 nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT, 2160 t->encap.dport) || 2161 nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, 2162 t->encap.flags)) 2163 goto nla_put_failure; 2164 2165 if (p->collect_md) { 2166 if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA)) 2167 goto nla_put_failure; 2168 } 2169 2170 return 0; 2171 2172 nla_put_failure: 2173 return -EMSGSIZE; 2174 } 2175 2176 static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = { 2177 [IFLA_GRE_LINK] = { .type = NLA_U32 }, 2178 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 }, 2179 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 }, 2180 [IFLA_GRE_IKEY] = { .type = NLA_U32 }, 2181 [IFLA_GRE_OKEY] = { .type = NLA_U32 }, 2182 [IFLA_GRE_LOCAL] = { .len = sizeof_field(struct ipv6hdr, saddr) }, 2183 [IFLA_GRE_REMOTE] = { .len = sizeof_field(struct ipv6hdr, daddr) }, 2184 [IFLA_GRE_TTL] = { .type = NLA_U8 }, 2185 [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 }, 2186 [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 }, 2187 [IFLA_GRE_FLAGS] = { .type = NLA_U32 }, 2188 [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 }, 2189 [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, 2190 [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, 2191 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, 2192 [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, 2193 [IFLA_GRE_FWMARK] = { .type = NLA_U32 }, 2194 [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 }, 2195 [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 }, 2196 [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 }, 2197 [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 }, 2198 }; 2199 2200 static void ip6erspan_tap_setup(struct net_device *dev) 2201 { 2202 ether_setup(dev); 2203 2204 dev->max_mtu = 0; 2205 dev->netdev_ops = &ip6erspan_netdev_ops; 2206 dev->needs_free_netdev = true; 2207 dev->priv_destructor = ip6gre_dev_free; 2208 2209 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 2210 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2211 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 2212 netif_keep_dst(dev); 2213 } 2214 2215 static int ip6erspan_newlink(struct net_device *dev, 2216 struct rtnl_newlink_params *params, 2217 struct netlink_ext_ack *extack) 2218 { 2219 struct net *net = params->link_net ? : dev_net(dev); 2220 struct ip6_tnl *nt = netdev_priv(dev); 2221 struct nlattr **data = params->data; 2222 struct nlattr **tb = params->tb; 2223 struct ip6gre_net *ign; 2224 int err; 2225 2226 ip6gre_netlink_parms(data, &nt->parms); 2227 ip6erspan_set_version(data, &nt->parms); 2228 ign = net_generic(net, ip6gre_net_id); 2229 2230 if (nt->parms.collect_md) { 2231 if (rtnl_dereference(ign->collect_md_tun_erspan)) 2232 return -EEXIST; 2233 } else { 2234 if (ip6gre_tunnel_find(net, &nt->parms, dev->type)) 2235 return -EEXIST; 2236 } 2237 2238 err = ip6gre_newlink_common(net, dev, tb, data, extack); 2239 if (!err) { 2240 ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]); 2241 ip6erspan_tunnel_link_md(ign, nt); 2242 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt); 2243 } 2244 return err; 2245 } 2246 2247 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu) 2248 { 2249 ip6gre_tnl_link_config_common(t); 2250 ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t)); 2251 } 2252 2253 static int ip6erspan_tnl_change(struct ip6_tnl *t, 2254 const struct __ip6_tnl_parm *p, int set_mtu) 2255 { 2256 ip6gre_tnl_copy_tnl_parm(t, p); 2257 ip6erspan_tnl_link_config(t, set_mtu); 2258 return 0; 2259 } 2260 2261 static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], 2262 struct nlattr *data[], 2263 struct netlink_ext_ack *extack) 2264 { 2265 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); 2266 struct __ip6_tnl_parm p; 2267 struct ip6_tnl *t; 2268 2269 t = ip6gre_changelink_common(dev, tb, data, &p, extack); 2270 if (IS_ERR(t)) 2271 return PTR_ERR(t); 2272 2273 ip6erspan_set_version(data, &p); 2274 ip6gre_tunnel_unlink_md(ign, t); 2275 ip6gre_tunnel_unlink(ign, t); 2276 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); 2277 ip6erspan_tunnel_link_md(ign, t); 2278 ip6gre_tunnel_link(ign, t); 2279 return 0; 2280 } 2281 2282 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { 2283 .kind = "ip6gre", 2284 .maxtype = IFLA_GRE_MAX, 2285 .policy = ip6gre_policy, 2286 .priv_size = sizeof(struct ip6_tnl), 2287 .setup = ip6gre_tunnel_setup, 2288 .validate = ip6gre_tunnel_validate, 2289 .newlink = ip6gre_newlink, 2290 .changelink = ip6gre_changelink, 2291 .dellink = ip6gre_dellink, 2292 .get_size = ip6gre_get_size, 2293 .fill_info = ip6gre_fill_info, 2294 .get_link_net = ip6_tnl_get_link_net, 2295 }; 2296 2297 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = { 2298 .kind = "ip6gretap", 2299 .maxtype = IFLA_GRE_MAX, 2300 .policy = ip6gre_policy, 2301 .priv_size = sizeof(struct ip6_tnl), 2302 .setup = ip6gre_tap_setup, 2303 .validate = ip6gre_tap_validate, 2304 .newlink = ip6gre_newlink, 2305 .changelink = ip6gre_changelink, 2306 .get_size = ip6gre_get_size, 2307 .fill_info = ip6gre_fill_info, 2308 .get_link_net = ip6_tnl_get_link_net, 2309 }; 2310 2311 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = { 2312 .kind = "ip6erspan", 2313 .maxtype = IFLA_GRE_MAX, 2314 .policy = ip6gre_policy, 2315 .priv_size = sizeof(struct ip6_tnl), 2316 .setup = ip6erspan_tap_setup, 2317 .validate = ip6erspan_tap_validate, 2318 .newlink = ip6erspan_newlink, 2319 .changelink = ip6erspan_changelink, 2320 .get_size = ip6gre_get_size, 2321 .fill_info = ip6gre_fill_info, 2322 .get_link_net = ip6_tnl_get_link_net, 2323 }; 2324 2325 /* 2326 * And now the modules code and kernel interface. 2327 */ 2328 2329 static int __init ip6gre_init(void) 2330 { 2331 int err; 2332 2333 pr_info("GRE over IPv6 tunneling driver\n"); 2334 2335 err = register_pernet_device(&ip6gre_net_ops); 2336 if (err < 0) 2337 return err; 2338 2339 err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE); 2340 if (err < 0) { 2341 pr_info("%s: can't add protocol\n", __func__); 2342 goto add_proto_failed; 2343 } 2344 2345 err = rtnl_link_register(&ip6gre_link_ops); 2346 if (err < 0) 2347 goto rtnl_link_failed; 2348 2349 err = rtnl_link_register(&ip6gre_tap_ops); 2350 if (err < 0) 2351 goto tap_ops_failed; 2352 2353 err = rtnl_link_register(&ip6erspan_tap_ops); 2354 if (err < 0) 2355 goto erspan_link_failed; 2356 2357 out: 2358 return err; 2359 2360 erspan_link_failed: 2361 rtnl_link_unregister(&ip6gre_tap_ops); 2362 tap_ops_failed: 2363 rtnl_link_unregister(&ip6gre_link_ops); 2364 rtnl_link_failed: 2365 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE); 2366 add_proto_failed: 2367 unregister_pernet_device(&ip6gre_net_ops); 2368 goto out; 2369 } 2370 2371 static void __exit ip6gre_fini(void) 2372 { 2373 rtnl_link_unregister(&ip6gre_tap_ops); 2374 rtnl_link_unregister(&ip6gre_link_ops); 2375 rtnl_link_unregister(&ip6erspan_tap_ops); 2376 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE); 2377 unregister_pernet_device(&ip6gre_net_ops); 2378 } 2379 2380 module_init(ip6gre_init); 2381 module_exit(ip6gre_fini); 2382 MODULE_LICENSE("GPL"); 2383 MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>"); 2384 MODULE_DESCRIPTION("GRE over IPv6 tunneling device"); 2385 MODULE_ALIAS_RTNL_LINK("ip6gre"); 2386 MODULE_ALIAS_RTNL_LINK("ip6gretap"); 2387 MODULE_ALIAS_RTNL_LINK("ip6erspan"); 2388 MODULE_ALIAS_NETDEV("ip6gre0"); 2389