/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would
     be even more informative. This idea appeared to be wrong: only
     Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but the exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulating packets
   have DF set. But it is not our problem! Nobody could accuse us,
   we did all that we could. Even if it is your gated who injected
   the fatal route to the network, even if it were you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
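
/* For illustration only: the percpu recursion guard referred to above is
 * maintained by the core transmit path, not by this file. The pattern,
 * with names roughly as in net/core/dev.c of this era, is a sketch like:
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT	10
 *
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) {
 *		net_crit_ratelimited("Dead loop on virtual device\n");
 *		kfree_skb(skb);
 *	} else {
 *		__this_cpu_inc(xmit_recursion);
 *		dev_hard_start_xmit(skb, dev, txq, &rc);
 *		__this_cpu_dec(xmit_recursion);
 *	}
 *
 * A tunnel routed over itself re-enters the stack on the same cpu, so
 * the counter climbs until the limit trips and the packet is dropped
 * instead of recursing until the stack overflows.
 */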

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. It makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled.
	   Tell them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so why the hell do these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. It makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so why the hell do these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check the base header length. */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field;
	 * use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
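
/* For reference, gre_build_header() above pushes the RFC 2784/2890
 * header; each optional word is present only when the corresponding
 * TUNNEL_CSUM/TUNNEL_KEY/TUNNEL_SEQ flag is set in o_flags, which is
 * also why tun_hlen varies with the flags:
 *
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|C| |K|S|     Reserved0     | Ver |         Protocol Type       |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|      Checksum (optional)      |      Reserved1 (optional)     |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|                         Key (optional)                        |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|                   Sequence Number (optional)                  |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */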

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM :
						    SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
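
/* A usage note, for illustration: gre_fb_xmit() above is only reached on
 * collect_md devices, where each skb carries its tunnel key and
 * addresses in a metadata dst instead of the netdev configuration.
 * Such a device is created with the iproute2 "external" keyword,
 * e.g. (device name is arbitrary):
 *
 *	ip link add dev gre-flow type gretap external
 *
 * and the per-packet metadata is then supplied by openvswitch, eBPF
 * or an lwtunnel route rather than by this module's own parms.
 */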

static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	int version;
	__be16 df;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	/* ERSPAN has a fixed 8-byte GRE header. */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers. */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header. */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
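
/* A worked example for ipgre_link_update() below, assuming the usual
 * gre_calc_hlen() accounting (4-byte base header, plus 4 bytes for
 * each of TUNNEL_CSUM, TUNNEL_KEY and TUNNEL_SEQ):
 *
 *	o_flags = TUNNEL_KEY               ->  tun_hlen = 4 + 4 = 8
 *	o_flags = TUNNEL_KEY | TUNNEL_CSUM ->  tun_hlen = 4 + 8 = 12
 *
 * so enabling checksums on a keyed tunnel yields len = 4, which grows
 * needed_headroom by 4 and, with set_mtu, shrinks dev->mtu by the same
 * 4 bytes (never below 68, the minimum IPv4 MTU).
 */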

static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
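
/* A minimal userspace sketch of driving the ioctl above (illustrative
 * only: error handling omitted, addresses made up, and the request
 * must be issued against the fallback "gre0" device):
 *
 *	struct ip_tunnel_parm p = { .iph = { .version  = 4,
 *					     .ihl      = 5,
 *					     .protocol = IPPROTO_GRE } };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "mygre");
 *	p.iph.saddr = inet_addr("192.0.2.1");
 *	p.iph.daddr = inet_addr("192.0.2.2");
 *	strcpy(ifr.ifr_name, "gre0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */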

/* Nice toy. Unfortunately, useless in real life :-)
   It allows constructing a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor can
		 * we support 2 levels of outer headers requiring an
		 * update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}
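
/* erspan_validate() below pins down the shape of a classical (non
 * collect_md) ERSPAN link: GRE seq+key on the wire and a key that fits
 * the 10-bit session ID. For illustration, an iproute2 invocation that
 * satisfies it looks something like this (exact option names depend on
 * the iproute2 version):
 *
 *	ip link add dev ers1 type erspan seq key 100 \
 *		local 192.0.2.1 remote 192.0.2.2 erspan 123
 */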

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have the GRE sequence and key flags. */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
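
/* The IFLA_GRE_ENCAP_* attributes handled below select an additional
 * UDP encapsulation (FOU/GUE) around the GRE header. For illustration,
 * the corresponding iproute2 form is roughly:
 *
 *	ip link add name gre1 type gre remote 192.0.2.2 \
 *		encap fou encap-sport auto encap-dport 7777
 *
 * with a matching "ip fou add port 7777 ipproto 47" receive port on
 * the peer.
 */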

/* This function returns true when ENCAP attributes are present in the nl msg. */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);
	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

bool is_gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_gretap_dev);

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops,
				  "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");