// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
#include <net/inet_dscp.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), we force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least, in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulated packets
   have DF set. But it is not our problem! Nobody could accuse us:
   we made all that we could make. Even if it is your gated which
   injected the fatal route to the network, even if it were you who
   configured the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static const struct header_ops ipgre_header_ops;

static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
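/* Handle an ICMP error that arrived in response to a locally sent,
 * GRE-encapsulated packet: look up the tunnel the inner header belongs
 * to, filter out error types we cannot act on, and update the tunnel's
 * error bookkeeping (err_count/err_time).
 */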
static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with enabled checksum. Tell
	 * them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so why the hell do these idiots break standards established
	 * by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			 * rfc2003 contains "deep thoughts" about NET_UNREACH;
			 * I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with enabled checksum. Tell
	 * them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so why the hell do these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static bool is_erspan_type1(int gre_hdr_len)
{
	/* Both ERSPAN type I (version 0) and type II (version 1) use
	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
	 * while type II has an 8-byte one.
	 */
	return gre_hdr_len == 4;
}
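/* Receive one ERSPAN-over-GRE packet: find the matching tunnel (keyless
 * for type I, keyed for type II/III), strip the outer headers and, in
 * collect_md mode, attach the ERSPAN metadata to a metadata dst before
 * handing the skb to ip_tunnel_rcv().
 */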
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	IP_TUNNEL_DECLARE_FLAGS(flags);
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	ip_tunnel_flags_copy(flags, tpi->flags);

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
					  iph->saddr, iph->daddr, 0);
	} else {
		if (unlikely(!pskb_may_pull(skb,
					    gre_hdr_len + sizeof(*ershdr))))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		iph = ip_hdr(skb);
		__set_bit(IP_TUNNEL_KEY_BIT, flags);
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;

			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
			ip_tunnel_flags_copy(flags, tpi->flags);
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
				  info->key.tun_flags);
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* Special case for ipgre_header_parse(), which expects the
		 * mac_header to point to the outer IP header.
		 */
		if (tunnel->dev->header_ops == &ipgre_header_ops)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
			__be64 tun_id;

			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
			__set_bit(IP_TUNNEL_KEY_BIT, flags);
			ip_tunnel_flags_and(flags, tpi->flags, flags);

			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
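/* Demultiplex a plain GRE packet: try the gretap per-netns table for
 * ETH_P_TEB and the ipgre table otherwise, then retry the ipgre table
 * in raw-protocol mode so collect_md ipgre tunnels also see ETH_P_TEB
 * traffic.
 */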
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags);

	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM :
						    SKB_GSO_GRE);
}
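/* Flow-based (collect_md) transmit: all tunnel parameters come from
 * the per-packet tunnel metadata (skb_tunnel_info()) instead of the
 * device configuration.
 */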
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto err_free_skb;

	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
	__set_bit(IP_TUNNEL_KEY_BIT, flags);
	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);

	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto err_free_skb;
		truncate = true;
	}

	nhoff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		int thoff;

		if (skb_transport_header_was_set(skb))
			thoff = skb_transport_offset(skb);
		else
			thoff = nhoff + sizeof(struct ipv6hdr);
		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
			truncate = true;
	}

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	gre_build_header(skb, 8, flags, proto, 0,
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}
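/* ndo_fill_metadata_dst: resolve the route described by the tunnel
 * metadata and record the chosen local source address back into the
 * tunnel key.
 */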
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
			    skb->mark, skb_get_hash(skb), key->flow_flags);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		int pull_len = tunnel->hlen + sizeof(struct iphdr);

		if (skb_cow_head(skb, 0))
			goto free_skb;

		if (!pskb_may_pull(skb, pull_len))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
		skb_pull(skb, pull_len);
		skb_reset_mac_header(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_start(skb) < skb->data)
			goto free_skb;
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto free_skb;
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}
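/* Transmit for gretap devices: Ethernet frames are encapsulated as
 * ETH_P_TEB inside GRE.
 */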
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	if (set_mtu)
		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));

	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
		dev->features &= ~NETIF_F_GSO_SOFTWARE;
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
	} else {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}
}

static int ipgre_tunnel_ctl(struct net_device *dev,
			    struct ip_tunnel_parm_kern *p,
			    int cmd)
{
	__be16 i_flags, o_flags;
	int err;

	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
		return -EOVERFLOW;

	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
	o_flags = ip_tunnel_flags_to_be16(p->o_flags);

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	gre_flags_to_tnl_flags(p->i_flags, i_flags);
	gre_flags_to_tnl_flags(p->o_flags, o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	ip_tunnel_flags_from_be16(p->o_flags, o_flags);

	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
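/* header_ops.create for broadcast/NOARP GRE devices: prebuild the
 * outer IP header plus the GRE base header in the skb's headroom, so
 * the destination chosen by upper layers (possibly multicast) ends up
 * in the outer header.
 */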
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4 = {
			.flowi4_oif = t->parms.link,
			.flowi4_tos = t->parms.iph.tos & INET_DSCP_MASK,
			.flowi4_scope = RT_SCOPE_UNIVERSE,
			.flowi4_proto = IPPROTO_GRE,
			.saddr = t->parms.iph.saddr,
			.daddr = t->parms.iph.daddr,
			.fl4_gre_key = t->parms.o_key,
		};
		struct rtable *rt;

		rt = ip_route_output_key(t->net, &fl4);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
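/* Common init for gre/gretap devices: derive header lengths and needed
 * headroom from the configured output flags, and enable GSO only when
 * neither sequence numbers nor checksummed encapsulation would require
 * rewriting outer headers per segment.
 */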
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	/* TCP offload with GRE SEQ is not supported, nor can we support 2
	 * levels of outer headers requiring an update.
	 */
	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
		return;
	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
		return;

	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	dev->lltx = true;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler	= gre_rcv,
	.err_handler	= gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
					     struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
			      dev_to_kill);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}
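/* gretap validation: on top of the plain GRE checks, an IFLA_ADDRESS
 * attribute must be a valid Ethernet address and a configured remote
 * must be non-zero.
 */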
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID only has 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm_kern *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		gre_flags_to_tnl_flags(parms->i_flags,
				       nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		gre_flags_to_tnl_flags(parms->o_flags,
				       nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) &&
		    (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}
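/* Parse the common GRE attributes first, then the ERSPAN-specific
 * ones: the version (0-2), the type II (version 1) index, and the
 * type III (version 2) direction/hwid pair, each validated against
 * its field mask.
 */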
static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm_kern *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}
static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}
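/* Like ipgre_changelink(), but parses the ERSPAN-specific attributes
 * and skips the ipgre_link_update() header/MTU recalculation.
 */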
static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern *p = &t->parms;
	IP_TUNNEL_DECLARE_FLAGS(o_flags);

	ip_tunnel_flags_copy(o_flags, p->o_flags);

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
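/* Dump the ERSPAN attributes (version, index or dir/hwid) before the
 * common GRE ones; a non-collect_md tunnel with version > 0 always
 * reports the key flag as set.
 */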
static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	return ipgre_fill_info(skb, dev);

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= erspan_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
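/**
 * gretap_fb_dev_create() - create a flow-based (collect_md) gretap device
 * @net: namespace to create the device in
 * @name: requested device name
 * @name_assign_type: how the name was chosen (NET_NAME_*)
 *
 * Used by callers such as openvswitch to obtain a gretap device whose
 * tunnel parameters come from per-packet metadata rather than the
 * device configuration. Returns the new device or an ERR_PTR().
 */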
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL, 0, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
						 struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
			      dev_to_kill);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
					      struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
			      dev_to_kill);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch_rtnl = erspan_exit_batch_rtnl,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
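/* Module init: register the three per-netns device tables, hook the
 * GRE protocol handler, then register the rtnl link ops; each failure
 * unwinds everything registered before it, in reverse order.
 */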
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");