// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), we force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would
   really kill the network. The IP hop limit plays the role of
   "t->recursion" in this case, if we copy it from the packet being
   encapsulated to the upper header. It is a very good solution, but it
   introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us;
   we did all that we could. Even if it was your gated that injected
   the fatal route to the network, even if it was you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
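
/*
 * Illustrative only, not used by this file: a minimal sketch of the
 * percpu recursion guard described above. The real counter and limit
 * live in the core stack; the names below are placeholders.
 *
 *	static DEFINE_PER_CPU(int, my_xmit_recursion);
 *	#define MY_RECURSION_LIMIT 8
 *
 *	if (__this_cpu_read(my_xmit_recursion) > MY_RECURSION_LIMIT)
 *		goto drop;	// dead loop detected, drop the skb
 *	__this_cpu_inc(my_xmit_recursion);
 *	ret = do_transmit(skb);
 *	__this_cpu_dec(my_xmit_recursion);
 */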

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key into the third word
	   of the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with checksumming enabled. Tell
	   them "thank you".

	   Well, I wonder, rfc1812 was written by a Cisco employee;
	   why the hell do these idiots break standards established
	   by themselves???
	   */
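	/* For reference, the optional GRE header words (RFC 2890), which
	 * is why the key lands in the third 32-bit word once checksumming
	 * is on:
	 *
	 *	word 0: flags (C/K/S), version, protocol type
	 *	word 1: checksum + reserved	(present iff C is set)
	 *	word 2: key			(present iff K is set)
	 *	word 3: sequence number		(present iff S is set)
	 */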
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the comment at the top of ipgre_err(): only Linux returns
	 * enough ICMP payload for precise relaying, and Cisco's placement
	 * of the GRE key breaks soft state for keyed+checksummed tunnels.
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static bool is_erspan_type1(int gre_hdr_len)
{
	/* Both ERSPAN type I (version 0) and type II (version 1) use
	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
	 * while type II has an 8-byte one.
	 */
	return gre_hdr_len == 4;
}
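
/* Concretely: type I is a bare 4-byte GRE header (no C/K/S bits set),
 * while type II sets the sequence-number bit, making the GRE header
 * 4 + 4 = 8 bytes, followed by the ERSPAN base header.
 */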

static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_NO_KEY,
					  iph->saddr, iph->daddr, 0);
	} else {
		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_KEY,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

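/* A note on the return contract (derived from the callers below):
 * PACKET_RCVD means the skb was consumed (delivered or dropped),
 * PACKET_REJECT asks gre_rcv() to answer with ICMP port unreachable,
 * and PACKET_NEXT asks ipgre_rcv() to retry the lookup against the
 * fallback table.
 */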
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;
	__be16 flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_skb;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 proto, 0, htonl(tunnel->o_seqno++));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

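/* ndo_fill_metadata_dst helper for flow-based devices: do a route
 * lookup toward the destination in the tunnel key and report back the
 * local source address that egress traffic would use.
 */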
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id), key->tos, 0,
			    skb->mark, skb_get_hash(skb));
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

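/* Recompute the GRE header length after the tunnel's output flags have
 * changed, propagate the size delta to the device headroom (and MTU,
 * if requested), and refresh the software GSO/LLTX feature bits.
 */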
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}

static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
			    int cmd)
{
	int err;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p->i_flags;
		t->parms.o_flags = p->o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
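
/* A minimal sketch of the same setup using only modern iproute2
 * commands (ifconfig is long deprecated); the addresses are the example
 * placeholders from the comment above:
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 \
 *		local <Your_real_addr> ttl 255
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */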
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ip_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
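
/* Example (assuming the 10-bit ID_MASK from erspan.h): a key of
 * htonl(100) passes the ~ID_MASK checks above, while htonl(0x400)
 * has a bit outside the session-ID range and is rejected.
 */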

static int ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) &&
		    (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");