// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it would require maintaining a new variable in
   ALL skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces two
   problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my
     neighbourhood) return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. In short, ttl is not
   a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulated packets have
   DF set. But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it is your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static const struct header_ops ipgre_header_ops;

static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

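/* Handle an ICMP error received for a locally generated GRE packet.
 * Called from gre_err() once the GRE header has been parsed; looks up
 * the matching tunnel and records the error (err_count/err_time) so the
 * transmit path can propagate it. Returns -ENOENT if no tunnel matches.
 */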
static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with checksums enabled. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so why the hell do these idiots break standards established
	   by themselves?
	   */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the comment in ipgre_err(): routers other than Linux return
	 * only 8 bytes of payload, and Cisco put the GRE key in the third
	 * word of the header, so precise ICMP relaying is infeasible.
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static bool is_erspan_type1(int gre_hdr_len)
{
	/* Both ERSPAN type I (version 0) and type II (version 1) use
	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
	 * while type II has an 8-byte one.
	 */
	return gre_hdr_len == 4;
}

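/* Receive path for ERSPAN-encapsulated packets. Looks up the tunnel by
 * the outer addresses and GRE key, strips the GRE and ERSPAN headers,
 * and, for collect_md tunnels, attaches the ERSPAN metadata to the skb
 * as a metadata dst before handing it to ip_tunnel_rcv().
 */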
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_NO_KEY,
					  iph->saddr, iph->daddr, 0);
	} else {
		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_KEY,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

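/* Common GRE receive helper. Returns PACKET_RCVD when the skb has been
 * consumed (delivered or dropped), PACKET_REJECT when metadata dst
 * allocation fails, and PACKET_NEXT when no tunnel in @itn matched so
 * the caller may try another tunnel table.
 */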
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* Special case for ipgre_header_parse(), which expects the
		 * mac_header to point to the outer IP header.
		 */
		if (tunnel->dev->header_ops == &ipgre_header_ops)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

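/* Entry point for all GRE traffic; registered for GREPROTO_CISCO via
 * gre_add_protocol() in ipgre_init(). Parses the GRE header and
 * dispatches to the ERSPAN or plain GRE receive paths, answering with
 * ICMP port unreachable when nothing claims the packet.
 */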
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

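/* Push the GRE header (with optional key, checksum and sequence number
 * as configured in o_flags) and hand the skb to ip_tunnel_xmit() for
 * the outer IP encapsulation.
 */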
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags = tunnel->parms.o_flags;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

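/* Flow-based (collect_md) transmit path: the outer headers are built
 * from the per-packet tunnel metadata attached to the skb rather than
 * from the device configuration.
 */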
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;
	__be16 flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_skb;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

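/* collect_md transmit path for ERSPAN: validates the ERSPAN metadata
 * carried with the skb, truncates oversized mirrored frames (setting
 * the truncation bit in the ERSPAN header), and emits a type II or
 * type III header according to md->version.
 */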
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto err_free_skb;
		truncate = true;
	}

	nhoff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		int thoff;

		if (skb_transport_header_was_set(skb))
			thoff = skb_transport_offset(skb);
		else
			thoff = nhoff + sizeof(struct ipv6hdr);
		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
			truncate = true;
	}

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

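/* .ndo_fill_metadata_dst handler: resolve the route for the tunnel
 * metadata attached to the skb and fill in the local source address,
 * so callers (e.g. openvswitch) can learn the complete flow key.
 */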
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
			    skb->mark, skb_get_hash(skb), key->flow_flags);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		if (skb_cow_head(skb, 0))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_start(skb) < skb->data)
			goto free_skb;
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto free_skb;
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags;
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	flags = tunnel->parms.o_flags;

	if (flags & TUNNEL_SEQ ||
	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
		dev->features &= ~NETIF_F_GSO_SOFTWARE;
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
	} else {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}
}

static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
			    int cmd)
{
	int err;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p->i_flags;
		t->parms.o_flags = p->o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
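
   (The ifconfig invocations above predate iproute2; an equivalent
   modern setup, as an untested sketch, would be:

   ip link set Universe up
   ip addr add fe80::<Your_real_addr>/10 dev Universe
   ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
   )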
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
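/* For broadcast GRE: joining the configured multicast group on the
 * underlying device is what lets the tunnel receive the "LAN" traffic
 * described in the comment above.
 */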
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

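/* Common init for gre and gretap devices: compute the header lengths
 * from o_flags and advertise software GSO only when the configuration
 * permits segmentation (see the comment below on TUNNEL_SEQ and
 * TUNNEL_CSUM).
 */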
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	__be16 flags;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features		|= GRE_FEATURES | NETIF_F_LLTX;
	dev->hw_features	|= GRE_FEATURES;

	flags = tunnel->parms.o_flags;

	/* TCP offload with GRE SEQ is not supported, nor can we support 2
	 * levels of outer headers requiring an update.
	 */
	if (flags & TUNNEL_SEQ)
		return;
	if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
		return;

	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

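/* Translate IFLA_GRE_* netlink attributes into ip_tunnel_parm values.
 * Note the side effects on the tunnel private data: collect_md and
 * ignore_df are set here, and a collect_md gre device loses its
 * ARPHRD_IPGRE type.
 */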
static int ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		  && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			t->parms.o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	return ipgre_fill_info(skb, dev);

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= erspan_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

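/**
 * gretap_fb_dev_create() - create a flow-based (collect_md) gretap device
 * @net: network namespace to create the device in
 * @name: device name template
 * @name_assign_type: NET_NAME_* value describing how @name was chosen
 *
 * Intended for callers such as openvswitch that drive the tunnel
 * entirely through per-packet metadata. Returns the device on success
 * or an ERR_PTR() on failure.
 */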
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL, 0, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");