1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux NET3:	GRE over IP protocol decoder.
4  *
5  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/uaccess.h>
16 #include <linux/skbuff.h>
17 #include <linux/netdevice.h>
18 #include <linux/in.h>
19 #include <linux/tcp.h>
20 #include <linux/udp.h>
21 #include <linux/if_arp.h>
22 #include <linux/if_vlan.h>
23 #include <linux/init.h>
24 #include <linux/in6.h>
25 #include <linux/inetdevice.h>
26 #include <linux/igmp.h>
27 #include <linux/netfilter_ipv4.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_ether.h>
30 
31 #include <net/sock.h>
32 #include <net/ip.h>
33 #include <net/icmp.h>
34 #include <net/protocol.h>
35 #include <net/ip_tunnels.h>
36 #include <net/arp.h>
37 #include <net/checksum.h>
38 #include <net/dsfield.h>
39 #include <net/inet_ecn.h>
40 #include <net/xfrm.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/rtnetlink.h>
44 #include <net/gre.h>
45 #include <net/dst_metadata.h>
46 #include <net/erspan.h>
47 #include <net/inet_dscp.h>
48 
49 /*
50    Problems & solutions
51    --------------------
52 
53    1. The most important issue is detecting local dead loops.
54    They would cause a complete host lockup in transmit, which
55    would be "resolved" by a stack overflow or, if queueing is enabled,
56    by infinite looping in net_bh.
57 
58    We cannot track such dead loops during route installation;
59    it is an infeasible task. The most general solution would be
60    to keep an skb->encapsulation counter (a sort of local ttl)
61    and silently drop the packet when it expires. It is a good
62    solution, but it requires maintaining a new variable in ALL
63    skbs, even if no tunneling is used.
64 
65    Current solution: xmit_recursion breaks dead loops. This is a percpu
66    counter, since cpu migration is forbidden once we enter the first
67    ndo_xmit(). We force an exit if this counter reaches RECURSION_LIMIT.
68 
69    2. Networking dead loops would not kill routers, but would really
70    kill the network. The IP hop limit plays the role of "t->recursion" in
71    this case, if we copy it from the packet being encapsulated to the
72    upper header. It is a very good solution, but it introduces two problems:
73 
74    - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
75      do not work over tunnels.
76    - traceroute does not work. I planned to relay ICMP from the tunnel,
77      so that this problem would be solved and the traceroute output
78      would be even more informative. This idea turned out to be wrong:
79      only Linux complies with rfc1812 now (yes, guys, Linux is the only
80      true router now :-)); all routers (at least in my neighbourhood)
81      return only 8 bytes of payload. It is the end.
82 
83    Hence, if we want OSPF to work or traceroute to say something reasonable,
84    we must look for another solution.
85 
86    One of them is to parse the packet, trying to detect an inner
87    encapsulation made by our own node. This is difficult or even impossible,
88    especially taking fragmentation into account. In short, ttl is not a solution at all.
89 
90    Current solution: the solution was UNEXPECTEDLY SIMPLE.
91    We force the DF flag on tunnels with a preconfigured hop limit;
92    that is ALL. :-) Well, it does not remove the problem completely,
93    but exponential growth of network traffic is reduced to linear growth
94    (branches that exceed the pmtu are pruned) and the tunnel mtu
95    rapidly degrades to a value below 68, where looping stops.
96    Yes, it is not good if there is a router in the loop
97    which does not force DF, even when the encapsulating packets have DF set.
98    But that is not our problem! Nobody could accuse us; we did
99    all that we could. Even if it was your gated that injected the
100    fatal route into the network, even if it was you who configured the
101    fatal static route: you are innocent. :-)
102 
103    Alexey Kuznetsov.
104  */
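
/* For illustration only: the per-CPU recursion guard described above boils
 * down to roughly the following sketch. The names below (xmit_depth,
 * MAX_XMIT_DEPTH, transmit_one) are illustrative, not the symbols this file
 * or the core stack actually use, and the packet is simply dropped to break
 * the local dead loop once the limit is hit. Migration is assumed to be
 * disabled across the window, which is what makes a plain per-CPU counter
 * sufficient:
 *
 *	static DEFINE_PER_CPU(unsigned int, xmit_depth);
 *	#define MAX_XMIT_DEPTH	8
 *
 *	if (__this_cpu_read(xmit_depth) >= MAX_XMIT_DEPTH) {
 *		kfree_skb(skb);
 *		return NETDEV_TX_OK;
 *	}
 *	__this_cpu_inc(xmit_depth);
 *	ret = transmit_one(skb);
 *	__this_cpu_dec(xmit_depth);
 */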
105 
106 static bool log_ecn_error = true;
107 module_param(log_ecn_error, bool, 0644);
108 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
109 
110 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
111 static const struct header_ops ipgre_header_ops;
112 
113 static int ipgre_tunnel_init(struct net_device *dev);
114 static void erspan_build_header(struct sk_buff *skb,
115 				u32 id, u32 index,
116 				bool truncate, bool is_ipv4);
117 
118 static unsigned int ipgre_net_id __read_mostly;
119 static unsigned int gre_tap_net_id __read_mostly;
120 static unsigned int erspan_net_id __read_mostly;
121 
122 static int ipgre_err(struct sk_buff *skb, u32 info,
123 		     const struct tnl_ptk_info *tpi)
124 {
125 
126 	/* All the routers (except for Linux) return only
127 	   8 bytes of packet payload. It means that precise relaying of
128 	   ICMP in the real Internet is absolutely infeasible.
129 
130 	   Moreover, Cisco "wise men" put the GRE key in the third word
131 	   of the GRE header. That makes it impossible to maintain even soft
132 	   state for keyed GRE tunnels with checksums enabled. Tell
133 	   them "thank you".
134 
135 	   Well, I wonder: rfc1812 was written by a Cisco employee,
136 	   so why the hell do these idiots break standards established
137 	   by themselves???
138 	   */
139 	struct net *net = dev_net(skb->dev);
140 	struct ip_tunnel_net *itn;
141 	const struct iphdr *iph;
142 	const int type = icmp_hdr(skb)->type;
143 	const int code = icmp_hdr(skb)->code;
144 	unsigned int data_len = 0;
145 	struct ip_tunnel *t;
146 
147 	if (tpi->proto == htons(ETH_P_TEB))
148 		itn = net_generic(net, gre_tap_net_id);
149 	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
150 		 tpi->proto == htons(ETH_P_ERSPAN2))
151 		itn = net_generic(net, erspan_net_id);
152 	else
153 		itn = net_generic(net, ipgre_net_id);
154 
155 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
156 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
157 			     iph->daddr, iph->saddr, tpi->key);
158 
159 	if (!t)
160 		return -ENOENT;
161 
162 	switch (type) {
163 	default:
164 	case ICMP_PARAMETERPROB:
165 		return 0;
166 
167 	case ICMP_DEST_UNREACH:
168 		switch (code) {
169 		case ICMP_SR_FAILED:
170 		case ICMP_PORT_UNREACH:
171 			/* Impossible event. */
172 			return 0;
173 		default:
174 			/* All others are translated to HOST_UNREACH.
175 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
176 			   I believe they are just ether pollution. --ANK
177 			 */
178 			break;
179 		}
180 		break;
181 
182 	case ICMP_TIME_EXCEEDED:
183 		if (code != ICMP_EXC_TTL)
184 			return 0;
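		/* Per RFC 4884, the length field counts the embedded
		 * "original datagram" in 32-bit words, so the value below
		 * is multiplied by 4 to obtain a byte count.
		 */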
185 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
186 		break;
187 
188 	case ICMP_REDIRECT:
189 		break;
190 	}
191 
192 #if IS_ENABLED(CONFIG_IPV6)
193 	if (tpi->proto == htons(ETH_P_IPV6) &&
194 	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
195 					type, data_len))
196 		return 0;
197 #endif
198 
199 	if (t->parms.iph.daddr == 0 ||
200 	    ipv4_is_multicast(t->parms.iph.daddr))
201 		return 0;
202 
203 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
204 		return 0;
205 
206 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
207 		t->err_count++;
208 	else
209 		t->err_count = 1;
210 	t->err_time = jiffies;
211 
212 	return 0;
213 }
214 
215 static void gre_err(struct sk_buff *skb, u32 info)
216 {
217 	/* All the routers (except for Linux) return only
218 	 * 8 bytes of packet payload. It means that precise relaying of
219 	 * ICMP in the real Internet is absolutely infeasible.
220 	 *
221 	 * Moreover, Cisco "wise men" put the GRE key in the third word
222 	 * of the GRE header. That makes it impossible to maintain even
223 	 * soft state for keyed GRE tunnels with checksums enabled.
224 	 * Tell them "thank you".
225 	 *
226 	 * Well, I wonder: rfc1812 was written by a Cisco employee,
227 	 * so why the hell do these idiots break standards established
228 	 * by themselves???
229 	 */
230 
231 	const struct iphdr *iph = (struct iphdr *)skb->data;
232 	const int type = icmp_hdr(skb)->type;
233 	const int code = icmp_hdr(skb)->code;
234 	struct tnl_ptk_info tpi;
235 
236 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
237 			     iph->ihl * 4) < 0)
238 		return;
239 
240 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
241 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
242 				 skb->dev->ifindex, IPPROTO_GRE);
243 		return;
244 	}
245 	if (type == ICMP_REDIRECT) {
246 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
247 			      IPPROTO_GRE);
248 		return;
249 	}
250 
251 	ipgre_err(skb, info, &tpi);
252 }
253 
254 static bool is_erspan_type1(int gre_hdr_len)
255 {
256 	/* Both ERSPAN type I (version 0) and type II (version 1) use
257 	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
258 	 * while type II has an 8-byte one.
259 	 */
260 	return gre_hdr_len == 4;
261 }
262 
263 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
264 		      int gre_hdr_len)
265 {
266 	struct net *net = dev_net(skb->dev);
267 	struct metadata_dst *tun_dst = NULL;
268 	struct erspan_base_hdr *ershdr;
269 	IP_TUNNEL_DECLARE_FLAGS(flags);
270 	struct ip_tunnel_net *itn;
271 	struct ip_tunnel *tunnel;
272 	const struct iphdr *iph;
273 	struct erspan_md2 *md2;
274 	int ver;
275 	int len;
276 
277 	ip_tunnel_flags_copy(flags, tpi->flags);
278 
279 	itn = net_generic(net, erspan_net_id);
280 	iph = ip_hdr(skb);
281 	if (is_erspan_type1(gre_hdr_len)) {
282 		ver = 0;
283 		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
284 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
285 					  iph->saddr, iph->daddr, 0);
286 	} else {
287 		if (unlikely(!pskb_may_pull(skb,
288 					    gre_hdr_len + sizeof(*ershdr))))
289 			return PACKET_REJECT;
290 
291 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
292 		ver = ershdr->ver;
293 		iph = ip_hdr(skb);
294 		__set_bit(IP_TUNNEL_KEY_BIT, flags);
295 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
296 					  iph->saddr, iph->daddr, tpi->key);
297 	}
298 
299 	if (tunnel) {
300 		if (is_erspan_type1(gre_hdr_len))
301 			len = gre_hdr_len;
302 		else
303 			len = gre_hdr_len + erspan_hdr_len(ver);
304 
305 		if (unlikely(!pskb_may_pull(skb, len)))
306 			return PACKET_REJECT;
307 
308 		if (__iptunnel_pull_header(skb,
309 					   len,
310 					   htons(ETH_P_TEB),
311 					   false, false) < 0)
312 			goto drop;
313 
314 		if (tunnel->collect_md) {
315 			struct erspan_metadata *pkt_md, *md;
316 			struct ip_tunnel_info *info;
317 			unsigned char *gh;
318 			__be64 tun_id;
319 
320 			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
321 			ip_tunnel_flags_copy(flags, tpi->flags);
322 			tun_id = key32_to_tunnel_id(tpi->key);
323 
324 			tun_dst = ip_tun_rx_dst(skb, flags,
325 						tun_id, sizeof(*md));
326 			if (!tun_dst)
327 				return PACKET_REJECT;
328 
329 			/* The skb can be uncloned in __iptunnel_pull_header, so
330 			 * the old pkt_md is no longer valid and we need to
331 			 * recompute it.
332 			 */
333 			gh = skb_network_header(skb) +
334 			     skb_network_header_len(skb);
335 			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
336 							    sizeof(*ershdr));
337 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
338 			md->version = ver;
339 			md2 = &md->u.md2;
340 			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
341 						       ERSPAN_V2_MDSIZE);
342 
343 			info = &tun_dst->u.tun_info;
344 			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
345 				  info->key.tun_flags);
346 			info->options_len = sizeof(*md);
347 		}
348 
349 		skb_reset_mac_header(skb);
350 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
351 		return PACKET_RCVD;
352 	}
353 	return PACKET_REJECT;
354 
355 drop:
356 	kfree_skb(skb);
357 	return PACKET_RCVD;
358 }
359 
360 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
361 		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
362 {
363 	struct metadata_dst *tun_dst = NULL;
364 	const struct iphdr *iph;
365 	struct ip_tunnel *tunnel;
366 
367 	iph = ip_hdr(skb);
368 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
369 				  iph->saddr, iph->daddr, tpi->key);
370 
371 	if (tunnel) {
372 		const struct iphdr *tnl_params;
373 
374 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
375 					   raw_proto, false) < 0)
376 			goto drop;
377 
378 		/* Special case for ipgre_header_parse(), which expects the
379 		 * mac_header to point to the outer IP header.
380 		 */
381 		if (tunnel->dev->header_ops == &ipgre_header_ops)
382 			skb_pop_mac_header(skb);
383 		else
384 			skb_reset_mac_header(skb);
385 
386 		tnl_params = &tunnel->parms.iph;
387 		if (tunnel->collect_md || tnl_params->daddr == 0) {
388 			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
389 			__be64 tun_id;
390 
391 			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
392 			__set_bit(IP_TUNNEL_KEY_BIT, flags);
393 			ip_tunnel_flags_and(flags, tpi->flags, flags);
394 
395 			tun_id = key32_to_tunnel_id(tpi->key);
396 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
397 			if (!tun_dst)
398 				return PACKET_REJECT;
399 		}
400 
401 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
402 		return PACKET_RCVD;
403 	}
404 	return PACKET_NEXT;
405 
406 drop:
407 	kfree_skb(skb);
408 	return PACKET_RCVD;
409 }
410 
411 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
412 		     int hdr_len)
413 {
414 	struct net *net = dev_net(skb->dev);
415 	struct ip_tunnel_net *itn;
416 	int res;
417 
418 	if (tpi->proto == htons(ETH_P_TEB))
419 		itn = net_generic(net, gre_tap_net_id);
420 	else
421 		itn = net_generic(net, ipgre_net_id);
422 
423 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
424 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
425 		/* ipgre tunnels in collect metadata mode should also
426 		 * receive ETH_P_TEB traffic.
427 		 */
428 		itn = net_generic(net, ipgre_net_id);
429 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
430 	}
431 	return res;
432 }
433 
434 static int gre_rcv(struct sk_buff *skb)
435 {
436 	struct tnl_ptk_info tpi;
437 	bool csum_err = false;
438 	int hdr_len;
439 
440 #ifdef CONFIG_NET_IPGRE_BROADCAST
441 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
442 		/* Looped back packet, drop it! */
443 		if (rt_is_output_route(skb_rtable(skb)))
444 			goto drop;
445 	}
446 #endif
447 
448 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
449 	if (hdr_len < 0)
450 		goto drop;
451 
452 	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
453 		     tpi.proto == htons(ETH_P_ERSPAN2))) {
454 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
455 			return 0;
456 		goto out;
457 	}
458 
459 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
460 		return 0;
461 
462 out:
463 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
464 drop:
465 	kfree_skb(skb);
466 	return 0;
467 }
468 
469 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
470 		       const struct iphdr *tnl_params,
471 		       __be16 proto)
472 {
473 	struct ip_tunnel *tunnel = netdev_priv(dev);
474 	IP_TUNNEL_DECLARE_FLAGS(flags);
475 
476 	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
477 
478 	/* Push GRE header. */
479 	gre_build_header(skb, tunnel->tun_hlen,
480 			 flags, proto, tunnel->parms.o_key,
481 			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
482 			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
483 
484 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
485 }
486 
487 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
488 {
489 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
490 }
491 
492 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
493 			__be16 proto)
494 {
495 	struct ip_tunnel *tunnel = netdev_priv(dev);
496 	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
497 	struct ip_tunnel_info *tun_info;
498 	const struct ip_tunnel_key *key;
499 	int tunnel_hlen;
500 
501 	tun_info = skb_tunnel_info(skb);
502 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
503 		     ip_tunnel_info_af(tun_info) != AF_INET))
504 		goto err_free_skb;
505 
506 	key = &tun_info->key;
507 	tunnel_hlen = gre_calc_hlen(key->tun_flags);
508 
509 	if (skb_cow_head(skb, dev->needed_headroom))
510 		goto err_free_skb;
511 
512 	/* Push Tunnel header. */
513 	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
514 					      tunnel->parms.o_flags)))
515 		goto err_free_skb;
516 
517 	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
518 	__set_bit(IP_TUNNEL_KEY_BIT, flags);
519 	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
520 	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);
521 
522 	gre_build_header(skb, tunnel_hlen, flags, proto,
523 			 tunnel_id_to_key32(tun_info->key.tun_id),
524 			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
525 			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
526 
527 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
528 
529 	return;
530 
531 err_free_skb:
532 	kfree_skb(skb);
533 	DEV_STATS_INC(dev, tx_dropped);
534 }
535 
536 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
537 {
538 	struct ip_tunnel *tunnel = netdev_priv(dev);
539 	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
540 	struct ip_tunnel_info *tun_info;
541 	const struct ip_tunnel_key *key;
542 	struct erspan_metadata *md;
543 	bool truncate = false;
544 	__be16 proto;
545 	int tunnel_hlen;
546 	int version;
547 	int nhoff;
548 
549 	tun_info = skb_tunnel_info(skb);
550 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
551 		     ip_tunnel_info_af(tun_info) != AF_INET))
552 		goto err_free_skb;
553 
554 	key = &tun_info->key;
555 	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
556 		goto err_free_skb;
557 	if (tun_info->options_len < sizeof(*md))
558 		goto err_free_skb;
559 	md = ip_tunnel_info_opts(tun_info);
560 
561 	/* ERSPAN has a fixed 8-byte GRE header */
562 	version = md->version;
563 	tunnel_hlen = 8 + erspan_hdr_len(version);
564 
565 	if (skb_cow_head(skb, dev->needed_headroom))
566 		goto err_free_skb;
567 
568 	if (gre_handle_offloads(skb, false))
569 		goto err_free_skb;
570 
571 	if (skb->len > dev->mtu + dev->hard_header_len) {
572 		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
573 			goto err_free_skb;
574 		truncate = true;
575 	}
576 
577 	nhoff = skb_network_offset(skb);
578 	if (skb->protocol == htons(ETH_P_IP) &&
579 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
580 		truncate = true;
581 
582 	if (skb->protocol == htons(ETH_P_IPV6)) {
583 		int thoff;
584 
585 		if (skb_transport_header_was_set(skb))
586 			thoff = skb_transport_offset(skb);
587 		else
588 			thoff = nhoff + sizeof(struct ipv6hdr);
589 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
590 			truncate = true;
591 	}
592 
593 	if (version == 1) {
594 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
595 				    ntohl(md->u.index), truncate, true);
596 		proto = htons(ETH_P_ERSPAN);
597 	} else if (version == 2) {
598 		erspan_build_header_v2(skb,
599 				       ntohl(tunnel_id_to_key32(key->tun_id)),
600 				       md->u.md2.dir,
601 				       get_hwid(&md->u.md2),
602 				       truncate, true);
603 		proto = htons(ETH_P_ERSPAN2);
604 	} else {
605 		goto err_free_skb;
606 	}
607 
608 	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
609 	gre_build_header(skb, 8, flags, proto, 0,
610 			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));
611 
612 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
613 
614 	return;
615 
616 err_free_skb:
617 	kfree_skb(skb);
618 	DEV_STATS_INC(dev, tx_dropped);
619 }
620 
621 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
622 {
623 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
624 	const struct ip_tunnel_key *key;
625 	struct rtable *rt;
626 	struct flowi4 fl4;
627 
628 	if (ip_tunnel_info_af(info) != AF_INET)
629 		return -EINVAL;
630 
631 	key = &info->key;
632 	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
633 			    tunnel_id_to_key32(key->tun_id),
634 			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
635 			    skb->mark, skb_get_hash(skb), key->flow_flags);
636 	rt = ip_route_output_key(dev_net(dev), &fl4);
637 	if (IS_ERR(rt))
638 		return PTR_ERR(rt);
639 
640 	ip_rt_put(rt);
641 	info->key.u.ipv4.src = fl4.saddr;
642 	return 0;
643 }
644 
645 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
646 			      struct net_device *dev)
647 {
648 	struct ip_tunnel *tunnel = netdev_priv(dev);
649 	const struct iphdr *tnl_params;
650 
651 	if (!pskb_inet_may_pull(skb))
652 		goto free_skb;
653 
654 	if (tunnel->collect_md) {
655 		gre_fb_xmit(skb, dev, skb->protocol);
656 		return NETDEV_TX_OK;
657 	}
658 
659 	if (dev->header_ops) {
660 		int pull_len = tunnel->hlen + sizeof(struct iphdr);
661 
662 		if (skb_cow_head(skb, 0))
663 			goto free_skb;
664 
665 		if (!pskb_may_pull(skb, pull_len))
666 			goto free_skb;
667 
668 		tnl_params = (const struct iphdr *)skb->data;
669 
670 		/* ip_tunnel_xmit() needs skb->data pointing to the GRE header. */
671 		skb_pull(skb, pull_len);
672 		skb_reset_mac_header(skb);
673 
674 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
675 		    skb_checksum_start(skb) < skb->data)
676 			goto free_skb;
677 	} else {
678 		if (skb_cow_head(skb, dev->needed_headroom))
679 			goto free_skb;
680 
681 		tnl_params = &tunnel->parms.iph;
682 	}
683 
684 	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
685 					      tunnel->parms.o_flags)))
686 		goto free_skb;
687 
688 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
689 	return NETDEV_TX_OK;
690 
691 free_skb:
692 	kfree_skb(skb);
693 	DEV_STATS_INC(dev, tx_dropped);
694 	return NETDEV_TX_OK;
695 }
696 
697 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
698 			       struct net_device *dev)
699 {
700 	struct ip_tunnel *tunnel = netdev_priv(dev);
701 	bool truncate = false;
702 	__be16 proto;
703 
704 	if (!pskb_inet_may_pull(skb))
705 		goto free_skb;
706 
707 	if (tunnel->collect_md) {
708 		erspan_fb_xmit(skb, dev);
709 		return NETDEV_TX_OK;
710 	}
711 
712 	if (gre_handle_offloads(skb, false))
713 		goto free_skb;
714 
715 	if (skb_cow_head(skb, dev->needed_headroom))
716 		goto free_skb;
717 
718 	if (skb->len > dev->mtu + dev->hard_header_len) {
719 		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
720 			goto free_skb;
721 		truncate = true;
722 	}
723 
724 	/* Push ERSPAN header */
725 	if (tunnel->erspan_ver == 0) {
726 		proto = htons(ETH_P_ERSPAN);
727 		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
728 	} else if (tunnel->erspan_ver == 1) {
729 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
730 				    tunnel->index,
731 				    truncate, true);
732 		proto = htons(ETH_P_ERSPAN);
733 	} else if (tunnel->erspan_ver == 2) {
734 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
735 				       tunnel->dir, tunnel->hwid,
736 				       truncate, true);
737 		proto = htons(ETH_P_ERSPAN2);
738 	} else {
739 		goto free_skb;
740 	}
741 
742 	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
743 	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
744 	return NETDEV_TX_OK;
745 
746 free_skb:
747 	kfree_skb(skb);
748 	DEV_STATS_INC(dev, tx_dropped);
749 	return NETDEV_TX_OK;
750 }
751 
752 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
753 				struct net_device *dev)
754 {
755 	struct ip_tunnel *tunnel = netdev_priv(dev);
756 
757 	if (!pskb_inet_may_pull(skb))
758 		goto free_skb;
759 
760 	if (tunnel->collect_md) {
761 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
762 		return NETDEV_TX_OK;
763 	}
764 
765 	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
766 					      tunnel->parms.o_flags)))
767 		goto free_skb;
768 
769 	if (skb_cow_head(skb, dev->needed_headroom))
770 		goto free_skb;
771 
772 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
773 	return NETDEV_TX_OK;
774 
775 free_skb:
776 	kfree_skb(skb);
777 	DEV_STATS_INC(dev, tx_dropped);
778 	return NETDEV_TX_OK;
779 }
780 
781 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
782 {
783 	struct ip_tunnel *tunnel = netdev_priv(dev);
784 	int len;
785 
786 	len = tunnel->tun_hlen;
787 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
788 	len = tunnel->tun_hlen - len;
789 	tunnel->hlen = tunnel->hlen + len;
790 
791 	if (dev->header_ops)
792 		dev->hard_header_len += len;
793 	else
794 		dev->needed_headroom += len;
795 
796 	if (set_mtu)
797 		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));
798 
799 	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
800 	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
801 	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
802 		dev->features &= ~NETIF_F_GSO_SOFTWARE;
803 		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
804 	} else {
805 		dev->features |= NETIF_F_GSO_SOFTWARE;
806 		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
807 	}
808 }
809 
810 static int ipgre_tunnel_ctl(struct net_device *dev,
811 			    struct ip_tunnel_parm_kern *p,
812 			    int cmd)
813 {
814 	__be16 i_flags, o_flags;
815 	int err;
816 
817 	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
818 	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
819 		return -EOVERFLOW;
820 
821 	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
822 	o_flags = ip_tunnel_flags_to_be16(p->o_flags);
823 
824 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
825 		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
826 		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
827 		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
828 			return -EINVAL;
829 	}
830 
831 	gre_flags_to_tnl_flags(p->i_flags, i_flags);
832 	gre_flags_to_tnl_flags(p->o_flags, o_flags);
833 
834 	err = ip_tunnel_ctl(dev, p, cmd);
835 	if (err)
836 		return err;
837 
838 	if (cmd == SIOCCHGTUNNEL) {
839 		struct ip_tunnel *t = netdev_priv(dev);
840 
841 		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
842 		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
843 
844 		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
845 			ipgre_link_update(dev, true);
846 	}
847 
848 	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
849 	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
850 	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
851 	ip_tunnel_flags_from_be16(p->o_flags, o_flags);
852 
853 	return 0;
854 }
855 
856 /* Nice toy. Unfortunately, useless in real life :-)
857    It allows one to construct a virtual multiprotocol broadcast "LAN"
858    over the Internet, provided multicast routing is tuned.
859 
860 
861    I have no idea whether this bicycle was invented before me,
862    so I had to set ARPHRD_IPGRE to a random value.
863    I have the impression that Cisco could do something similar,
864    but this feature is apparently missing in IOS<=11.2(8).
865 
866    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
867    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
868 
869    ping -t 255 224.66.66.66
870 
871    If nobody answers, mbone does not work.
872 
873    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
874    ip addr add 10.66.66.<somewhat>/24 dev Universe
875    ifconfig Universe up
876    ifconfig Universe add fe80::<Your_real_addr>/10
877    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
878    ftp 10.66.66.66
879    ...
880    ftp fec0:6666:6666::193.233.7.65
881    ...
882  */
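
/* The ifconfig lines above predate iproute2; an assumed modern equivalent
 * (same illustrative addresses, plain iproute2 commands) would be:
 *
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */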
883 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
884 			unsigned short type,
885 			const void *daddr, const void *saddr, unsigned int len)
886 {
887 	struct ip_tunnel *t = netdev_priv(dev);
888 	struct iphdr *iph;
889 	struct gre_base_hdr *greh;
890 
891 	iph = skb_push(skb, t->hlen + sizeof(*iph));
892 	greh = (struct gre_base_hdr *)(iph+1);
893 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
894 	greh->protocol = htons(type);
895 
896 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
897 
898 	/* Set the source hardware address. */
899 	if (saddr)
900 		memcpy(&iph->saddr, saddr, 4);
901 	if (daddr)
902 		memcpy(&iph->daddr, daddr, 4);
903 	if (iph->daddr)
904 		return t->hlen + sizeof(*iph);
905 
906 	return -(t->hlen + sizeof(*iph));
907 }
908 
909 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
910 {
911 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
912 	memcpy(haddr, &iph->saddr, 4);
913 	return 4;
914 }
915 
916 static const struct header_ops ipgre_header_ops = {
917 	.create	= ipgre_header,
918 	.parse	= ipgre_header_parse,
919 };
920 
921 #ifdef CONFIG_NET_IPGRE_BROADCAST
922 static int ipgre_open(struct net_device *dev)
923 {
924 	struct ip_tunnel *t = netdev_priv(dev);
925 
926 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
927 		struct flowi4 fl4;
928 		struct rtable *rt;
929 
930 		rt = ip_route_output_gre(t->net, &fl4,
931 					 t->parms.iph.daddr,
932 					 t->parms.iph.saddr,
933 					 t->parms.o_key,
934 					 t->parms.iph.tos & INET_DSCP_MASK,
935 					 t->parms.link);
936 		if (IS_ERR(rt))
937 			return -EADDRNOTAVAIL;
938 		dev = rt->dst.dev;
939 		ip_rt_put(rt);
940 		if (!__in_dev_get_rtnl(dev))
941 			return -EADDRNOTAVAIL;
942 		t->mlink = dev->ifindex;
943 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
944 	}
945 	return 0;
946 }
947 
948 static int ipgre_close(struct net_device *dev)
949 {
950 	struct ip_tunnel *t = netdev_priv(dev);
951 
952 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
953 		struct in_device *in_dev;
954 		in_dev = inetdev_by_index(t->net, t->mlink);
955 		if (in_dev)
956 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
957 	}
958 	return 0;
959 }
960 #endif
961 
962 static const struct net_device_ops ipgre_netdev_ops = {
963 	.ndo_init		= ipgre_tunnel_init,
964 	.ndo_uninit		= ip_tunnel_uninit,
965 #ifdef CONFIG_NET_IPGRE_BROADCAST
966 	.ndo_open		= ipgre_open,
967 	.ndo_stop		= ipgre_close,
968 #endif
969 	.ndo_start_xmit		= ipgre_xmit,
970 	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
971 	.ndo_change_mtu		= ip_tunnel_change_mtu,
972 	.ndo_get_stats64	= dev_get_tstats64,
973 	.ndo_get_iflink		= ip_tunnel_get_iflink,
974 	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
975 };
976 
977 #define GRE_FEATURES (NETIF_F_SG |		\
978 		      NETIF_F_FRAGLIST |	\
979 		      NETIF_F_HIGHDMA |		\
980 		      NETIF_F_HW_CSUM)
981 
982 static void ipgre_tunnel_setup(struct net_device *dev)
983 {
984 	dev->netdev_ops		= &ipgre_netdev_ops;
985 	dev->type		= ARPHRD_IPGRE;
986 	ip_tunnel_setup(dev, ipgre_net_id);
987 }
988 
989 static void __gre_tunnel_init(struct net_device *dev)
990 {
991 	struct ip_tunnel *tunnel;
992 
993 	tunnel = netdev_priv(dev);
994 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
995 	tunnel->parms.iph.protocol = IPPROTO_GRE;
996 
997 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
998 	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
999 
1000 	dev->features		|= GRE_FEATURES;
1001 	dev->hw_features	|= GRE_FEATURES;
1002 
1003 	/* TCP offload with GRE SEQ is not supported, nor can we support 2
1004 	 * levels of outer headers requiring an update.
1005 	 */
1006 	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
1007 		return;
1008 	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
1009 	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
1010 		return;
1011 
1012 	dev->features |= NETIF_F_GSO_SOFTWARE;
1013 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1014 
1015 	dev->lltx = true;
1016 }
1017 
1018 static int ipgre_tunnel_init(struct net_device *dev)
1019 {
1020 	struct ip_tunnel *tunnel = netdev_priv(dev);
1021 	struct iphdr *iph = &tunnel->parms.iph;
1022 
1023 	__gre_tunnel_init(dev);
1024 
1025 	__dev_addr_set(dev, &iph->saddr, 4);
1026 	memcpy(dev->broadcast, &iph->daddr, 4);
1027 
1028 	dev->flags		= IFF_NOARP;
1029 	netif_keep_dst(dev);
1030 	dev->addr_len		= 4;
1031 
1032 	if (iph->daddr && !tunnel->collect_md) {
1033 #ifdef CONFIG_NET_IPGRE_BROADCAST
1034 		if (ipv4_is_multicast(iph->daddr)) {
1035 			if (!iph->saddr)
1036 				return -EINVAL;
1037 			dev->flags = IFF_BROADCAST;
1038 			dev->header_ops = &ipgre_header_ops;
1039 			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1040 			dev->needed_headroom = 0;
1041 		}
1042 #endif
1043 	} else if (!tunnel->collect_md) {
1044 		dev->header_ops = &ipgre_header_ops;
1045 		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1046 		dev->needed_headroom = 0;
1047 	}
1048 
1049 	return ip_tunnel_init(dev);
1050 }
1051 
1052 static const struct gre_protocol ipgre_protocol = {
1053 	.handler     = gre_rcv,
1054 	.err_handler = gre_err,
1055 };
1056 
1057 static int __net_init ipgre_init_net(struct net *net)
1058 {
1059 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1060 }
1061 
1062 static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
1063 					     struct list_head *dev_to_kill)
1064 {
1065 	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
1066 			      dev_to_kill);
1067 }
1068 
1069 static struct pernet_operations ipgre_net_ops = {
1070 	.init = ipgre_init_net,
1071 	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
1072 	.id   = &ipgre_net_id,
1073 	.size = sizeof(struct ip_tunnel_net),
1074 };
1075 
1076 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1077 				 struct netlink_ext_ack *extack)
1078 {
1079 	__be16 flags;
1080 
1081 	if (!data)
1082 		return 0;
1083 
1084 	flags = 0;
1085 	if (data[IFLA_GRE_IFLAGS])
1086 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1087 	if (data[IFLA_GRE_OFLAGS])
1088 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1089 	if (flags & (GRE_VERSION|GRE_ROUTING))
1090 		return -EINVAL;
1091 
1092 	if (data[IFLA_GRE_COLLECT_METADATA] &&
1093 	    data[IFLA_GRE_ENCAP_TYPE] &&
1094 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1095 		return -EINVAL;
1096 
1097 	return 0;
1098 }
1099 
1100 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1101 			      struct netlink_ext_ack *extack)
1102 {
1103 	__be32 daddr;
1104 
1105 	if (tb[IFLA_ADDRESS]) {
1106 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1107 			return -EINVAL;
1108 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1109 			return -EADDRNOTAVAIL;
1110 	}
1111 
1112 	if (!data)
1113 		goto out;
1114 
1115 	if (data[IFLA_GRE_REMOTE]) {
1116 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1117 		if (!daddr)
1118 			return -EINVAL;
1119 	}
1120 
1121 out:
1122 	return ipgre_tunnel_validate(tb, data, extack);
1123 }
1124 
1125 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1126 			   struct netlink_ext_ack *extack)
1127 {
1128 	__be16 flags = 0;
1129 	int ret;
1130 
1131 	if (!data)
1132 		return 0;
1133 
1134 	ret = ipgre_tap_validate(tb, data, extack);
1135 	if (ret)
1136 		return ret;
1137 
1138 	if (data[IFLA_GRE_ERSPAN_VER] &&
1139 	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1140 		return 0;
1141 
1142 	/* ERSPAN type II/III should only have the GRE sequence and key flags */
1143 	if (data[IFLA_GRE_OFLAGS])
1144 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1145 	if (data[IFLA_GRE_IFLAGS])
1146 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1147 	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1148 	    flags != (GRE_SEQ | GRE_KEY))
1149 		return -EINVAL;
1150 
1151 	/* The ERSPAN session ID is only 10 bits. Since we reuse the
1152 	 * 32-bit key field as the ID, check its range.
1153 	 */
1154 	if (data[IFLA_GRE_IKEY] &&
1155 	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1156 		return -EINVAL;
1157 
1158 	if (data[IFLA_GRE_OKEY] &&
1159 	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1160 		return -EINVAL;
1161 
1162 	return 0;
1163 }
1164 
1165 static int ipgre_netlink_parms(struct net_device *dev,
1166 				struct nlattr *data[],
1167 				struct nlattr *tb[],
1168 				struct ip_tunnel_parm_kern *parms,
1169 				__u32 *fwmark)
1170 {
1171 	struct ip_tunnel *t = netdev_priv(dev);
1172 
1173 	memset(parms, 0, sizeof(*parms));
1174 
1175 	parms->iph.protocol = IPPROTO_GRE;
1176 
1177 	if (!data)
1178 		return 0;
1179 
1180 	if (data[IFLA_GRE_LINK])
1181 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1182 
1183 	if (data[IFLA_GRE_IFLAGS])
1184 		gre_flags_to_tnl_flags(parms->i_flags,
1185 				       nla_get_be16(data[IFLA_GRE_IFLAGS]));
1186 
1187 	if (data[IFLA_GRE_OFLAGS])
1188 		gre_flags_to_tnl_flags(parms->o_flags,
1189 				       nla_get_be16(data[IFLA_GRE_OFLAGS]));
1190 
1191 	if (data[IFLA_GRE_IKEY])
1192 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1193 
1194 	if (data[IFLA_GRE_OKEY])
1195 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1196 
1197 	if (data[IFLA_GRE_LOCAL])
1198 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1199 
1200 	if (data[IFLA_GRE_REMOTE])
1201 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1202 
1203 	if (data[IFLA_GRE_TTL])
1204 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1205 
1206 	if (data[IFLA_GRE_TOS])
1207 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1208 
1209 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1210 		if (t->ignore_df)
1211 			return -EINVAL;
1212 		parms->iph.frag_off = htons(IP_DF);
1213 	}
1214 
1215 	if (data[IFLA_GRE_COLLECT_METADATA]) {
1216 		t->collect_md = true;
1217 		if (dev->type == ARPHRD_IPGRE)
1218 			dev->type = ARPHRD_NONE;
1219 	}
1220 
1221 	if (data[IFLA_GRE_IGNORE_DF]) {
1222 		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1223 		  && (parms->iph.frag_off & htons(IP_DF)))
1224 			return -EINVAL;
1225 		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1226 	}
1227 
1228 	if (data[IFLA_GRE_FWMARK])
1229 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1230 
1231 	return 0;
1232 }
1233 
1234 static int erspan_netlink_parms(struct net_device *dev,
1235 				struct nlattr *data[],
1236 				struct nlattr *tb[],
1237 				struct ip_tunnel_parm_kern *parms,
1238 				__u32 *fwmark)
1239 {
1240 	struct ip_tunnel *t = netdev_priv(dev);
1241 	int err;
1242 
1243 	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1244 	if (err)
1245 		return err;
1246 	if (!data)
1247 		return 0;
1248 
1249 	if (data[IFLA_GRE_ERSPAN_VER]) {
1250 		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1251 
1252 		if (t->erspan_ver > 2)
1253 			return -EINVAL;
1254 	}
1255 
1256 	if (t->erspan_ver == 1) {
1257 		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1258 			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1259 			if (t->index & ~INDEX_MASK)
1260 				return -EINVAL;
1261 		}
1262 	} else if (t->erspan_ver == 2) {
1263 		if (data[IFLA_GRE_ERSPAN_DIR]) {
1264 			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1265 			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1266 				return -EINVAL;
1267 		}
1268 		if (data[IFLA_GRE_ERSPAN_HWID]) {
1269 			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1270 			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1271 				return -EINVAL;
1272 		}
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 /* This function returns true when ENCAP attributes are present in the nl msg */
1279 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1280 				      struct ip_tunnel_encap *ipencap)
1281 {
1282 	bool ret = false;
1283 
1284 	memset(ipencap, 0, sizeof(*ipencap));
1285 
1286 	if (!data)
1287 		return ret;
1288 
1289 	if (data[IFLA_GRE_ENCAP_TYPE]) {
1290 		ret = true;
1291 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1292 	}
1293 
1294 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1295 		ret = true;
1296 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1297 	}
1298 
1299 	if (data[IFLA_GRE_ENCAP_SPORT]) {
1300 		ret = true;
1301 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1302 	}
1303 
1304 	if (data[IFLA_GRE_ENCAP_DPORT]) {
1305 		ret = true;
1306 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1307 	}
1308 
1309 	return ret;
1310 }
1311 
1312 static int gre_tap_init(struct net_device *dev)
1313 {
1314 	__gre_tunnel_init(dev);
1315 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1316 	netif_keep_dst(dev);
1317 
1318 	return ip_tunnel_init(dev);
1319 }
1320 
1321 static const struct net_device_ops gre_tap_netdev_ops = {
1322 	.ndo_init		= gre_tap_init,
1323 	.ndo_uninit		= ip_tunnel_uninit,
1324 	.ndo_start_xmit		= gre_tap_xmit,
1325 	.ndo_set_mac_address 	= eth_mac_addr,
1326 	.ndo_validate_addr	= eth_validate_addr,
1327 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1328 	.ndo_get_stats64	= dev_get_tstats64,
1329 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1330 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1331 };
1332 
1333 static int erspan_tunnel_init(struct net_device *dev)
1334 {
1335 	struct ip_tunnel *tunnel = netdev_priv(dev);
1336 
1337 	if (tunnel->erspan_ver == 0)
1338 		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1339 	else
1340 		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1341 
1342 	tunnel->parms.iph.protocol = IPPROTO_GRE;
1343 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1344 		       erspan_hdr_len(tunnel->erspan_ver);
1345 
1346 	dev->features		|= GRE_FEATURES;
1347 	dev->hw_features	|= GRE_FEATURES;
1348 	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1349 	netif_keep_dst(dev);
1350 
1351 	return ip_tunnel_init(dev);
1352 }
1353 
1354 static const struct net_device_ops erspan_netdev_ops = {
1355 	.ndo_init		= erspan_tunnel_init,
1356 	.ndo_uninit		= ip_tunnel_uninit,
1357 	.ndo_start_xmit		= erspan_xmit,
1358 	.ndo_set_mac_address	= eth_mac_addr,
1359 	.ndo_validate_addr	= eth_validate_addr,
1360 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1361 	.ndo_get_stats64	= dev_get_tstats64,
1362 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1363 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1364 };
1365 
1366 static void ipgre_tap_setup(struct net_device *dev)
1367 {
1368 	ether_setup(dev);
1369 	dev->max_mtu = 0;
1370 	dev->netdev_ops	= &gre_tap_netdev_ops;
1371 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1372 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1373 	ip_tunnel_setup(dev, gre_tap_net_id);
1374 }
1375 
1376 static int
1377 ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1378 {
1379 	struct ip_tunnel_encap ipencap;
1380 
1381 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1382 		struct ip_tunnel *t = netdev_priv(dev);
1383 		int err = ip_tunnel_encap_setup(t, &ipencap);
1384 
1385 		if (err < 0)
1386 			return err;
1387 	}
1388 
1389 	return 0;
1390 }
1391 
1392 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1393 			 struct nlattr *tb[], struct nlattr *data[],
1394 			 struct netlink_ext_ack *extack)
1395 {
1396 	struct ip_tunnel_parm_kern p;
1397 	__u32 fwmark = 0;
1398 	int err;
1399 
1400 	err = ipgre_newlink_encap_setup(dev, data);
1401 	if (err)
1402 		return err;
1403 
1404 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1405 	if (err < 0)
1406 		return err;
1407 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1408 }
1409 
1410 static int erspan_newlink(struct net *src_net, struct net_device *dev,
1411 			  struct nlattr *tb[], struct nlattr *data[],
1412 			  struct netlink_ext_ack *extack)
1413 {
1414 	struct ip_tunnel_parm_kern p;
1415 	__u32 fwmark = 0;
1416 	int err;
1417 
1418 	err = ipgre_newlink_encap_setup(dev, data);
1419 	if (err)
1420 		return err;
1421 
1422 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1423 	if (err)
1424 		return err;
1425 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1426 }
1427 
1428 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1429 			    struct nlattr *data[],
1430 			    struct netlink_ext_ack *extack)
1431 {
1432 	struct ip_tunnel *t = netdev_priv(dev);
1433 	struct ip_tunnel_parm_kern p;
1434 	__u32 fwmark = t->fwmark;
1435 	int err;
1436 
1437 	err = ipgre_newlink_encap_setup(dev, data);
1438 	if (err)
1439 		return err;
1440 
1441 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1442 	if (err < 0)
1443 		return err;
1444 
1445 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1446 	if (err < 0)
1447 		return err;
1448 
1449 	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1450 	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1451 
1452 	ipgre_link_update(dev, !tb[IFLA_MTU]);
1453 
1454 	return 0;
1455 }
1456 
1457 static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1458 			     struct nlattr *data[],
1459 			     struct netlink_ext_ack *extack)
1460 {
1461 	struct ip_tunnel *t = netdev_priv(dev);
1462 	struct ip_tunnel_parm_kern p;
1463 	__u32 fwmark = t->fwmark;
1464 	int err;
1465 
1466 	err = ipgre_newlink_encap_setup(dev, data);
1467 	if (err)
1468 		return err;
1469 
1470 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1471 	if (err < 0)
1472 		return err;
1473 
1474 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1475 	if (err < 0)
1476 		return err;
1477 
1478 	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1479 	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1480 
1481 	return 0;
1482 }
1483 
1484 static size_t ipgre_get_size(const struct net_device *dev)
1485 {
1486 	return
1487 		/* IFLA_GRE_LINK */
1488 		nla_total_size(4) +
1489 		/* IFLA_GRE_IFLAGS */
1490 		nla_total_size(2) +
1491 		/* IFLA_GRE_OFLAGS */
1492 		nla_total_size(2) +
1493 		/* IFLA_GRE_IKEY */
1494 		nla_total_size(4) +
1495 		/* IFLA_GRE_OKEY */
1496 		nla_total_size(4) +
1497 		/* IFLA_GRE_LOCAL */
1498 		nla_total_size(4) +
1499 		/* IFLA_GRE_REMOTE */
1500 		nla_total_size(4) +
1501 		/* IFLA_GRE_TTL */
1502 		nla_total_size(1) +
1503 		/* IFLA_GRE_TOS */
1504 		nla_total_size(1) +
1505 		/* IFLA_GRE_PMTUDISC */
1506 		nla_total_size(1) +
1507 		/* IFLA_GRE_ENCAP_TYPE */
1508 		nla_total_size(2) +
1509 		/* IFLA_GRE_ENCAP_FLAGS */
1510 		nla_total_size(2) +
1511 		/* IFLA_GRE_ENCAP_SPORT */
1512 		nla_total_size(2) +
1513 		/* IFLA_GRE_ENCAP_DPORT */
1514 		nla_total_size(2) +
1515 		/* IFLA_GRE_COLLECT_METADATA */
1516 		nla_total_size(0) +
1517 		/* IFLA_GRE_IGNORE_DF */
1518 		nla_total_size(1) +
1519 		/* IFLA_GRE_FWMARK */
1520 		nla_total_size(4) +
1521 		/* IFLA_GRE_ERSPAN_INDEX */
1522 		nla_total_size(4) +
1523 		/* IFLA_GRE_ERSPAN_VER */
1524 		nla_total_size(1) +
1525 		/* IFLA_GRE_ERSPAN_DIR */
1526 		nla_total_size(1) +
1527 		/* IFLA_GRE_ERSPAN_HWID */
1528 		nla_total_size(2) +
1529 		0;
1530 }
1531 
1532 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1533 {
1534 	struct ip_tunnel *t = netdev_priv(dev);
1535 	struct ip_tunnel_parm_kern *p = &t->parms;
1536 	IP_TUNNEL_DECLARE_FLAGS(o_flags);
1537 
1538 	ip_tunnel_flags_copy(o_flags, p->o_flags);
1539 
1540 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1541 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1542 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1543 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1544 			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1545 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1546 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1547 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1548 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1549 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1550 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1551 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1552 		       !!(p->iph.frag_off & htons(IP_DF))) ||
1553 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1554 		goto nla_put_failure;
1555 
1556 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1557 			t->encap.type) ||
1558 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1559 			 t->encap.sport) ||
1560 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1561 			 t->encap.dport) ||
1562 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1563 			t->encap.flags))
1564 		goto nla_put_failure;
1565 
1566 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1567 		goto nla_put_failure;
1568 
1569 	if (t->collect_md) {
1570 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1571 			goto nla_put_failure;
1572 	}
1573 
1574 	return 0;
1575 
1576 nla_put_failure:
1577 	return -EMSGSIZE;
1578 }
1579 
1580 static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1581 {
1582 	struct ip_tunnel *t = netdev_priv(dev);
1583 
1584 	if (t->erspan_ver <= 2) {
1585 		if (t->erspan_ver != 0 && !t->collect_md)
1586 			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
1587 
1588 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1589 			goto nla_put_failure;
1590 
1591 		if (t->erspan_ver == 1) {
1592 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1593 				goto nla_put_failure;
1594 		} else if (t->erspan_ver == 2) {
1595 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1596 				goto nla_put_failure;
1597 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1598 				goto nla_put_failure;
1599 		}
1600 	}
1601 
1602 	return ipgre_fill_info(skb, dev);
1603 
1604 nla_put_failure:
1605 	return -EMSGSIZE;
1606 }
1607 
1608 static void erspan_setup(struct net_device *dev)
1609 {
1610 	struct ip_tunnel *t = netdev_priv(dev);
1611 
1612 	ether_setup(dev);
1613 	dev->max_mtu = 0;
1614 	dev->netdev_ops = &erspan_netdev_ops;
1615 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1616 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1617 	ip_tunnel_setup(dev, erspan_net_id);
1618 	t->erspan_ver = 1;
1619 }
1620 
1621 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1622 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1623 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1624 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1625 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1626 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1627 	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1628 	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1629 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1630 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1631 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1632 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1633 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1634 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1635 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1636 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1637 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1638 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1639 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1640 	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1641 	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1642 	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1643 };
1644 
1645 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1646 	.kind		= "gre",
1647 	.maxtype	= IFLA_GRE_MAX,
1648 	.policy		= ipgre_policy,
1649 	.priv_size	= sizeof(struct ip_tunnel),
1650 	.setup		= ipgre_tunnel_setup,
1651 	.validate	= ipgre_tunnel_validate,
1652 	.newlink	= ipgre_newlink,
1653 	.changelink	= ipgre_changelink,
1654 	.dellink	= ip_tunnel_dellink,
1655 	.get_size	= ipgre_get_size,
1656 	.fill_info	= ipgre_fill_info,
1657 	.get_link_net	= ip_tunnel_get_link_net,
1658 };
1659 
1660 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1661 	.kind		= "gretap",
1662 	.maxtype	= IFLA_GRE_MAX,
1663 	.policy		= ipgre_policy,
1664 	.priv_size	= sizeof(struct ip_tunnel),
1665 	.setup		= ipgre_tap_setup,
1666 	.validate	= ipgre_tap_validate,
1667 	.newlink	= ipgre_newlink,
1668 	.changelink	= ipgre_changelink,
1669 	.dellink	= ip_tunnel_dellink,
1670 	.get_size	= ipgre_get_size,
1671 	.fill_info	= ipgre_fill_info,
1672 	.get_link_net	= ip_tunnel_get_link_net,
1673 };
1674 
1675 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1676 	.kind		= "erspan",
1677 	.maxtype	= IFLA_GRE_MAX,
1678 	.policy		= ipgre_policy,
1679 	.priv_size	= sizeof(struct ip_tunnel),
1680 	.setup		= erspan_setup,
1681 	.validate	= erspan_validate,
1682 	.newlink	= erspan_newlink,
1683 	.changelink	= erspan_changelink,
1684 	.dellink	= ip_tunnel_dellink,
1685 	.get_size	= ipgre_get_size,
1686 	.fill_info	= erspan_fill_info,
1687 	.get_link_net	= ip_tunnel_get_link_net,
1688 };
1689 
1690 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1691 					u8 name_assign_type)
1692 {
1693 	struct nlattr *tb[IFLA_MAX + 1];
1694 	struct net_device *dev;
1695 	LIST_HEAD(list_kill);
1696 	struct ip_tunnel *t;
1697 	int err;
1698 
1699 	memset(&tb, 0, sizeof(tb));
1700 
1701 	dev = rtnl_create_link(net, name, name_assign_type,
1702 			       &ipgre_tap_ops, tb, NULL);
1703 	if (IS_ERR(dev))
1704 		return dev;
1705 
1706 	/* Configure flow based GRE device. */
1707 	t = netdev_priv(dev);
1708 	t->collect_md = true;
1709 
1710 	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1711 	if (err < 0) {
1712 		free_netdev(dev);
1713 		return ERR_PTR(err);
1714 	}
1715 
1716 	/* openvswitch users expect packet sizes to be unrestricted,
1717 	 * so set the largest MTU we can.
1718 	 */
1719 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1720 	if (err)
1721 		goto out;
1722 
1723 	err = rtnl_configure_link(dev, NULL, 0, NULL);
1724 	if (err < 0)
1725 		goto out;
1726 
1727 	return dev;
1728 out:
1729 	ip_tunnel_dellink(dev, &list_kill);
1730 	unregister_netdevice_many(&list_kill);
1731 	return ERR_PTR(err);
1732 }
1733 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1734 
1735 static int __net_init ipgre_tap_init_net(struct net *net)
1736 {
1737 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1738 }
1739 
1740 static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
1741 						 struct list_head *dev_to_kill)
1742 {
1743 	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
1744 			      dev_to_kill);
1745 }
1746 
1747 static struct pernet_operations ipgre_tap_net_ops = {
1748 	.init = ipgre_tap_init_net,
1749 	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
1750 	.id   = &gre_tap_net_id,
1751 	.size = sizeof(struct ip_tunnel_net),
1752 };
1753 
1754 static int __net_init erspan_init_net(struct net *net)
1755 {
1756 	return ip_tunnel_init_net(net, erspan_net_id,
1757 				  &erspan_link_ops, "erspan0");
1758 }
1759 
1760 static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
1761 					      struct list_head *dev_to_kill)
1762 {
1763 	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
1764 			      dev_to_kill);
1765 }
1766 
1767 static struct pernet_operations erspan_net_ops = {
1768 	.init = erspan_init_net,
1769 	.exit_batch_rtnl = erspan_exit_batch_rtnl,
1770 	.id   = &erspan_net_id,
1771 	.size = sizeof(struct ip_tunnel_net),
1772 };
1773 
1774 static int __init ipgre_init(void)
1775 {
1776 	int err;
1777 
1778 	pr_info("GRE over IPv4 tunneling driver\n");
1779 
1780 	err = register_pernet_device(&ipgre_net_ops);
1781 	if (err < 0)
1782 		return err;
1783 
1784 	err = register_pernet_device(&ipgre_tap_net_ops);
1785 	if (err < 0)
1786 		goto pnet_tap_failed;
1787 
1788 	err = register_pernet_device(&erspan_net_ops);
1789 	if (err < 0)
1790 		goto pnet_erspan_failed;
1791 
1792 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1793 	if (err < 0) {
1794 		pr_info("%s: can't add protocol\n", __func__);
1795 		goto add_proto_failed;
1796 	}
1797 
1798 	err = rtnl_link_register(&ipgre_link_ops);
1799 	if (err < 0)
1800 		goto rtnl_link_failed;
1801 
1802 	err = rtnl_link_register(&ipgre_tap_ops);
1803 	if (err < 0)
1804 		goto tap_ops_failed;
1805 
1806 	err = rtnl_link_register(&erspan_link_ops);
1807 	if (err < 0)
1808 		goto erspan_link_failed;
1809 
1810 	return 0;
1811 
1812 erspan_link_failed:
1813 	rtnl_link_unregister(&ipgre_tap_ops);
1814 tap_ops_failed:
1815 	rtnl_link_unregister(&ipgre_link_ops);
1816 rtnl_link_failed:
1817 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1818 add_proto_failed:
1819 	unregister_pernet_device(&erspan_net_ops);
1820 pnet_erspan_failed:
1821 	unregister_pernet_device(&ipgre_tap_net_ops);
1822 pnet_tap_failed:
1823 	unregister_pernet_device(&ipgre_net_ops);
1824 	return err;
1825 }
1826 
1827 static void __exit ipgre_fini(void)
1828 {
1829 	rtnl_link_unregister(&ipgre_tap_ops);
1830 	rtnl_link_unregister(&ipgre_link_ops);
1831 	rtnl_link_unregister(&erspan_link_ops);
1832 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1833 	unregister_pernet_device(&ipgre_tap_net_ops);
1834 	unregister_pernet_device(&ipgre_net_ops);
1835 	unregister_pernet_device(&erspan_net_ops);
1836 }
1837 
1838 module_init(ipgre_init);
1839 module_exit(ipgre_fini);
1840 MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
1841 MODULE_LICENSE("GPL");
1842 MODULE_ALIAS_RTNL_LINK("gre");
1843 MODULE_ALIAS_RTNL_LINK("gretap");
1844 MODULE_ALIAS_RTNL_LINK("erspan");
1845 MODULE_ALIAS_NETDEV("gre0");
1846 MODULE_ALIAS_NETDEV("gretap0");
1847 MODULE_ALIAS_NETDEV("erspan0");
1848