/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new variable in ALL
   skbs, even when no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; it is safe because cpu migration is forbidden once we enter
   the first ndo_xmit(). We force an exit if this counter reaches
   RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would
   really kill the network. The IP hop limit plays the role of
   "t->recursion" in this case, if we copy it from the packet being
   encapsulated to the upper header.
   It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. In short, ttl is no
   solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us;
   we did all that we could. Even if it was your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */

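/* A minimal usage sketch of the DF behaviour described above, assuming the
 * iproute2 front end (the device name "gre1" and the addresses are only
 * examples):
 *
 *   ip link add gre1 type gre remote 10.0.0.2 local 10.0.0.1 ttl 64
 *
 * Path MTU discovery is on by default, so ipgre_netlink_parms() below sets
 * IP_DF on the outer header; adding "nopmtudisc" at creation time leaves DF
 * clear (see the IFLA_GRE_PMTUDISC handling there).
 */
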
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so why the hell do these idiots break standards established
	   by themselves?
	   */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so why the hell do these idiots break standards established
	 * by themselves?
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		/* csum errors are ignored; bail out on any other
		 * parse failure.
		 */
		if (!csum_err)
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

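/* Map a 32-bit GRE key into the low-order 32 bits of a network-byte-order
 * 64-bit tunnel id, regardless of host endianness; tunnel_id_to_key() below
 * is its inverse.
 */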
static __be64 key_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)((__force u32)key);
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static __be32 tunnel_id_to_key(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}

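/* Demultiplex a received GRE packet to a tunnel device. Returns PACKET_RCVD
 * if the skb was consumed (delivered or dropped), PACKET_REJECT if a
 * metadata dst could not be allocated, and PACKET_NEXT if no matching
 * tunnel was found and the caller may try another tunnel table.
 */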
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped-back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	skb_set_inner_protocol(skb, proto);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

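/* Flow-based ("collect metadata") transmit path: the outer addresses, key
 * and flags come from the per-skb tunnel info attached by the caller (e.g.
 * openvswitch) rather than from the device configuration.
 */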
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, &fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl.saddr);
	}

	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers. */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}
	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could do something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
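/* A rough modern equivalent of the ifconfig lines above, using only
 * iproute2 (interface name and address placeholders as in the example):
 *
 *   ip link set Universe up
 *   ip addr add fe80::<Your_real_addr>/10 dev Universe
 *   ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */
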
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor can we
		 * support 2 levels of outer headers requiring an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

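/* GRE routing (RFC 1701) and GRE versions other than 0 have never been
 * supported by this driver, so both the ioctl path above and the netlink
 * validation below reject the GRE_ROUTING and GRE_VERSION flags up front.
 */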
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}
}

/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

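/* The ENCAP attributes above configure UDP encapsulation of the GRE
 * packets, e.g. foo-over-udp (FOU). A usage sketch, assuming a FOU receive
 * port has been set up separately and treating the port numbers and device
 * name as examples only:
 *
 *   ip fou add port 5555 ipproto 47
 *   ip link add gre1 type gre remote 10.0.0.2 local 10.0.0.1 \
 *           encap fou encap-sport auto encap-dport 5555
 */
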
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_newlink(dev, tb, &p);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure the flow-based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");