// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	: 	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *              David Woodhouse :       Perform some basic ICMP handling.
 *                                      IPIP Routing without decapsulation.
 *              Carlos Picoto   :       GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 */

/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name not tunnel: when error reporting.
		Added tx_dropped stat

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to destination gateway in addition to the
			tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/

/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
	*/
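
/* A minimal sketch of the sequence described above, for illustration
 * only; it is not used anywhere in this driver, and "hdr_len",
 * "data_len" and "payload" are hypothetical sizes/buffers:
 *
 *	skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
 *	skb_reserve(skb, hdr_len);			reserve headroom first
 *	memcpy(skb_put(skb, data_len), payload, data_len);
 *	skb_push(skb, hdr_len);				prepend header space
 *	skb_pull(skb, hdr_len);				strip that header again
 *
 * skb_headroom() and skb_tailroom() report how much space remains at
 * the front and at the end of the buffer at any point.
 */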

/*
   This version of net/ipv4/ipip.c is cloned from net/ipv4/ip_gre.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int ipip_net_id __read_mostly;

static int ipip_tunnel_init(struct net_device *dev);
static struct rtnl_link_ops ipip_link_ops __read_mostly;

static int ipip_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. This means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err = 0;

	__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);

	t = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->daddr,
			     iph->saddr, 0);
	if (!t) {
		err = -ENOENT;
		goto out;
	}

	switch (type) {
	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
			/* Impossible event. */
			goto out;
		default:
			/* All others are translated to HOST_UNREACH.
			 * rfc2003 contains "deep thoughts" about NET_UNREACH,
			 * I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			goto out;
		break;

	case ICMP_REDIRECT:
		break;

	default:
		goto out;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, net, t->parms.link, iph->protocol);
		goto out;
	}

	if (t->parms.iph.daddr == 0) {
		err = -ENOENT;
		goto out;
	}

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

out:
	return err;
}

static const struct tnl_ptk_info ipip_tpi = {
	/* no tunnel info required for ipip. */
	.proto = htons(ETH_P_IP),
};

#if IS_ENABLED(CONFIG_MPLS)
static const struct tnl_ptk_info mplsip_tpi = {
	/* no tunnel info required for mplsip. */
	.proto = htons(ETH_P_MPLS_UC),
};
#endif

static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->saddr,
				  iph->daddr, 0);
	if (tunnel) {
		const struct tnl_ptk_info *tpi;

		if (tunnel->parms.iph.protocol != ipproto &&
		    tunnel->parms.iph.protocol != 0)
			goto drop;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
#if IS_ENABLED(CONFIG_MPLS)
		if (ipproto == IPPROTO_MPLS)
			tpi = &mplsip_tpi;
		else
#endif
			tpi = &ipip_tpi;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (tunnel->collect_md) {
			ip_tunnel_flags_zero(flags);

			tun_dst = ip_tun_rx_dst(skb, flags, 0, 0);
			if (!tun_dst)
				return 0;
			ip_tunnel_md_udp_encap(skb, &tun_dst->u.tun_info);
		}
		skb_reset_mac_header(skb);

		return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	}

	return -1;

drop:
	kfree_skb(skb);
	return 0;
}

static int ipip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_IPIP);
}

#if IS_ENABLED(CONFIG_MPLS)
static int mplsip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_MPLS);
}
#endif

/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8 ipproto;

	if (!pskb_inet_may_pull(skb))
		goto tx_error;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	if (tunnel->collect_md)
		ip_md_tunnel_xmit(skb, dev, ipproto, 0);
	else
		ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);

	DEV_STATS_INC(dev, tx_errors);
	return NETDEV_TX_OK;
}

static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
{
	switch (ipproto) {
	case 0:
	case IPPROTO_IPIP:
#if IS_ENABLED(CONFIG_MPLS)
	case IPPROTO_MPLS:
#endif
		return true;
	}

	return false;
}

static int
ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd)
{
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 ||
		    !ipip_tunnel_ioctl_verify_protocol(p->iph.protocol) ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)))
			return -EINVAL;
	}

	p->i_key = p->o_key = 0;
	ip_tunnel_flags_zero(p->i_flags);
	ip_tunnel_flags_zero(p->o_flags);
	return ip_tunnel_ctl(dev, p, cmd);
}

static int ipip_fill_forward_path(struct net_device_path_ctx *ctx,
				  struct net_device_path *path)
{
	struct ip_tunnel *tunnel = netdev_priv(ctx->dev);
	const struct iphdr *tiph = &tunnel->parms.iph;
	struct rtable *rt;

	rt = ip_route_output(dev_net(ctx->dev), tiph->daddr, 0, 0, 0,
			     RT_SCOPE_UNIVERSE);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	path->type = DEV_PATH_TUN;
	path->tun.src_v4.s_addr = tiph->saddr;
	path->tun.dst_v4.s_addr = tiph->daddr;
	path->tun.l3_proto = IPPROTO_IPIP;
	path->dev = ctx->dev;

	ctx->dev = rt->dst.dev;
	ip_rt_put(rt);

	return 0;
}

static const struct net_device_ops ipip_netdev_ops = {
	.ndo_init       = ipip_tunnel_init,
	.ndo_uninit     = ip_tunnel_uninit,
	.ndo_start_xmit	= ipip_tunnel_xmit,
	.ndo_siocdevprivate = ip_tunnel_siocdevprivate,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
	.ndo_tunnel_ctl	= ipip_tunnel_ctl,
	.ndo_fill_forward_path = ipip_fill_forward_path,
};

#define IPIP_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_GSO_SOFTWARE |	\
		       NETIF_F_HW_CSUM)

static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;
	dev->header_ops		= &ip_tunnel_header_ops;

	dev->type		= ARPHRD_TUNNEL;
	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->lltx		= true;
	netif_keep_dst(dev);

	dev->features		|= IPIP_FEATURES;
	dev->hw_features	|= IPIP_FEATURES;
	ip_tunnel_setup(dev, ipip_net_id);
}

static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	__dev_addr_set(dev, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	tunnel->tun_hlen = 0;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	return ip_tunnel_init(dev);
}

static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPIP && proto != IPPROTO_MPLS && proto != 0)
		return -EINVAL;

	return 0;
}

static void ipip_netlink_parms(struct nlattr *data[],
			       struct ip_tunnel_parm_kern *parms,
			       bool *collect_md, __u32 *fwmark)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.version = 4;
	parms->iph.protocol = IPPROTO_IPIP;
	parms->iph.ihl = 5;
	*collect_md = false;

	if (!data)
		return;

	ip_tunnel_netlink_parms(data, parms);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		*collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

static int ipip_newlink(struct net_device *dev,
			struct rtnl_newlink_params *params,
			struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct ip_tunnel_encap ipencap;
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &t->collect_md, &fwmark);
	return ip_tunnel_newlink(params->link_net ? : dev_net(dev), dev, tb, &p,
				 fwmark);
}

static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	struct ip_tunnel_parm_kern p;
	bool collect_md;
	__u32 fwmark = t->fwmark;

	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &collect_md, &fwmark);
	if (collect_md)
		return -EINVAL;

	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
		return -EINVAL;

	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t ipip_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(4) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(4) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_TOS */
		nla_total_size(1) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm_kern *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
		       !!(parm->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	if (tunnel->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ipip_link_ops __read_mostly = {
	.kind		= "ipip",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ipip_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipip_tunnel_setup,
	.validate	= ipip_tunnel_validate,
	.newlink	= ipip_newlink,
	.changelink	= ipip_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipip_get_size,
	.fill_info	= ipip_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

#if IS_ENABLED(CONFIG_MPLS)
static struct xfrm_tunnel mplsip_handler __read_mostly = {
	.handler	=	mplsip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};
#endif

static int __net_init ipip_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}

static void __net_exit ipip_exit_rtnl(struct net *net,
				      struct list_head *dev_to_kill)
{
	ip_tunnel_delete_net(net, ipip_net_id, &ipip_link_ops, dev_to_kill);
}

static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit_rtnl = ipip_exit_rtnl,
	.id   = &ipip_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipip_init(void)
{
	int err;

	pr_info("ipip: IPv4 and MPLS over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_ipip_failed;
	}
#if IS_ENABLED(CONFIG_MPLS)
	err = xfrm4_tunnel_register(&mplsip_handler, AF_MPLS);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_mplsip_failed;
	}
#endif
	err = rtnl_link_register(&ipip_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

out:
	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
	xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mplsip_failed:

#endif
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
	unregister_pernet_device(&ipip_net_ops);
	goto out;
}

static void __exit ipip_fini(void)
{
	rtnl_link_unregister(&ipip_link_ops);
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);
#if IS_ENABLED(CONFIG_MPLS)
	if (xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS))
		pr_info("%s: can't deregister tunnel\n", __func__);
#endif
	unregister_pernet_device(&ipip_net_ops);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_DESCRIPTION("IP/IP protocol decoder library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");