xref: /linux/net/ipv4/ip_vti.c (revision 896868eded124059023be0af92d68cdaf9b4de70)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */
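
/* A VTI device applies IPsec by routing: packets routed out of the
 * device are matched against xfrm policies using the tunnel o_key as
 * the flow mark (see vti_tunnel_xmit()), and decapsulated packets are
 * policy-checked with the i_key as the mark (see vti_rcv_cb()).
 *
 * Illustrative configuration sketch, assuming iproute2 and xfrm
 * states/policies keyed on the same marks (addresses are examples):
 *
 *	ip link add vti1 type vti ikey 10 okey 10 \
 *		local 192.0.2.1 remote 192.0.2.2
 *	ip link set vti1 up
 *	ip route add 198.51.100.0/24 dev vti1
 */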


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);

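/* Look up the tunnel device for an inbound packet by its outer
 * addresses and, if one exists, run the IPsec input policy check and
 * hand the packet to xfrm_input() for decapsulation. The tunnel
 * pointer is stashed in the skb control block so vti_rcv_cb() can
 * finish the job after the transform completes.
 */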
static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
		     int encap_type, bool update_skb_dev)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };

	__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
				  iph->saddr, iph->daddr, 0);
	if (tunnel) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

		if (update_skb_dev)
			skb->dev = tunnel->dev;

		return xfrm_input(skb, nexthdr, spi, encap_type);
	}

	return -EINVAL;
drop:
	kfree_skb(skb);
	return 0;
}

static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
			   int encap_type)
{
	return vti_input(skb, nexthdr, spi, encap_type, false);
}

static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
}

static int vti_rcv_proto(struct sk_buff *skb)
{
	return vti_rcv(skb, 0, false);
}

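/* Callback invoked by the xfrm layer once decapsulation has finished
 * (or failed, in which case only the rx error counters are bumped).
 * Temporarily swaps skb->mark for the tunnel i_key so the inner-packet
 * policy check matches mark-keyed policies, then scrubs the skb,
 * reassigns it to the tunnel device and updates the rx statistics.
 */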
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct xfrm_state *x;
	const struct xfrm_mode *inner_mode;
	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
	u32 orig_mark = skb->mark;
	int ret;

	if (!tunnel)
		return 1;

	dev = tunnel->dev;

	if (err) {
		DEV_STATS_INC(dev, rx_errors);
		DEV_STATS_INC(dev, rx_dropped);

		return 0;
	}

	x = xfrm_input_state(skb);

	inner_mode = &x->inner_mode;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);
			return -EINVAL;
		}
	}

	family = inner_mode->family;

	skb->mark = be32_to_cpu(tunnel->parms.i_key);
	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
	skb->mark = orig_mark;

	if (!ret)
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
	skb->dev = dev;
	dev_sw_netstats_rx_add(dev, skb->len);

	return 0;
}

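/* Sanity-check the xfrm state bound to the route: the tunnel only
 * works over an IPv4 tunnel-mode transform whose endpoint addresses
 * match the tunnel's own. With a wildcard (zero) destination, only
 * the source address is compared.
 */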
static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)&dst;
	xfrm_address_t *saddr = (xfrm_address_t *)&src;

	/* If there is no transform, or the transform is not an IPv4
	 * tunnel-mode transform, this tunnel is not functional.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET)
		return false;

	if (!dst)
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
		return false;

	return true;
}

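/* Transmit path proper: resolve a route for the inner packet if the
 * skb does not already carry one, run it through xfrm_lookup_route()
 * to attach the IPsec transform, enforce path MTU (sending ICMP
 * FRAG_NEEDED / PKT_TOOBIG back when the packet is too big), and hand
 * the result to dst_output().
 */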
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct flowi *fl)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm_kern *parms = &tunnel->parms;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;	/* Device to other host */
	int pkt_len = skb->len;
	int err;
	int mtu;

	if (!dst) {
		switch (skb->protocol) {
		case htons(ETH_P_IP): {
			struct rtable *rt;

			fl->u.ip4.flowi4_oif = dev->ifindex;
			fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
			rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
			if (IS_ERR(rt)) {
				DEV_STATS_INC(dev, tx_carrier_errors);
				goto tx_error_icmp;
			}
			dst = &rt->dst;
			skb_dst_set(skb, dst);
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			fl->u.ip6.flowi6_oif = dev->ifindex;
			fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
			dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
			if (dst->error) {
				dst_release(dst);
				dst = NULL;
				DEV_STATS_INC(dev, tx_carrier_errors);
				goto tx_error_icmp;
			}
			skb_dst_set(skb, dst);
			break;
#endif
		default:
			DEV_STATS_INC(dev, tx_carrier_errors);
			goto tx_error_icmp;
		}
	}

	dst_hold(dst);
	dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		DEV_STATS_INC(dev, tx_carrier_errors);
		goto tx_error_icmp;
	}

	if (dst->flags & DST_XFRM_QUEUE)
		goto xmit;

	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
		DEV_STATS_INC(dev, tx_carrier_errors);
		dst_release(dst);
		goto tx_error_icmp;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		dst_release(dst);
		DEV_STATS_INC(dev, collisions);
		goto tx_error;
	}

	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		skb_dst_update_pmtu_no_confirm(skb, mtu);
		if (skb->protocol == htons(ETH_P_IP)) {
			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
				goto xmit;
			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				      htonl(mtu));
		} else {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		dst_release(dst);
		goto tx_error;
	}

xmit:
	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(tunnel->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0)
		err = pkt_len;
	iptunnel_xmit_stats(dev, err);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct flowi fl;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET);
		break;
	case htons(ETH_P_IPV6):
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET6);
		break;
	default:
		goto tx_err;
	}

	/* override mark with tunnel output key */
	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);

	return vti_xmit(skb, dev, &fl);

tx_err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

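/* ICMP error handler: fish the SPI out of the ESP/AH/IPcomp header
 * that follows the quoted IP header, look up the matching xfrm state
 * (keyed on the tunnel o_key as the mark) and propagate PMTU updates
 * and redirects to it.
 */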
static int vti4_err(struct sk_buff *skb, u32 info)
{
	__be32 spi;
	__u32 mark;
	struct xfrm_state *x;
	struct ip_tunnel *tunnel;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	int protocol = iph->protocol;
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };

	__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
				  iph->daddr, iph->saddr, 0);
	if (!tunnel)
		return -1;

	mark = be32_to_cpu(tunnel->parms.o_key);

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, protocol);
	else
		ipv4_redirect(skb, net, 0, protocol);
	xfrm_state_put(x);

	return 0;
}

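/* ioctl-based tunnel configuration. New or changed tunnels must use
 * the IPPROTO_IPIP header template; keys whose GRE_KEY flag is absent
 * are zeroed, and the VTI flag is forced on before handing off to the
 * generic ip_tunnel_ctl().
 */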
static int
vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd)
{
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	int err = 0;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_IPIP ||
		    p->iph.ihl != 5)
			return -EINVAL;
	}

	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
		return -EOVERFLOW;

	if (!(ip_tunnel_flags_to_be16(p->i_flags) & GRE_KEY))
		p->i_key = 0;
	if (!(ip_tunnel_flags_to_be16(p->o_flags) & GRE_KEY))
		p->o_key = 0;

	__set_bit(IP_TUNNEL_VTI_BIT, flags);
	ip_tunnel_flags_copy(p->i_flags, flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd != SIOCDELTUNNEL) {
		ip_tunnel_flags_from_be16(flags, GRE_KEY);
		ip_tunnel_flags_or(p->i_flags, p->i_flags, flags);
		ip_tunnel_flags_or(p->o_flags, p->o_flags, flags);
	}
	return 0;
}

static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= ip_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_siocdevprivate = ip_tunnel_siocdevprivate,
	.ndo_change_mtu	= ip_tunnel_change_mtu,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
	.ndo_tunnel_ctl	= vti_tunnel_ctl,
};

static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->header_ops		= &ip_tunnel_header_ops;
	dev->type		= ARPHRD_TUNNEL;
	ip_tunnel_setup(dev, vti_net_id);
}

static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

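/* Give the per-namespace fallback device (ip_vti0) a minimal IPIP
 * header template; real configuration arrives later via ioctl or
 * netlink.
 */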
static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;
}

static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
static int vti_rcv_tunnel(struct sk_buff *skb)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr, 0, false);
}

static struct xfrm_tunnel vti_ipip_handler __read_mostly = {
	.handler	=	vti_rcv_tunnel,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	0,
};

#if IS_ENABLED(CONFIG_IPV6)
static struct xfrm_tunnel vti_ipip6_handler __read_mostly = {
	.handler	=	vti_rcv_tunnel,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	0,
};
#endif
#endif

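/* Per-network-namespace setup: each netns gets its own tunnel table
 * and an "ip_vti0" fallback device; teardown is batched under RTNL.
 */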
static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct ip_tunnel_net *itn;

	err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
	if (err)
		return err;
	itn = net_generic(net, vti_net_id);
	if (itn->fb_tunnel_dev)
		vti_fb_tunnel_init(itn->fb_tunnel_dev);
	return 0;
}

static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net,
					   struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops,
			      dev_to_kill);
}

static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit_batch_rtnl = vti_exit_batch_rtnl,
	.id   = &vti_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	return 0;
}

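/* Netlink configuration path: translate IFLA_VTI_* attributes into the
 * generic ip_tunnel parameter block. The VTI flag is always set and
 * the header template is always IPPROTO_IPIP, mirroring the ioctl
 * path above.
 */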
static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm_kern *parms,
			      __u32 *fwmark)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	__set_bit(IP_TUNNEL_VTI_BIT, parms->i_flags);

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);

	if (data[IFLA_VTI_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm_kern parms;
	__u32 fwmark = 0;

	vti_netlink_parms(data, &parms, &fwmark);
	return ip_tunnel_newlink(dev, tb, &parms, fwmark);
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;

	vti_netlink_parms(data, &p, &fwmark);
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t vti_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(4) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(4) +
		/* IFLA_VTI_FWMARK */
		nla_total_size(4) +
		0;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern *p = &t->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_VTI_FWMARK]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.dellink        = ip_tunnel_dellink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

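/* Module init: register the pernet ops, then attach to the ESP, AH and
 * IPcomp xfrm input paths (and, when enabled, the IPIP/IPv6-in-IPv4
 * tunnel handlers) before exposing the netlink interface. Errors
 * unwind the registrations in reverse order.
 */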
static int __init vti_init(void)
{
	const char *msg;
	int err;

	pr_info("IPv4 over IPsec tunneling driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "tunnel protocols";
	err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
	msg = "ipip tunnel";
	err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET);
	if (err < 0)
		goto xfrm_tunnel_ipip_failed;
#if IS_ENABLED(CONFIG_IPV6)
	err = xfrm4_tunnel_register(&vti_ipip6_handler, AF_INET6);
	if (err < 0)
		goto xfrm_tunnel_ipip6_failed;
#endif
#endif

	msg = "netlink interface";
	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
#if IS_ENABLED(CONFIG_IPV6)
	xfrm4_tunnel_deregister(&vti_ipip6_handler, AF_INET6);
xfrm_tunnel_ipip6_failed:
#endif
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
#endif
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
	pr_err("vti init: failed to register %s\n", msg);
	return err;
}

static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
#if IS_ENABLED(CONFIG_IPV6)
	xfrm4_tunnel_deregister(&vti_ipip6_handler, AF_INET6);
#endif
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET);
#endif
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
	unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_DESCRIPTION("Virtual (secure) IP tunneling library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");