// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(iptun_encaps);

const struct ip6_tnl_encap_ops __rcu *
		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(ip6tun_encaps);

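/**
 * iptunnel_xmit() - Scrub skb, push outer IPv4 header and transmit
 * @sk:		Socket passed through to ip_local_out()
 * @rt:		Route to the tunnel endpoint, installed as skb destination
 * @skb:	Packet to encapsulate
 * @src:	Outer source address
 * @dst:	Outer destination address
 * @proto:	Outer IP protocol
 * @tos:	Outer TOS value
 * @ttl:	Outer TTL
 * @df:		Outer DF bit, ignored when the route MTU is locked
 * @xnet:	Packet crosses network namespaces
 *
 * Builds the outer IPv4 header, selects an IP ID covering all GSO
 * segments, sends via ip_local_out() and accounts the inner packet
 * length in the tunnel device stats.
 */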
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 proto,
		   __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct net_device *dev = skb->dev;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash_if_not_l4(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	ip_mtu_locked(&rt->dst) ? 0 : df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out(net, sk, skb);

	if (dev) {
		if (unlikely(net_xmit_eval(err)))
			pkt_len = 0;
		iptunnel_xmit_stats(dev, pkt_len);
	}
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);

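/* Remove hdr_len bytes of tunnel header and set skb->protocol for the
 * inner packet: for ETH_P_TEB payloads (unless raw_proto is set) it is
 * taken from the inner Ethernet header, otherwise inner_proto is used
 * as-is. The skb is scrubbed and offload state pulled before the packet
 * is handed further up the stack.
 */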
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	skb_clear_hash_if_not_l4(skb);
	__vlan_hwaccel_clear_tag(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);

	return iptunnel_pull_offloads(skb);
}
EXPORT_SYMBOL_GPL(__iptunnel_pull_header);

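/* Build TX tunnel metadata suitable for replying to a received packet:
 * allocate a new metadata dst, copy the tunnel id, flags and options
 * from the RX info, and use the original outer source address as the
 * new destination. Returns NULL if md is not RX IP tunnel metadata.
 */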
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags)
{
	struct metadata_dst *res;
	struct ip_tunnel_info *dst, *src;

	if (!md || md->type != METADATA_IP_TUNNEL ||
	    md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
		return NULL;

	src = &md->u.tun_info;
	res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags);
	if (!res)
		return NULL;

	dst = &res->u.tun_info;
	dst->key.tun_id = src->key.tun_id;
	if (src->mode & IP_TUNNEL_INFO_IPV6)
		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
		       sizeof(struct in6_addr));
	else
		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
	dst->key.tun_flags = src->key.tun_flags;
	dst->mode = src->mode | IP_TUNNEL_INFO_TX;
	ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
				src->options_len, 0);

	return res;
}
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);

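/* Prepare an skb for tunnel encapsulation: mark the current headers as
 * inner headers and, for GSO packets, add gso_type_mask to the GSO type
 * after making the header part of the skb writable. Non-GSO packets
 * without a pending checksum get encapsulation cleared again to keep
 * drivers from offloading an inner checksum.
 */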
int iptunnel_handle_offloads(struct sk_buff *skb,
			     int gso_type_mask)
{
	int err;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		err = skb_header_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type |= gso_type_mask;
		return 0;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
		/* We clear encapsulation here to prevent badly-written
		 * drivers potentially deciding to offload an inner checksum
		 * if we set CHECKSUM_PARTIAL on the outer header.
		 * This should go away when the drivers are all fixed.
		 */
		skb->encapsulation = 0;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);

/**
 * iptunnel_pmtud_build_icmp() - Build ICMP error message for PMTUD
 * @skb:	Original packet with L2 header
 * @mtu:	MTU value for ICMP error
 *
 * Return: length on success, negative error code if message couldn't be built.
 */
static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct icmphdr *icmph;
	struct iphdr *niph;
	struct ethhdr eh;
	int len, err;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		return -EINVAL;

	skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
	pskb_pull(skb, ETH_HLEN);
	skb_reset_network_header(skb);

	err = pskb_trim(skb, 576 - sizeof(*niph) - sizeof(*icmph));
	if (err)
		return err;

	len = skb->len + sizeof(*icmph);
	err = skb_cow(skb, sizeof(*niph) + sizeof(*icmph) + ETH_HLEN);
	if (err)
		return err;

	icmph = skb_push(skb, sizeof(*icmph));
	*icmph = (struct icmphdr) {
		.type			= ICMP_DEST_UNREACH,
		.code			= ICMP_FRAG_NEEDED,
		.checksum		= 0,
		.un.frag.__unused	= 0,
		.un.frag.mtu		= htons(mtu),
	};
	icmph->checksum = ip_compute_csum(icmph, len);
	skb_reset_transport_header(skb);

	niph = skb_push(skb, sizeof(*niph));
	*niph = (struct iphdr) {
		.ihl			= sizeof(*niph) / 4u,
		.version		= 4,
		.tos			= 0,
		.tot_len		= htons(len + sizeof(*niph)),
		.id			= 0,
		.frag_off		= htons(IP_DF),
		.ttl			= iph->ttl,
		.protocol		= IPPROTO_ICMP,
		.saddr			= iph->daddr,
		.daddr			= iph->saddr,
	};
	ip_send_check(niph);
	skb_reset_network_header(skb);

	skb->ip_summed = CHECKSUM_NONE;

	eth_header(skb, skb->dev, ntohs(eh.h_proto), eh.h_source, eh.h_dest, 0);
	skb_reset_mac_header(skb);

	return skb->len;
}

/**
 * iptunnel_pmtud_check_icmp() - Trigger ICMP reply if needed and allowed
 * @skb:	Buffer being sent by encapsulation, L2 headers expected
 * @mtu:	Network MTU for path
 *
 * Return: 0 for no ICMP reply, length if built, negative value on error.
 */
static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu)
{
	const struct icmphdr *icmph = icmp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);

	if (mtu < 576 || iph->frag_off != htons(IP_DF))
		return 0;

	if (ipv4_is_lbcast(iph->daddr)  || ipv4_is_multicast(iph->daddr) ||
	    ipv4_is_zeronet(iph->saddr) || ipv4_is_loopback(iph->saddr)  ||
	    ipv4_is_lbcast(iph->saddr)  || ipv4_is_multicast(iph->saddr))
		return 0;

	if (iph->protocol == IPPROTO_ICMP && icmp_is_err(icmph->type))
		return 0;

	return iptunnel_pmtud_build_icmp(skb, mtu);
}

#if IS_ENABLED(CONFIG_IPV6)
/**
 * iptunnel_pmtud_build_icmpv6() - Build ICMPv6 error message for PMTUD
 * @skb:	Original packet with L2 header
 * @mtu:	MTU value for ICMPv6 error
 *
 * Return: length on success, negative error code if message couldn't be built.
 */
static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct icmp6hdr *icmp6h;
	struct ipv6hdr *nip6h;
	struct ethhdr eh;
	int len, err;
	__wsum csum;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		return -EINVAL;

	skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
	pskb_pull(skb, ETH_HLEN);
	skb_reset_network_header(skb);

	err = pskb_trim(skb, IPV6_MIN_MTU - sizeof(*nip6h) - sizeof(*icmp6h));
	if (err)
		return err;

	len = skb->len + sizeof(*icmp6h);
	err = skb_cow(skb, sizeof(*nip6h) + sizeof(*icmp6h) + ETH_HLEN);
	if (err)
		return err;

	icmp6h = skb_push(skb, sizeof(*icmp6h));
	*icmp6h = (struct icmp6hdr) {
		.icmp6_type		= ICMPV6_PKT_TOOBIG,
		.icmp6_code		= 0,
		.icmp6_cksum		= 0,
		.icmp6_mtu		= htonl(mtu),
	};
	skb_reset_transport_header(skb);

	nip6h = skb_push(skb, sizeof(*nip6h));
	*nip6h = (struct ipv6hdr) {
		.priority		= 0,
		.version		= 6,
		.flow_lbl		= { 0 },
		.payload_len		= htons(len),
		.nexthdr		= IPPROTO_ICMPV6,
		.hop_limit		= ip6h->hop_limit,
		.saddr			= ip6h->daddr,
		.daddr			= ip6h->saddr,
	};
	skb_reset_network_header(skb);

	csum = csum_partial(icmp6h, len, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
					      IPPROTO_ICMPV6, csum);

	skb->ip_summed = CHECKSUM_NONE;

	eth_header(skb, skb->dev, ntohs(eh.h_proto), eh.h_source, eh.h_dest, 0);
	skb_reset_mac_header(skb);

	return skb->len;
}

/**
 * iptunnel_pmtud_check_icmpv6() - Trigger ICMPv6 reply if needed and allowed
 * @skb:	Buffer being sent by encapsulation, L2 headers expected
 * @mtu:	Network MTU for path
 *
 * Return: 0 for no ICMPv6 reply, length if built, negative value on error.
 */
static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int stype = ipv6_addr_type(&ip6h->saddr);
	u8 proto = ip6h->nexthdr;
	__be16 frag_off;
	int offset;

	if (mtu < IPV6_MIN_MTU)
		return 0;

	if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST ||
	    stype == IPV6_ADDR_LOOPBACK)
		return 0;

	offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto,
				  &frag_off);
	if (offset < 0 || (frag_off & htons(~0x7)))
		return 0;

	if (proto == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6h;

		if (!pskb_may_pull(skb, skb_network_header(skb) +
					offset + 1 - skb->data))
			return 0;

		icmp6h = (struct icmp6hdr *)(skb_network_header(skb) + offset);
		if (icmpv6_is_err(icmp6h->icmp6_type) ||
		    icmp6h->icmp6_type == NDISC_REDIRECT)
			return 0;
	}

	return iptunnel_pmtud_build_icmpv6(skb, mtu);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 * skb_tunnel_check_pmtu() - Check, update PMTU and trigger ICMP reply as needed
 * @skb:	Buffer being sent by encapsulation, L2 headers expected
 * @encap_dst:	Destination for tunnel encapsulation (outer IP)
 * @headroom:	Encapsulation header size, bytes
 * @reply:	Build matching ICMP or ICMPv6 message as a result
 *
 * L2 tunnel implementations that can carry IP and can be directly bridged
 * (currently UDP tunnels) can't always rely on IP forwarding paths to handle
 * PMTU discovery. In the bridged case, ICMP or ICMPv6 messages need to be built
 * based on payload and sent back by the encapsulation itself.
 *
 * For routable interfaces, we just need to update the PMTU for the destination.
 *
 * Return: 0 if ICMP error not needed, length if built, negative value on error
 */
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
			  int headroom, bool reply)
{
	u32 mtu = dst_mtu(encap_dst) - headroom;

	if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) ||
	    (!skb_is_gso(skb) && (skb->len - skb_network_offset(skb)) <= mtu))
		return 0;

	skb_dst_update_pmtu_no_confirm(skb, mtu);

	if (!reply || skb->pkt_type == PACKET_HOST)
		return 0;

	if (skb->protocol == htons(ETH_P_IP))
		return iptunnel_pmtud_check_icmp(skb, mtu);

#if IS_ENABLED(CONFIG_IPV6)
	if (skb->protocol == htons(ETH_P_IPV6))
		return iptunnel_pmtud_check_icmpv6(skb, mtu);
#endif
	return 0;
}
EXPORT_SYMBOL(skb_tunnel_check_pmtu);

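/* A usage sketch (hypothetical caller, headroom and labels are examples,
 * not taken from this file): an L2 UDP tunnel transmit path could check
 * the path MTU before encapsulating, requesting an ICMP reply only for
 * bridged packets:
 *
 *	err = skb_tunnel_check_pmtu(skb, &rt->dst, headroom,
 *				    skb->pkt_type != PACKET_HOST);
 *	if (err < 0)
 *		goto tx_error;
 *	else if (err)
 *		goto reply_built;	(skb was rewritten into the reply)
 *
 * where headroom covers the outer Ethernet, IP, UDP and tunnel headers.
 */
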
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
	[LWTUNNEL_IP_UNSPEC]	= { .strict_start_type = LWTUNNEL_IP_OPTS },
	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
	[LWTUNNEL_IP_OPTS]	= { .type = NLA_NESTED },
};

static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = {
	[LWTUNNEL_IP_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[LWTUNNEL_IP_OPTS_VXLAN]	= { .type = NLA_NESTED },
	[LWTUNNEL_IP_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
	[LWTUNNEL_IP_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[LWTUNNEL_IP_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_OPT_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = {
	[LWTUNNEL_IP_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = {
	[LWTUNNEL_IP_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

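/* Parse one LWTUNNEL_IP_OPTS_GENEVE nest into a geneve_opt TLV placed
 * opts_len bytes into the option area, or just validate and size it when
 * info is NULL. Returns the number of bytes the option occupies.
 */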
static int ip_tun_parse_opts_geneve(struct nlattr *attr,
				    struct ip_tunnel_info *info, int opts_len,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1];
	int data_len, err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr,
			       geneve_opt_policy, extack);
	if (err)
		return err;

	if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] ||
	    !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] ||
	    !tb[LWTUNNEL_IP_OPT_GENEVE_DATA])
		return -EINVAL;

	attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	if (info) {
		struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len;

		memcpy(opt->opt_data, nla_data(attr), data_len);
		opt->length = data_len / 4;
		attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(attr);
		attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(attr);
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
				   struct ip_tunnel_info *info, int opts_len,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr,
			       vxlan_opt_policy, extack);
	if (err)
		return err;

	if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP])
		return -EINVAL;

	if (info) {
		struct vxlan_metadata *md =
			ip_tunnel_info_opts(info) + opts_len;

		attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
		md->gbp = nla_get_u32(attr);
		md->gbp &= VXLAN_GBP_MASK;
		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
	}

	return sizeof(struct vxlan_metadata);
}

static int ip_tun_parse_opts_erspan(struct nlattr *attr,
				    struct ip_tunnel_info *info, int opts_len,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1];
	int err;
	u8 ver;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr,
			       erspan_opt_policy, extack);
	if (err)
		return err;

	if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER])
		return -EINVAL;

	ver = nla_get_u8(tb[LWTUNNEL_IP_OPT_ERSPAN_VER]);
	if (ver == 1) {
		if (!tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX])
			return -EINVAL;
	} else if (ver == 2) {
		if (!tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] ||
		    !tb[LWTUNNEL_IP_OPT_ERSPAN_HWID])
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (info) {
		struct erspan_metadata *md =
			ip_tunnel_info_opts(info) + opts_len;

		md->version = ver;
		if (ver == 1) {
			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(attr);
		} else {
			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(attr);
			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(attr));
		}

		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
	}

	return sizeof(struct erspan_metadata);
}

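/* Walk the LWTUNNEL_IP_OPTS nest and return the total option length.
 * Called twice: first with info == NULL via ip_tun_get_optlen() to size
 * the option area, then via ip_tun_set_opts() to actually fill it in.
 * Geneve options may repeat; a VXLAN or ERSPAN option must be the only
 * option present.
 */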
static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
			     struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, opts_len = 0;
	struct nlattr *nla;
	__be16 type = 0;

	if (!attr)
		return 0;

	err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX,
			   ip_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case LWTUNNEL_IP_OPTS_GENEVE:
			if (type && type != TUNNEL_GENEVE_OPT)
				return -EINVAL;
			opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
							   extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (opts_len > IP_TUNNEL_OPTS_MAX)
				return -EINVAL;
			type = TUNNEL_GENEVE_OPT;
			break;
		case LWTUNNEL_IP_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len,
							  extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_VXLAN_OPT;
			break;
		case LWTUNNEL_IP_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len,
							   extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_ERSPAN_OPT;
			break;
		default:
			return -EINVAL;
		}
	}

	return opts_len;
}

static int ip_tun_get_optlen(struct nlattr *attr,
			     struct netlink_ext_ack *extack)
{
	return ip_tun_parse_opts(attr, NULL, extack);
}

static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
			   struct netlink_ext_ack *extack)
{
	return ip_tun_parse_opts(attr, info, extack);
}

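/* Encap state is normally created from userspace through routes, e.g.
 * with iproute2 (addresses and device name below are examples only):
 *
 *	ip route add 10.1.1.0/24 encap ip id 1000 dst 192.0.2.1 ttl 64 \
 *		dev geneve0
 *
 * The encap attributes arrive here packed as LWTUNNEL_IP_* netlink
 * attributes.
 */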
static int ip_tun_build_state(struct net *net, struct nlattr *attr,
			      unsigned int family, const void *cfg,
			      struct lwtunnel_state **ts,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	struct lwtunnel_state *new_state;
	struct ip_tunnel_info *tun_info;
	int err, opt_len;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
					  ip_tun_policy, extack);
	if (err < 0)
		return err;

	opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack);
	if (opt_len < 0)
		return opt_len;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack);
	if (err < 0) {
		lwtstate_free(new_state);
		return err;
	}

#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
	if (err) {
		lwtstate_free(new_state);
		return err;
	}
#endif

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags |=
				(nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
				 ~TUNNEL_OPTIONS_PRESENT);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options_len = opt_len;

	*ts = new_state;

	return 0;
}

static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
{
#ifdef CONFIG_DST_CACHE
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	dst_cache_destroy(&tun_info->dst_cache);
#endif
}

static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb,
					 struct ip_tunnel_info *tun_info)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int offset = 0;

	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
	if (!nest)
		return -ENOMEM;

	while (tun_info->options_len > offset) {
		opt = ip_tunnel_info_opts(tun_info) + offset;
		if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) ||
		    nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4,
			    opt->opt_data)) {
			nla_nest_cancel(skb, nest);
			return -ENOMEM;
		}
		offset += sizeof(*opt) + opt->length * 4;
	}

	nla_nest_end(skb, nest);
	return 0;
}

static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb,
					struct ip_tunnel_info *tun_info)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN);
	if (!nest)
		return -ENOMEM;

	md = ip_tunnel_info_opts(tun_info);
	if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) {
		nla_nest_cancel(skb, nest);
		return -ENOMEM;
	}

	nla_nest_end(skb, nest);
	return 0;
}

static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb,
					 struct ip_tunnel_info *tun_info)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN);
	if (!nest)
		return -ENOMEM;

	md = ip_tunnel_info_opts(tun_info);
	if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version))
		goto err;

	if (md->version == 1 &&
	    nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index))
		goto err;

	if (md->version == 2 &&
	    (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) ||
	     nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto err;

	nla_nest_end(skb, nest);
	return 0;
err:
	nla_nest_cancel(skb, nest);
	return -ENOMEM;
}

static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
				  struct ip_tunnel_info *tun_info)
{
	struct nlattr *nest;
	int err = 0;

	if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
		return 0;

	nest = nla_nest_start_noflag(skb, type);
	if (!nest)
		return -ENOMEM;

	if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
		err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
	else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
		err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
	else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
		err = ip_tun_fill_encap_opts_erspan(skb, tun_info);

	if (err) {
		nla_nest_cancel(skb, nest);
		return err;
	}

	nla_nest_end(skb, nest);
	return 0;
}

static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP_PAD) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
	    ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
		return -ENOMEM;

	return 0;
}

static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
{
	int opt_len;

	if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
		return 0;

	opt_len = nla_total_size(0);		/* LWTUNNEL_IP_OPTS */
	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		struct geneve_opt *opt;
		int offset = 0;

		opt_len += nla_total_size(0);	/* LWTUNNEL_IP_OPTS_GENEVE */
		while (info->options_len > offset) {
			opt = ip_tunnel_info_opts(info) + offset;
			opt_len += nla_total_size(2)	/* OPT_GENEVE_CLASS */
				   + nla_total_size(1)	/* OPT_GENEVE_TYPE */
				   + nla_total_size(opt->length * 4);
							/* OPT_GENEVE_DATA */
			offset += sizeof(*opt) + opt->length * 4;
		}
	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_VXLAN */
			   + nla_total_size(4);	/* OPT_VXLAN_GBP */
	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
		struct erspan_metadata *md = ip_tunnel_info_opts(info);

		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_ERSPAN */
			   + nla_total_size(1)	/* OPT_ERSPAN_VER */
			   + (md->version == 1 ? nla_total_size(4)
						/* OPT_ERSPAN_INDEX (v1) */
					       : nla_total_size(1) +
						 nla_total_size(1));
						/* OPT_ERSPAN_DIR + HWID (v2) */
	}

	return opt_len;
}

static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP_ID */
		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
		+ nla_total_size(2)	/* LWTUNNEL_IP_FLAGS */
		+ ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
					/* LWTUNNEL_IP_OPTS */
}

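/* Compare two lwtunnel states: returns 0 if key, mode and options all
 * match, nonzero otherwise. Only the key portion of ip_tunnel_info is
 * memcmp()ed directly; options are compared separately by length and
 * content.
 */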
static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct ip_tunnel_info *info_a = lwt_tun_info(a);
	struct ip_tunnel_info *info_b = lwt_tun_info(b);

	return memcmp(info_a, info_b, sizeof(info_a->key)) ||
	       info_a->mode != info_b->mode ||
	       info_a->options_len != info_b->options_len ||
	       memcmp(ip_tunnel_info_opts(info_a),
		      ip_tunnel_info_opts(info_b), info_a->options_len);
}

static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
	.build_state = ip_tun_build_state,
	.destroy_state = ip_tun_destroy_state,
	.fill_encap = ip_tun_fill_encap_info,
	.get_encap_size = ip_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
	.owner = THIS_MODULE,
};

static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
	[LWTUNNEL_IP6_UNSPEC]	= { .strict_start_type = LWTUNNEL_IP6_OPTS },
	[LWTUNNEL_IP6_ID]		= { .type = NLA_U64 },
	[LWTUNNEL_IP6_DST]		= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
	[LWTUNNEL_IP6_OPTS]		= { .type = NLA_NESTED },
};

static int ip6_tun_build_state(struct net *net, struct nlattr *attr,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
	struct lwtunnel_state *new_state;
	struct ip_tunnel_info *tun_info;
	int err, opt_len;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
					  ip6_tun_policy, extack);
	if (err < 0)
		return err;

	opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack);
	if (opt_len < 0)
		return opt_len;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP6;

	tun_info = lwt_tun_info(new_state);

	err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack);
	if (err < 0) {
		lwtstate_free(new_state);
		return err;
	}

	if (tb[LWTUNNEL_IP6_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);

	if (tb[LWTUNNEL_IP6_DST])
		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);

	if (tb[LWTUNNEL_IP6_SRC])
		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);

	if (tb[LWTUNNEL_IP6_HOPLIMIT])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);

	if (tb[LWTUNNEL_IP6_TC])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);

	if (tb[LWTUNNEL_IP6_FLAGS])
		tun_info->key.tun_flags |=
				(nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
				 ~TUNNEL_OPTIONS_PRESENT);

	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
	tun_info->options_len = opt_len;

	*ts = new_state;

	return 0;
}

static int ip6_tun_fill_encap_info(struct sk_buff *skb,
				   struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP6_PAD) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
	    ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
		return -ENOMEM;

	return 0;
}

static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP6_ID */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
		+ nla_total_size(2)	/* LWTUNNEL_IP6_FLAGS */
		+ ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
					/* LWTUNNEL_IP6_OPTS */
}

static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
	.build_state = ip6_tun_build_state,
	.fill_encap = ip6_tun_fill_encap_info,
	.get_encap_size = ip6_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
	.owner = THIS_MODULE,
};

void __init ip_tunnel_core_init(void)
{
	/* If you land here, consider whether increasing ip_tunnel_info's
	 * options_len is a reasonable choice given its usage in front ends
	 * (e.g., it's part of flow keys, etc).
	 */
	BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);

	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
}

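/* Reference-counted static key: users that require tunnel metadata
 * collection enable it with ip_tunnel_need_metadata() and release it
 * with ip_tunnel_unneed_metadata().
 */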
DEFINE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
	static_branch_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
	static_branch_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);

/* Returns either the correct skb->protocol value, or 0 if invalid. */
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
{
	if (skb_network_header(skb) >= skb->head &&
	    (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
	    ip_hdr(skb)->version == 4)
		return htons(ETH_P_IP);
	if (skb_network_header(skb) >= skb->head &&
	    (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
	    ipv6_hdr(skb)->version == 6)
		return htons(ETH_P_IPV6);
	return 0;
}
EXPORT_SYMBOL(ip_tunnel_parse_protocol);

const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
EXPORT_SYMBOL(ip_tunnel_header_ops);

/* Returns true when ENCAP attributes are present in the netlink message. */
bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
				   struct ip_tunnel_encap *encap)
{
	bool ret = false;

	memset(encap, 0, sizeof(*encap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		encap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		encap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		encap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		encap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms);

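/* Fill @parms from IFLA_IPTUN_* attributes. Note that path MTU discovery
 * defaults to on: DF is set unless IFLA_IPTUN_PMTUDISC is present and
 * zero, and a nonzero TTL also forces DF.
 */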
void ip_tunnel_netlink_parms(struct nlattr *data[],
			     struct ip_tunnel_parm *parms)
{
	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL]) {
		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
		if (parms->iph.ttl)
			parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_IPTUN_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);

	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_IPTUN_FLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
}
EXPORT_SYMBOL_GPL(ip_tunnel_netlink_parms);
1149