xref: /linux/include/net/ip_tunnels.h (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __NET_IP_TUNNELS_H
3 #define __NET_IP_TUNNELS_H 1
4 
5 #include <linux/if_tunnel.h>
6 #include <linux/netdevice.h>
7 #include <linux/skbuff.h>
8 #include <linux/socket.h>
9 #include <linux/types.h>
10 #include <linux/u64_stats_sync.h>
11 #include <linux/bitops.h>
12 
13 #include <net/dsfield.h>
14 #include <net/flow.h>
15 #include <net/gro_cells.h>
16 #include <net/inet_dscp.h>
17 #include <net/inet_ecn.h>
18 #include <net/netns/generic.h>
19 #include <net/rtnetlink.h>
20 #include <net/lwtunnel.h>
21 #include <net/dst_cache.h>
22 
23 #if IS_ENABLED(CONFIG_IPV6)
24 #include <net/ipv6.h>
25 #include <net/ip6_fib.h>
26 #include <net/ip6_route.h>
27 #endif
28 
29 /* Keep error state on tunnel for 30 sec */
30 #define IPTUNNEL_ERR_TIMEO	(30*HZ)
31 
32 /* Used to memset struct ip_tunnel_key padding. */
33 #define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)
34 
35 /* Used to memset ipv4 address padding. */
36 #define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
37 #define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
38 	(sizeof_field(struct ip_tunnel_key, u) -		\
39 	 sizeof_field(struct ip_tunnel_key, u.ipv4))
40 
41 #define __ipt_flag_op(op, ...)					\
42 	op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)
43 
44 #define IP_TUNNEL_DECLARE_FLAGS(...)				\
45 	__ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)
46 
47 #define ip_tunnel_flags_zero(...)	__ipt_flag_op(bitmap_zero, __VA_ARGS__)
48 #define ip_tunnel_flags_copy(...)	__ipt_flag_op(bitmap_copy, __VA_ARGS__)
49 #define ip_tunnel_flags_and(...)	__ipt_flag_op(bitmap_and, __VA_ARGS__)
50 #define ip_tunnel_flags_or(...)		__ipt_flag_op(bitmap_or, __VA_ARGS__)
51 
52 #define ip_tunnel_flags_empty(...)				\
53 	__ipt_flag_op(bitmap_empty, __VA_ARGS__)
54 #define ip_tunnel_flags_intersect(...)				\
55 	__ipt_flag_op(bitmap_intersects, __VA_ARGS__)
56 #define ip_tunnel_flags_subset(...)				\
57 	__ipt_flag_op(bitmap_subset, __VA_ARGS__)
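
/* Illustrative sketch, not part of the original header: one way a driver
 * could build and combine tunnel flag bitmaps with the wrappers above.
 * The local names "wanted" and "extra" are hypothetical.
 *
 *	IP_TUNNEL_DECLARE_FLAGS(wanted) = { };
 *	IP_TUNNEL_DECLARE_FLAGS(extra) = { };
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, wanted);
 *	__set_bit(IP_TUNNEL_CSUM_BIT, extra);
 *	ip_tunnel_flags_or(wanted, wanted, extra);
 *
 *	if (ip_tunnel_flags_subset(extra, wanted))
 *		...
 */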
58 
59 struct ip_tunnel_key {
60 	__be64			tun_id;
61 	union {
62 		struct {
63 			__be32	src;
64 			__be32	dst;
65 		} ipv4;
66 		struct {
67 			struct in6_addr src;
68 			struct in6_addr dst;
69 		} ipv6;
70 	} u;
71 	IP_TUNNEL_DECLARE_FLAGS(tun_flags);
72 	__be32			label;		/* Flow Label for IPv6 */
73 	u32			nhid;
74 	u8			tos;		/* TOS for IPv4, TC for IPv6 */
75 	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
76 	__be16			tp_src;
77 	__be16			tp_dst;
78 	__u8			flow_flags;
79 };
80 
81 struct ip_tunnel_encap {
82 	u16			type;
83 	u16			flags;
84 	__be16			sport;
85 	__be16			dport;
86 };
87 
88 /* Flags for ip_tunnel_info mode. */
89 #define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
90 #define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
91 #define IP_TUNNEL_INFO_BRIDGE	0x04	/* represents a bridged tunnel id */
92 
93 /* Maximum tunnel options length. */
94 #define IP_TUNNEL_OPTS_MAX					\
95 	GENMASK((sizeof_field(struct ip_tunnel_info,		\
96 			      options_len) * BITS_PER_BYTE) - 1, 0)
97 
98 #define ip_tunnel_info_opts(info)				\
99 	_Generic(info,						\
100 		 const struct ip_tunnel_info * : ((const void *)(info)->options),\
101 		 struct ip_tunnel_info * : ((void *)(info)->options)\
102 	)
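
/* Illustrative sketch, not part of the original header: the _Generic()
 * selection above preserves const-ness, so reading the options through a
 * const info pointer yields a const pointer, while a mutable pointer can
 * be written to.  "opts" and "buf" are hypothetical local names.
 *
 *	const struct ip_tunnel_info *info = ...;
 *	const void *opts = ip_tunnel_info_opts(info);
 *
 *	if (info->options_len)
 *		memcpy(buf, opts, info->options_len);
 */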
103 
104 struct ip_tunnel_info {
105 	struct ip_tunnel_key	key;
106 	struct ip_tunnel_encap	encap;
107 #ifdef CONFIG_DST_CACHE
108 	struct dst_cache	dst_cache;
109 #endif
110 	u8			options_len;
111 	u8			mode;
112 	u8			options[] __aligned_largest __counted_by(options_len);
113 };
114 
115 /* 6rd prefix/relay information */
116 #ifdef CONFIG_IPV6_SIT_6RD
117 struct ip_tunnel_6rd_parm {
118 	struct in6_addr		prefix;
119 	__be32			relay_prefix;
120 	u16			prefixlen;
121 	u16			relay_prefixlen;
122 };
123 #endif
124 
125 struct ip_tunnel_prl_entry {
126 	struct ip_tunnel_prl_entry __rcu *next;
127 	__be32				addr;
128 	u16				flags;
129 	struct rcu_head			rcu_head;
130 };
131 
132 struct metadata_dst;
133 
134 /* Kernel-side variant of ip_tunnel_parm */
135 struct ip_tunnel_parm_kern {
136 	char			name[IFNAMSIZ];
137 	IP_TUNNEL_DECLARE_FLAGS(i_flags);
138 	IP_TUNNEL_DECLARE_FLAGS(o_flags);
139 	__be32			i_key;
140 	__be32			o_key;
141 	int			link;
142 	struct iphdr		iph;
143 };
144 
145 struct ip_tunnel {
146 	struct ip_tunnel __rcu	*next;
147 	struct hlist_node hash_node;
148 
149 	struct net_device	*dev;
150 	netdevice_tracker	dev_tracker;
151 
152 	struct net		*net;	/* netns for packet i/o */
153 
154 	unsigned long	err_time;	/* Time when the last ICMP error
155 					 * arrived */
156 	int		err_count;	/* Number of arrived ICMP errors */
157 
158 	/* These three fields are used only by GRE */
159 	u32		i_seqno;	/* The last seen seqno	*/
160 	atomic_t	o_seqno;	/* The last output seqno */
161 	int		tun_hlen;	/* Precalculated header length */
162 
163 	/* These four fields are used only by ERSPAN */
164 	u32		index;		/* ERSPAN type II index */
165 	u8		erspan_ver;	/* ERSPAN version */
166 	u8		dir;		/* ERSPAN direction */
167 	u16		hwid;		/* ERSPAN hardware ID */
168 
169 	struct dst_cache dst_cache;
170 
171 	struct ip_tunnel_parm_kern parms;
172 
173 	int		mlink;
174 	int		encap_hlen;	/* Encap header length (FOU,GUE) */
175 	int		hlen;		/* tun_hlen + encap_hlen */
176 	struct ip_tunnel_encap encap;
177 
178 	/* for SIT */
179 #ifdef CONFIG_IPV6_SIT_6RD
180 	struct ip_tunnel_6rd_parm ip6rd;
181 #endif
182 	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
183 	unsigned int		prl_count;	/* # of entries in PRL */
184 	unsigned int		ip_tnl_net_id;
185 	struct gro_cells	gro_cells;
186 	__u32			fwmark;
187 	bool			collect_md;
188 	bool			ignore_df;
189 };
190 
191 struct tnl_ptk_info {
192 	IP_TUNNEL_DECLARE_FLAGS(flags);
193 	__be16 proto;
194 	__be32 key;
195 	__be32 seq;
196 	int hdr_len;
197 };
198 
199 #define PACKET_RCVD	0
200 #define PACKET_REJECT	1
201 #define PACKET_NEXT	2
202 
203 #define IP_TNL_HASH_BITS   7
204 #define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)
205 
206 struct ip_tunnel_net {
207 	struct net_device *fb_tunnel_dev;
208 	struct rtnl_link_ops *rtnl_link_ops;
209 	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
210 	struct ip_tunnel __rcu *collect_md_tun;
211 	int type;
212 };
213 
214 static inline void ip_tunnel_set_options_present(unsigned long *flags)
215 {
216 	IP_TUNNEL_DECLARE_FLAGS(present) = { };
217 
218 	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
219 	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
220 	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
221 	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
222 	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
223 
224 	ip_tunnel_flags_or(flags, flags, present);
225 }
226 
227 static inline void ip_tunnel_clear_options_present(unsigned long *flags)
228 {
229 	IP_TUNNEL_DECLARE_FLAGS(present) = { };
230 
231 	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
232 	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
233 	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
234 	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
235 	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
236 
237 	__ipt_flag_op(bitmap_andnot, flags, flags, present);
238 }
239 
240 static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
241 {
242 	IP_TUNNEL_DECLARE_FLAGS(present) = { };
243 
244 	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
245 	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
246 	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
247 	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
248 	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
249 
250 	return ip_tunnel_flags_intersect(flags, present);
251 }
252 
253 static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
254 {
255 	IP_TUNNEL_DECLARE_FLAGS(supp) = { };
256 
257 	bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
258 	__set_bit(IP_TUNNEL_VTI_BIT, supp);
259 
260 	return ip_tunnel_flags_subset(flags, supp);
261 }
262 
263 static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
264 {
265 	ip_tunnel_flags_zero(dst);
266 
267 	bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
268 	__assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
269 }
270 
271 static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
272 {
273 	__be16 ret;
274 
275 	ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
276 	if (test_bit(IP_TUNNEL_VTI_BIT, flags))
277 		ret |= VTI_ISVTI;
278 
279 	return ret;
280 }
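
/* Illustrative sketch, not part of the original header: round-tripping
 * legacy __be16 tunnel flags through the bitmap representation with the
 * helpers above.  "old" and "bm" are hypothetical local names.
 *
 *	IP_TUNNEL_DECLARE_FLAGS(bm);
 *	__be16 old = ...;
 *
 *	ip_tunnel_flags_from_be16(bm, old);
 *	if (ip_tunnel_flags_is_be16_compat(bm))
 *		old = ip_tunnel_flags_to_be16(bm);
 */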
281 
282 static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
283 				      __be32 saddr, __be32 daddr,
284 				      u8 tos, u8 ttl, __be32 label,
285 				      __be16 tp_src, __be16 tp_dst,
286 				      __be64 tun_id,
287 				      const unsigned long *tun_flags)
288 {
289 	key->tun_id = tun_id;
290 	key->u.ipv4.src = saddr;
291 	key->u.ipv4.dst = daddr;
292 	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
293 	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
294 	key->tos = tos;
295 	key->ttl = ttl;
296 	key->label = label;
297 	ip_tunnel_flags_copy(key->tun_flags, tun_flags);
298 
299 	/* For tunnel types on top of IPsec, the tp_src and tp_dst of the
300 	 * upper tunnel are used.
301 	 * E.g. for GRE over IPsec, both tp_src and tp_dst are zero.
302 	 */
303 	key->tp_src = tp_src;
304 	key->tp_dst = tp_dst;
305 
306 	/* Clear struct padding. */
307 	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
308 		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
309 		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
310 }
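
/* Illustrative sketch, not part of the original header: filling a
 * collect-metadata key for an IPv4 tunnel.  "tun_dst", "saddr", "daddr"
 * and "tun_id" are hypothetical locals; the flow label and the
 * tp_src/tp_dst ports are simply left at zero here.
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
 *			   saddr, daddr, tos, ttl, 0,
 *			   0, 0, tun_id, flags);
 */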
311 
312 static inline bool
313 ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
314 			   const struct ip_tunnel_info *info)
315 {
316 	if (skb->mark)
317 		return false;
318 
319 	return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
320 }
321 
322 static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
323 					       *tun_info)
324 {
325 	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
326 }
327 
328 static inline __be64 key32_to_tunnel_id(__be32 key)
329 {
330 #ifdef __BIG_ENDIAN
331 	return (__force __be64)key;
332 #else
333 	return (__force __be64)((__force u64)key << 32);
334 #endif
335 }
336 
337 /* Returns the least-significant 32 bits of a __be64. */
338 static inline __be32 tunnel_id_to_key32(__be64 tun_id)
339 {
340 #ifdef __BIG_ENDIAN
341 	return (__force __be32)tun_id;
342 #else
343 	return (__force __be32)((__force u64)tun_id >> 32);
344 #endif
345 }
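
/* Illustrative sketch, not part of the original header: the two helpers
 * above are inverses for IDs that fit in 32 bits, e.g. a GRE key.
 *
 *	__be32 key = htonl(42);
 *	__be64 tun_id = key32_to_tunnel_id(key);
 *
 *	WARN_ON(tunnel_id_to_key32(tun_id) != key);
 */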
346 
347 #ifdef CONFIG_INET
348 
349 static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
350 				       int proto,
351 				       __be32 daddr, __be32 saddr,
352 				       __be32 key, __u8 tos,
353 				       struct net *net, int oif,
354 				       __u32 mark, __u32 tun_inner_hash,
355 				       __u8 flow_flags)
356 {
357 	memset(fl4, 0, sizeof(*fl4));
358 
359 	if (oif) {
360 		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index(net, oif);
361 		/* Legacy VRF/l3mdev use case */
362 		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
363 	}
364 
365 	fl4->daddr = daddr;
366 	fl4->saddr = saddr;
367 	fl4->flowi4_dscp = inet_dsfield_to_dscp(tos);
368 	fl4->flowi4_proto = proto;
369 	fl4->fl4_gre_key = key;
370 	fl4->flowi4_mark = mark;
371 	fl4->flowi4_multipath_hash = tun_inner_hash;
372 	fl4->flowi4_flags = flow_flags;
373 }
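
/* Illustrative sketch, not part of the original header: a transmit path
 * might use the helper above ahead of a route lookup.  The local names
 * are hypothetical and most error handling is elided.
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, daddr, saddr, key, tos,
 *			    net, oif, skb->mark, skb_get_hash(skb), 0);
 *	rt = ip_route_output_key(net, &fl4);
 *	if (IS_ERR(rt))
 *		goto tx_error;
 */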
374 
375 int ip_tunnel_init(struct net_device *dev);
376 void ip_tunnel_uninit(struct net_device *dev);
377 void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
378 struct net *ip_tunnel_get_link_net(const struct net_device *dev);
379 int ip_tunnel_get_iflink(const struct net_device *dev);
380 int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
381 		       struct rtnl_link_ops *ops, char *devname);
382 void ip_tunnel_delete_net(struct net *net, unsigned int id,
383 			  struct rtnl_link_ops *ops,
384 			  struct list_head *dev_to_kill);
385 
386 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
387 		    const struct iphdr *tnl_params, const u8 protocol);
388 void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
389 		       const u8 proto, int tunnel_hlen);
390 int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
391 		  int cmd);
392 bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
393 			      const void __user *data);
394 bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
395 int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
396 			     void __user *data, int cmd);
397 int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
398 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
399 
400 struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
401 				   int link, const unsigned long *flags,
402 				   __be32 remote, __be32 local,
403 				   __be32 key);
404 
405 void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
406 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
407 		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
408 		  bool log_ecn_error);
409 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
410 			 struct ip_tunnel_parm_kern *p, __u32 fwmark);
411 int ip_tunnel_newlink(struct net *net, struct net_device *dev,
412 		      struct nlattr *tb[], struct ip_tunnel_parm_kern *p,
413 		      __u32 fwmark);
414 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
415 
416 bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
417 				   struct ip_tunnel_encap *encap);
418 
419 void ip_tunnel_netlink_parms(struct nlattr *data[],
420 			     struct ip_tunnel_parm_kern *parms);
421 
422 extern const struct header_ops ip_tunnel_header_ops;
423 __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
424 
425 struct ip_tunnel_encap_ops {
426 	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
427 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
428 			    u8 *protocol, struct flowi4 *fl4);
429 	int (*err_handler)(struct sk_buff *skb, u32 info);
430 };
431 
432 #define MAX_IPTUN_ENCAP_OPS 8
433 
434 extern const struct ip_tunnel_encap_ops __rcu *
435 		iptun_encaps[MAX_IPTUN_ENCAP_OPS];
436 
437 int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
438 			    unsigned int num);
439 int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
440 			    unsigned int num);
441 
442 int ip_tunnel_encap_setup(struct ip_tunnel *t,
443 			  struct ip_tunnel_encap *ipencap);
444 
445 static inline enum skb_drop_reason
446 pskb_inet_may_pull_reason(struct sk_buff *skb)
447 {
448 	int nhlen;
449 
450 	switch (skb->protocol) {
451 #if IS_ENABLED(CONFIG_IPV6)
452 	case htons(ETH_P_IPV6):
453 		nhlen = sizeof(struct ipv6hdr);
454 		break;
455 #endif
456 	case htons(ETH_P_IP):
457 		nhlen = sizeof(struct iphdr);
458 		break;
459 	default:
460 		nhlen = 0;
461 	}
462 
463 	return pskb_network_may_pull_reason(skb, nhlen);
464 }
465 
466 static inline bool pskb_inet_may_pull(struct sk_buff *skb)
467 {
468 	return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
469 }
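
/* Illustrative sketch, not part of the original header: a typical transmit
 * handler validates the inner headers before touching them, dropping the
 * packet with a precise reason on failure.
 *
 *	enum skb_drop_reason reason;
 *
 *	reason = pskb_inet_may_pull_reason(skb);
 *	if (reason != SKB_NOT_DROPPED_YET) {
 *		kfree_skb_reason(skb, reason);
 *		return NETDEV_TX_OK;
 *	}
 */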
470 
471 /* Variant of pskb_inet_may_pull() that also handles a VLAN header, if
472  * present, and sets the network header offset on success. */
473 static inline enum skb_drop_reason
474 skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit)
475 {
476 	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
477 	__be16 type = skb->protocol;
478 	enum skb_drop_reason reason;
479 
480 	/* Essentially this is skb_protocol(skb, true), and we also
481 	 * get the MAC header length.
482 	 */
483 	if (eth_type_vlan(type))
484 		type = __vlan_get_protocol(skb, type, &maclen);
485 
486 	switch (type) {
487 #if IS_ENABLED(CONFIG_IPV6)
488 	case htons(ETH_P_IPV6):
489 		nhlen = sizeof(struct ipv6hdr);
490 		break;
491 #endif
492 	case htons(ETH_P_IP):
493 		nhlen = sizeof(struct iphdr);
494 		break;
495 	}
496 	/* For ETH_P_IPV6/ETH_P_IP we make sure that the base network
497 	 * header is pulled into skb->head.
498 	 */
499 	reason = pskb_may_pull_reason(skb, maclen + nhlen);
500 	if (reason)
501 		return reason;
502 
503 	skb_set_network_header(skb, maclen);
504 
505 	return SKB_NOT_DROPPED_YET;
506 }
507 
508 static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
509 {
510 	const struct ip_tunnel_encap_ops *ops;
511 	int hlen = -EINVAL;
512 
513 	if (e->type == TUNNEL_ENCAP_NONE)
514 		return 0;
515 
516 	if (e->type >= MAX_IPTUN_ENCAP_OPS)
517 		return -EINVAL;
518 
519 	rcu_read_lock();
520 	ops = rcu_dereference(iptun_encaps[e->type]);
521 	if (likely(ops && ops->encap_hlen))
522 		hlen = ops->encap_hlen(e);
523 	rcu_read_unlock();
524 
525 	return hlen;
526 }
527 
528 static inline int ip_tunnel_encap(struct sk_buff *skb,
529 				  struct ip_tunnel_encap *e,
530 				  u8 *protocol, struct flowi4 *fl4)
531 {
532 	const struct ip_tunnel_encap_ops *ops;
533 	int ret = -EINVAL;
534 
535 	if (e->type == TUNNEL_ENCAP_NONE)
536 		return 0;
537 
538 	if (e->type >= MAX_IPTUN_ENCAP_OPS)
539 		return -EINVAL;
540 
541 	rcu_read_lock();
542 	ops = rcu_dereference(iptun_encaps[e->type]);
543 	if (likely(ops && ops->build_header))
544 		ret = ops->build_header(skb, e, protocol, fl4);
545 	rcu_read_unlock();
546 
547 	return ret;
548 }
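
/* Illustrative sketch, not part of the original header: a driver usually
 * caches ip_encap_hlen() at configuration time and calls ip_tunnel_encap()
 * on transmit to prepend the FOU/GUE encapsulation.  "t", "protocol" and
 * "fl4" are hypothetical locals.
 *
 *	int hlen = ip_encap_hlen(&t->encap);
 *
 *	if (hlen < 0)
 *		return hlen;
 *	t->encap_hlen = hlen;
 *
 *	...
 *	if (ip_tunnel_encap(skb, &t->encap, &protocol, &fl4) < 0)
 *		goto tx_error;
 */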
549 
550 /* Extract dsfield from inner protocol */
551 static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
552 				       const struct sk_buff *skb)
553 {
554 	__be16 payload_protocol = skb_protocol(skb, true);
555 
556 	if (payload_protocol == htons(ETH_P_IP))
557 		return iph->tos;
558 	else if (payload_protocol == htons(ETH_P_IPV6))
559 		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
560 	else
561 		return 0;
562 }
563 
564 static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
565 					     const struct sk_buff *skb)
566 {
567 	__be16 payload_protocol = skb_protocol(skb, true);
568 
569 	if (payload_protocol == htons(ETH_P_IPV6))
570 		return ip6_flowlabel((const struct ipv6hdr *)iph);
571 	else
572 		return 0;
573 }
574 
575 static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
576 				       const struct sk_buff *skb)
577 {
578 	__be16 payload_protocol = skb_protocol(skb, true);
579 
580 	if (payload_protocol == htons(ETH_P_IP))
581 		return iph->ttl;
582 	else if (payload_protocol == htons(ETH_P_IPV6))
583 		return ((const struct ipv6hdr *)iph)->hop_limit;
584 	else
585 		return 0;
586 }
587 
588 /* Propagate ECN bits out */
589 static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
590 				     const struct sk_buff *skb)
591 {
592 	u8 inner = ip_tunnel_get_dsfield(iph, skb);
593 
594 	return INET_ECN_encapsulate(tos, inner);
595 }
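
/* Illustrative sketch, not part of the original header: on transmit the
 * outer TOS is often inherited from the inner packet and the ECN bits are
 * then folded in with the helper above.  "inner_iph" is a hypothetical
 * pointer to the inner IP header.
 *
 *	u8 tos;
 *
 *	tos = ip_tunnel_get_dsfield(inner_iph, skb);
 *	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
 */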
596 
597 int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
598 			   __be16 inner_proto, bool raw_proto, bool xnet);
599 
600 static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
601 				       __be16 inner_proto, bool xnet)
602 {
603 	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
604 }
605 
606 void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
607 		   __be32 src, __be32 dst, u8 proto,
608 		   u8 tos, u8 ttl, __be16 df, bool xnet, u16 ipcb_flags);
609 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
610 					     gfp_t flags);
611 int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
612 			  int headroom, bool reply);
613 
614 int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
615 
616 static inline int iptunnel_pull_offloads(struct sk_buff *skb)
617 {
618 	if (skb_is_gso(skb)) {
619 		int err;
620 
621 		err = skb_unclone(skb, GFP_ATOMIC);
622 		if (unlikely(err))
623 			return err;
624 		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
625 					       NETIF_F_GSO_SHIFT);
626 	}
627 
628 	skb->encapsulation = 0;
629 	return 0;
630 }
631 
632 static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
633 {
634 	if (pkt_len > 0) {
635 		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
636 
637 		u64_stats_update_begin(&tstats->syncp);
638 		u64_stats_add(&tstats->tx_bytes, pkt_len);
639 		u64_stats_inc(&tstats->tx_packets);
640 		u64_stats_update_end(&tstats->syncp);
641 		put_cpu_ptr(tstats);
642 		return;
643 	}
644 
645 	if (pkt_len < 0) {
646 		DEV_STATS_INC(dev, tx_errors);
647 		DEV_STATS_INC(dev, tx_aborted_errors);
648 	} else {
649 		DEV_STATS_INC(dev, tx_dropped);
650 	}
651 }
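
/* Illustrative sketch, not part of the original header: callers account
 * the transmitted length on success and pass zero or a negative error
 * otherwise, so that the matching counters above are bumped.
 *
 *	pkt_len = skb->len - skb_inner_network_offset(skb);
 *	err = ip_local_out(net, sk, skb);
 *	if (unlikely(net_xmit_eval(err)))
 *		pkt_len = 0;
 *	iptunnel_xmit_stats(dev, pkt_len);
 */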
652 
653 static inline void ip_tunnel_info_opts_get(void *to,
654 					   const struct ip_tunnel_info *info)
655 {
656 	memcpy(to, ip_tunnel_info_opts(info), info->options_len);
657 }
658 
659 static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
660 					   const void *from, int len,
661 					   const unsigned long *flags)
662 {
663 	info->options_len = len;
664 	if (len > 0) {
665 		memcpy(ip_tunnel_info_opts(info), from, len);
666 		ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
667 				   flags);
668 	}
669 }
670 
671 static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
672 {
673 	return (struct ip_tunnel_info *)lwtstate->data;
674 }
675 
676 DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
677 
678 /* Returns > 0 if metadata should be collected */
679 static inline int ip_tunnel_collect_metadata(void)
680 {
681 	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
682 }
683 
684 void __init ip_tunnel_core_init(void);
685 
686 void ip_tunnel_need_metadata(void);
687 void ip_tunnel_unneed_metadata(void);
688 
689 #else /* CONFIG_INET */
690 
691 static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
692 {
693 	return NULL;
694 }
695 
696 static inline void ip_tunnel_need_metadata(void)
697 {
698 }
699 
700 static inline void ip_tunnel_unneed_metadata(void)
701 {
702 }
703 
704 static inline void ip_tunnel_info_opts_get(void *to,
705 					   const struct ip_tunnel_info *info)
706 {
707 }
708 
709 static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
710 					   const void *from, int len,
711 					   const unsigned long *flags)
712 {
713 	info->options_len = 0;
714 }
715 
716 #endif /* CONFIG_INET */
717 
718 #endif /* __NET_IP_TUNNELS_H */
719