/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_IP6_TUNNEL_H
#define _NET_IP6_TUNNEL_H

#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/dst_cache.h>

#define IP6TUNNEL_ERR_TIMEO (30*HZ)

/* capable of sending packets */
#define IP6_TNL_F_CAP_XMIT 0x10000
/* capable of receiving packets */
#define IP6_TNL_F_CAP_RCV 0x20000
/* determine capability on a per-packet basis */
#define IP6_TNL_F_CAP_PER_PACKET 0x40000

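/*
 * Kernel-internal tunnel parameters; a superset of the userspace
 * struct ip6_tnl_parm / ip6_tnl_parm2 from <linux/ip6_tunnel.h>.
 */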
struct __ip6_tnl_parm {
	char name[IFNAMSIZ];	/* name of tunnel device */
	int link;		/* ifindex of underlying L2 interface */
	__u8 proto;		/* tunnel protocol */
	__u8 encap_limit;	/* encapsulation limit for tunnel */
	__u8 hop_limit;		/* hop limit for tunnel */
	bool collect_md;
	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
	__u32 flags;		/* tunnel flags */
	struct in6_addr laddr;	/* local tunnel end-point address */
	struct in6_addr raddr;	/* remote tunnel end-point address */

	__be16			i_flags;
	__be16			o_flags;
	__be32			i_key;
	__be32			o_key;

	__u32			fwmark;
	__u32			index;	/* ERSPAN type II index */
	__u8			erspan_ver;	/* ERSPAN version */
	__u8			dir;	/* direction */
	__u16			hwid;	/* hwid */
};

/* IPv6 tunnel */
struct ip6_tnl {
	struct ip6_tnl __rcu *next;	/* next tunnel in list */
	struct net_device *dev;	/* virtual device associated with tunnel */
	netdevice_tracker dev_tracker;
	struct net *net;	/* netns for packet i/o */
	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
	struct flowi fl;	/* flowi template for xmit */
	struct dst_cache dst_cache;	/* cached dst */
	struct gro_cells gro_cells;

	int err_count;
	unsigned long err_time;

	/* These fields are used only by GRE */
	__u32 i_seqno;	/* The last seen seqno	*/
	atomic_t o_seqno;	/* The last output seqno */
	int hlen;       /* tun_hlen + encap_hlen */
	int tun_hlen;	/* Precalculated header length */
	int encap_hlen; /* Encap header length (FOU,GUE) */
	struct ip_tunnel_encap encap;
	int mlink;
};

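/*
 * Callbacks provided by an encapsulation type (e.g. FOU/GUE) so that a
 * tunnel can compute the extra header length, build that header on
 * transmit and handle ICMPv6 errors for encapsulated packets.
 */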
struct ip6_tnl_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi6 *fl6);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);
};

#ifdef CONFIG_INET

extern const struct ip6_tnl_encap_ops __rcu *
		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num);
int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num);
int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap);

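/*
 * Sketch of how an encapsulation provider would hook in; the ops and
 * callback names below are illustrative only, the encap type constants
 * come from <linux/if_tunnel.h>:
 *
 *	static const struct ip6_tnl_encap_ops my_fou6_ops = {
 *		.encap_hlen	= my_encap_hlen,
 *		.build_header	= my_build_header6,
 *		.err_handler	= my_err_handler6,
 *	};
 *
 *	err = ip6_tnl_encap_add_ops(&my_fou6_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip6_tnl_encap_del_ops(&my_fou6_ops, TUNNEL_ENCAP_FOU);
 */

/*
 * Extra header length required by the configured encapsulation: 0 when
 * none is configured, -EINVAL for an unknown or unregistered type.
 */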
static inline int ip6_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip6_tnl_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(ip6tun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}

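/*
 * Build the configured encapsulation header in front of the tunneled
 * payload; the callback may update *protocol and fl6 to describe the
 * outer header.  Returns 0 if no encapsulation is configured, the
 * build_header() result otherwise, or -EINVAL if the type has no
 * registered ops.
 */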
static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
				u8 *protocol, struct flowi6 *fl6)
{
	const struct ip6_tnl_encap_ops *ops;
	int ret = -EINVAL;

	if (t->encap.type == TUNNEL_ENCAP_NONE)
		return 0;

	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(ip6tun_encaps[t->encap.type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, &t->encap, protocol, fl6);
	rcu_read_unlock();

	return ret;
}

/* Tunnel encapsulation limit destination sub-option (RFC 2473) */

struct ipv6_tlv_tnl_enc_lim {
	__u8 type;		/* type-code for option         */
	__u8 length;		/* option length                */
	__u8 encap_limit;	/* tunnel encapsulation limit   */
} __packed;

int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		const struct in6_addr *raddr);
int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		bool log_ecn_error);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		     const struct in6_addr *raddr);
int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
			     const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);

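/*
 * Hand a fully built tunnel packet to the IPv6 output path and account
 * the transmitted bytes (or a tx error) on the tunnel device's stats.
 */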
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
				  struct net_device *dev)
{
	int pkt_len, err;

	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
	pkt_len = skb->len - skb_inner_network_offset(skb);
	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);

	if (dev) {
		if (unlikely(net_xmit_eval(err)))
			pkt_len = -1;
		iptunnel_xmit_stats(dev, pkt_len);
	}
}
#endif /* CONFIG_INET */
#endif /* _NET_IP6_TUNNEL_H */