/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_DST_METADATA_H
#define __NET_DST_METADATA_H 1

#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/macsec.h>
#include <net/dst.h>

enum metadata_type {
	METADATA_IP_TUNNEL,
	METADATA_HW_PORT_MUX,
	METADATA_MACSEC,
	METADATA_XFRM,
};

struct hw_port_info {
	struct net_device *lower_dev;
	u32 port_id;
};

struct macsec_info {
	sci_t sci;
};

struct xfrm_md_info {
	u32 if_id;
	int link;
	struct dst_entry *dst_orig;
};

struct metadata_dst {
	struct dst_entry dst;
	enum metadata_type type;
	union {
		struct ip_tunnel_info tun_info;
		struct hw_port_info port_info;
		struct macsec_info macsec_info;
		struct xfrm_md_info xfrm_info;
	} u;
};

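/* Return the dst attached to @skb as a metadata_dst, or NULL if the skb
 * has no dst or carries a regular (non-DST_METADATA) routing dst.
 */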
static inline struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);

	if (md_dst && md_dst->dst.flags & DST_METADATA)
		return md_dst;

	return NULL;
}

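/* Return the IP tunnel metadata for @skb: either the tun_info embedded in
 * a METADATA_IP_TUNNEL metadata dst, or the tunnel info carried by an
 * IP/IPv6 lwtunnel encap state on a regular dst. Returns NULL when the
 * skb carries neither.
 */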
static inline struct ip_tunnel_info *
skb_tunnel_info(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dst_entry *dst;

	if (md_dst && md_dst->type == METADATA_IP_TUNNEL)
		return &md_dst->u.tun_info;

	dst = skb_dst(skb);
	if (dst && dst->lwtstate &&
	    (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
	     dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
		return lwt_tun_info(dst->lwtstate);

	return NULL;
}

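/* The XFRM lwtunnel encap state keeps a struct xfrm_md_info in its
 * variable-length data area.
 */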
static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
{
	return (struct xfrm_md_info *)lwt->data;
}

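/* XFRM counterpart of skb_tunnel_info(): look for a METADATA_XFRM metadata
 * dst first, then fall back to an LWTUNNEL_ENCAP_XFRM lwtunnel state.
 */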
static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dst_entry *dst;

	if (md_dst && md_dst->type == METADATA_XFRM)
		return &md_dst->u.xfrm_info;

	dst = skb_dst(skb);
	if (dst && dst->lwtstate &&
	    dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
		return lwt_xfrm_info(dst->lwtstate);

	return NULL;
}

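/* True if the skb has a real routing dst attached, i.e. one usable for
 * forwarding decisions rather than a DST_METADATA placeholder.
 */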
static inline bool skb_valid_dst(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return dst && !(dst->flags & DST_METADATA);
}

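/* Compare the metadata attached to two skbs. Returns 0 when neither skb
 * has a dst, or when both carry metadata dsts of the same type with equal
 * contents (including tunnel options for METADATA_IP_TUNNEL); non-zero
 * otherwise.
 */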
static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
				       const struct sk_buff *skb_b)
{
	const struct metadata_dst *a, *b;

	if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
		return 0;

	a = (const struct metadata_dst *) skb_dst(skb_a);
	b = (const struct metadata_dst *) skb_dst(skb_b);

	if (!a != !b || a->type != b->type)
		return 1;

	switch (a->type) {
	case METADATA_HW_PORT_MUX:
		return memcmp(&a->u.port_info, &b->u.port_info,
			      sizeof(a->u.port_info));
	case METADATA_IP_TUNNEL:
		return memcmp(&a->u.tun_info, &b->u.tun_info,
			      sizeof(a->u.tun_info) +
			      a->u.tun_info.options_len);
	case METADATA_MACSEC:
		return memcmp(&a->u.macsec_info, &b->u.macsec_info,
			      sizeof(a->u.macsec_info));
	case METADATA_XFRM:
		return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
			      sizeof(a->u.xfrm_info));
	default:
		return 1;
	}
}

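/* Allocation helpers for metadata dsts; @optslen reserves room for tunnel
 * option data placed directly behind the structure.
 */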
void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags);
void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst);
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);

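/* Allocate an empty METADATA_IP_TUNNEL dst for the receive path, with room
 * for @md_size bytes of tunnel options.
 */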
static inline struct metadata_dst *tun_rx_dst(int md_size)
{
	struct metadata_dst *tun_dst;

	tun_dst = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!tun_dst)
		return NULL;

	tun_dst->u.tun_info.options_len = 0;
	tun_dst->u.tun_info.mode = 0;
	return tun_dst;
}

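/* Replace the (possibly shared) metadata dst of @skb with a private copy so
 * the caller may modify the tunnel info. Returns the new metadata dst, or an
 * ERR_PTR() on allocation failure or if the skb does not carry IP tunnel
 * metadata.
 */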
static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	int md_size;
	struct metadata_dst *new_md;

	if (!md_dst || md_dst->type != METADATA_IP_TUNNEL)
		return ERR_PTR(-EINVAL);

	md_size = md_dst->u.tun_info.options_len;
	new_md = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!new_md)
		return ERR_PTR(-ENOMEM);

	memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
	       sizeof(struct ip_tunnel_info) + md_size);
#ifdef CONFIG_DST_CACHE
	/* Unclone the dst cache if there is one */
	if (new_md->u.tun_info.dst_cache.cache) {
		int ret;

		ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
		if (ret) {
			metadata_dst_free(new_md);
			return ERR_PTR(ret);
		}
	}
#endif

	skb_dst_drop(skb);
	skb_dst_set(skb, &new_md->dst);
	return new_md;
}

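/* Convenience wrapper around tun_dst_unclone() that returns a writable
 * ip_tunnel_info pointer, or NULL on failure.
 */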
static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
{
	struct metadata_dst *dst;

	dst = tun_dst_unclone(skb);
	if (IS_ERR(dst))
		return NULL;

	return &dst->u.tun_info;
}

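/* Build a METADATA_IP_TUNNEL dst describing an IPv4 tunnel with the given
 * outer addresses, tos/ttl, destination port, flags and tunnel id.
 */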
static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
						    __be32 daddr,
						    __u8 tos, __u8 ttl,
						    __be16 tp_dst,
						    const unsigned long *flags,
						    __be64 tunnel_id,
						    int md_size)
{
	struct metadata_dst *tun_dst;

	tun_dst = tun_rx_dst(md_size);
	if (!tun_dst)
		return NULL;

	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
			   saddr, daddr, tos, ttl,
			   0, 0, tp_dst, tunnel_id, flags);
	return tun_dst;
}

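/* Build receive-path IPv4 tunnel metadata from the outer header of @skb and
 * propagate the DF bit into the tunnel flags. A collect_md tunnel driver
 * would typically attach the result before handing the inner packet up the
 * stack, roughly like the following (illustrative sketch only, not taken
 * from a specific driver):
 *
 *	tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, 0);
 *	if (!tun_dst)
 *		goto drop;
 *	skb_dst_set(skb, &tun_dst->dst);
 */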
static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
						 const unsigned long *flags,
						 __be64 tunnel_id,
						 int md_size)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct metadata_dst *tun_dst;

	tun_dst = __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
				   0, flags, tunnel_id, md_size);

	if (tun_dst && (iph->frag_off & htons(IP_DF)))
		__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
			  tun_dst->u.tun_info.key.tun_flags);
	return tun_dst;
}

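/* IPv6 counterpart of __ip_tun_set_dst(): build a METADATA_IP_TUNNEL dst in
 * IP_TUNNEL_INFO_IPV6 mode, additionally carrying the flow label.
 */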
static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
						      const struct in6_addr *daddr,
						      __u8 tos, __u8 ttl,
						      __be16 tp_dst,
						      __be32 label,
						      const unsigned long *flags,
						      __be64 tunnel_id,
						      int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	tun_dst = tun_rx_dst(md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->mode = IP_TUNNEL_INFO_IPV6;
	ip_tunnel_flags_copy(info->key.tun_flags, flags);
	info->key.tun_id = tunnel_id;
	info->key.tp_src = 0;
	info->key.tp_dst = tp_dst;

	info->key.u.ipv6.src = *saddr;
	info->key.u.ipv6.dst = *daddr;

	info->key.tos = tos;
	info->key.ttl = ttl;
	info->key.label = label;

	return tun_dst;
}

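/* Build receive-path IPv6 tunnel metadata from the outer header of @skb,
 * taking tos from the traffic class and label from the flow label.
 */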
static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
						   const unsigned long *flags,
						   __be64 tunnel_id,
						   int md_size)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);

	return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
				  ipv6_get_dsfield(ip6h), ip6h->hop_limit,
				  0, ip6_flowlabel(ip6h), flags, tunnel_id,
				  md_size);
}
#endif /* __NET_DST_METADATA_H */