xref: /linux/net/sched/act_tunnel_key.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */

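/*
 * Illustrative iproute2 usage (exact syntax may differ across iproute2
 * versions; see tc-tunnel_key(8)):
 *
 *   # encap: attach outer-header metadata, then hand the packet to a
 *   # metadata-mode tunnel device
 *   tc filter add dev eth0 ingress flower \
 *           action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *                  id 11 dst_port 4789 \
 *           action mirred egress redirect dev vxlan0
 *
 *   # decap: release the tunnel metadata carried by the skb
 *   tc filter add dev vxlan0 ingress flower enc_key_id 11 \
 *           action tunnel_key unset
 */
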
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>

static struct tc_action_ops act_tunnel_key_ops;

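/*
 * Per-packet action handler: either drop any tunnel metadata already attached
 * to the skb (RELEASE) or attach a clone of the preconfigured encap metadata
 * dst (SET). Runs under RCU in the softirq datapath.
 */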
TC_INDIRECT_SCOPE int tunnel_key_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	tcf_action_update_bstats(&t->common, skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}

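/*
 * Netlink policies for the tunnel encapsulation options: the outer policy
 * selects the option type (geneve, vxlan or erspan), the nested policies
 * validate the attributes of each type.
 */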
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]	   = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]	   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]	   = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]	   = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]	   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]	   = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR]	   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]	   = { .type = NLA_U8 },
};

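/*
 * Parse one TCA_TUNNEL_KEY_ENC_OPTS_GENEVE attribute. When @dst is NULL only
 * the required length is computed; otherwise the option is written to @dst as
 * a struct geneve_opt followed by its variable-length data. Returns the
 * option length or a negative error.
 */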
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}

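/* As above, but for the single VXLAN GBP option (struct vxlan_metadata). */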
static int
tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (dst) {
		struct vxlan_metadata *md = dst;

		md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(struct vxlan_metadata);
}

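/*
 * Parse the ERSPAN option: version 1 carries an index, version 2 carries a
 * direction and hardware ID. Fills a struct erspan_metadata when @dst is set.
 */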
static int
tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
	int err;
	u8 ver;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
	if (ver == 1) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
	} else if (ver == 2) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
		    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	if (dst) {
		struct erspan_metadata *md = dst;

		md->version = ver;
		if (ver == 1) {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		} else {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	}

	return sizeof(struct erspan_metadata);
}

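/*
 * Walk the TCA_TUNNEL_KEY_ENC_OPTS nest and copy every option into @dst.
 * Options of different tunnel types must not be mixed, and only geneve
 * allows more than one option. Called twice: once with @dst == NULL to size
 * the buffer, then again to fill the tunnel metadata.
 */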
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (opts_len > IP_TUNNEL_OPTS_MAX) {
				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
				return -EINVAL;
			}
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			type = IP_TUNNEL_GENEVE_OPT_BIT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
							    dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = IP_TUNNEL_VXLAN_OPT_BIT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = IP_TUNNEL_ERSPAN_OPT_BIT;
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}

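/* Dry run of tunnel_key_copy_opts() to learn the required buffer size. */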
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}

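/*
 * Copy the parsed options into the ip_tunnel_info behind the metadata dst
 * and set the tunnel option flag bit matching the option type.
 */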
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags);
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
		__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags);
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
		__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags);
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}

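/* Top-level netlink policy for the tunnel_key action attributes. */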
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS]	    = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID]   = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_NO_CSUM]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS]     = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
};

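/*
 * Drop the reference on the encap metadata dst (SET only) and free the
 * parameter block after an RCU grace period.
 */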
static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
	if (!p)
		return;
	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&p->tcft_enc_metadata->dst);

	kfree_rcu(p, rcu);
}

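/*
 * Create or update a tunnel_key action instance from netlink attributes.
 * For SET, a metadata dst describing the outer header (addresses, key id,
 * destination port, tos/ttl, csum/frag flags and tunnel options) is built
 * up front; the new parameter block is then swapped in under tcf_lock and
 * the old one is released via RCU.
 */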
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 act_flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct metadata_dst *metadata = NULL;
	struct tcf_chain *goto_ch = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id = 0;
	int opts_len = 0;
	u8 tos, ttl;
	int ret = 0;
	u32 index;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
					  tunnel_key_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			__be32 key32;

			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
			key_id = key32_to_tunnel_id(key32);
			__set_bit(IP_TUNNEL_KEY_BIT, flags);
		}

		__set_bit(IP_TUNNEL_CSUM_BIT, flags);
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			__clear_bit(IP_TUNNEL_CSUM_BIT, flags);

		if (nla_get_flag(tb[TCA_TUNNEL_KEY_NO_FRAG]))
			__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, flags);

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, opts_len);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

#ifdef CONFIG_DST_CACHE
		ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
		if (ret)
			goto release_tun_meta;
#endif

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_tunnel_key_ops, bind,
						act_flags);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0) {
		ret = err;
		exists = true;
		goto release_tun_meta;
	}
	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto put_chain;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(t->params, params_new,
					 lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	tunnel_key_release_params(params_new);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_tun_meta:
	if (metadata)
		dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, index);
	return ret;
}

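/* ->cleanup(): release the parameter block when the action is destroyed. */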
static void tunnel_key_release(struct tc_action *a)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;

	params = rcu_dereference_protected(t->params, 1);
	tunnel_key_release_params(params);
}

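/*
 * Dump helpers: translate the tunnel options stored in the ip_tunnel_info
 * back into the nested TCA_TUNNEL_KEY_ENC_OPTS_* netlink attributes.
 */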
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	int len = info->options_len;
	u8 *src = (u8 *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
	if (!start)
		return -EMSGSIZE;

	while (len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type) ||
		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt + 1)) {
			nla_nest_cancel(skb, start);
			return -EMSGSIZE;
		}

		len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
				      const struct ip_tunnel_info *info)
{
	struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
		nla_nest_cancel(skb, start);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto err;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto err;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto err;

	nla_nest_end(skb, start);
	return 0;
err:
	nla_nest_cancel(skb, start);
	return -EMSGSIZE;
}

static int tunnel_key_opts_dump(struct sk_buff *skb,
				const struct ip_tunnel_info *info)
{
	struct nlattr *start;
	int err = -EINVAL;

	if (!info->options_len)
		return 0;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
	if (!start)
		return -EMSGSIZE;

	if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) {
		err = tunnel_key_geneve_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
		err = tunnel_key_vxlan_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) {
		err = tunnel_key_erspan_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else {
err_out:
		nla_nest_cancel(skb, start);
		return err;
	}

	nla_nest_end(skb, start);
	return 0;
}

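/* Emit the IPv4 or IPv6 encap source/destination addresses. */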
static int tunnel_key_dump_addresses(struct sk_buff *skb,
				     const struct ip_tunnel_info *info)
{
	unsigned short family = ip_tunnel_info_af(info);

	if (family == AF_INET) {
		__be32 saddr = info->key.u.ipv4.src;
		__be32 daddr = info->key.u.ipv4.dst;

		if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
		    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
			return 0;
	}

	if (family == AF_INET6) {
		const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
		const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;

		if (!nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
		    !nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
			return 0;
	}

	return -EINVAL;
}

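/*
 * ->dump(): serialise the action configuration back to userspace, holding
 * tcf_lock so the parameter block cannot change underneath us.
 */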
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index    = t->tcf_index,
		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action   = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if ((test_bit(IP_TUNNEL_KEY_BIT, key->tun_flags) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		      nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				   key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags)) ||
		    (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) &&
		     nla_put_flag(skb, TCA_TUNNEL_KEY_NO_FRAG)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

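/*
 * Hardware offload glue: translate the action into a FLOW_ACTION_TUNNEL_ENCAP
 * or FLOW_ACTION_TUNNEL_DECAP entry, handing drivers a private copy of the
 * tunnel info (and its destructor) for the encap case.
 */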
static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
					    void *entry_data,
					    u32 *index_inc,
					    bool bind,
					    struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				return err;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel key mode offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_tunnel_set(act))
			fl_action->id = FLOW_ACTION_TUNNEL_ENCAP;
		else if (is_tcf_tunnel_release(act))
			fl_action->id = FLOW_ACTION_TUNNEL_DECAP;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

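/* Action ops table and per-netns/module registration. */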
static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.id		=	TCA_ID_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.offload_act_setup =	tcf_tunnel_key_offload_act_setup,
	.size		=	sizeof(struct tcf_tunnel_key),
};
MODULE_ALIAS_NET_ACT("tunnel_key");

static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);

	return tc_action_net_init(net, tn, &act_tunnel_key_ops);
}

static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_tunnel_key_ops.net_id);
}

static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id   = &act_tunnel_key_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");