// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};
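
/* Both hash tables key on identity rather than contents: rulesets hash on
 * the {block, chain_index, ops} tuple and rules hash on the flow cookie
 * handed down by the flow offload core. A sketch of a rule lookup (this is
 * what mlxsw_sp_acl_rule_lookup() below boils down to):
 *
 *	rule = rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
 *				      mlxsw_sp_acl_rule_ht_params);
 */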

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold one reference on the ruleset ourselves; the second is
	 * held by the only rule bound to it, since each rule takes a
	 * ruleset reference in mlxsw_sp_acl_rule_create().
	 */
	return ruleset->ref_count == 2;
}

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
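
/* Illustrative (hypothetical) caller flow, not part of the driver: a flow
 * offload path typically pairs the getter and putter around rule setup:
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, chain_index,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 *	... create and add rules; each rule holds its own ruleset reference ...
 *	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 *
 * The ruleset stays alive for as long as at least one rule references it.
 */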

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}
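
/* A minimal sketch (hypothetical caller, for illustration only) of the
 * rule-info build flow: keys and actions are accumulated on the rulei,
 * and the action block is sealed by the commit:
 *
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_ETHERTYPE,
 *				       key, mask);
 *	err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress, fa_cookie, extack);
 *	if (err)
 *		return err;
 *	err = mlxsw_sp_acl_rulei_commit(rulei);
 */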

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* An out_dev of NULL means the caller asks for the packet
		 * to be forwarded back to its ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

enum mlxsw_sp_acl_mangle_field {
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
};

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
};
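
/* Worked example for the table above (reading the first IPv4 header word
 * as a 32-bit big-endian value: version/IHL in bits 31:24, TOS in bits
 * 23:16): a pedit of the 6-bit DSCP arrives with offset 0 and mask
 * 0xff03ffff, i.e. only bits 23:18 cleared. Shifting the packed value
 * right by 18 therefore recovers the DSCP to be programmed:
 *
 *	val = 0x00b80000;	(hypothetical pedit value, DSCP = 0x2e)
 *	val >>= 18;		(val is now 0x2e, handed to the mangle op)
 */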

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	}

	/* We shouldn't have gotten a match in the first place! */
	WARN_ONCE(1, "Unhandled mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
								   rulei, mact,
								   val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be directly bound to the device. The rest of
		 * the rulesets are reached through "goto" actions instead.
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}
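
/* Sketch of the rule lifecycle as a hypothetical caller would drive it
 * (mirrors the add path above; error handling trimmed for brevity):
 *
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie, NULL,
 *					extack);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	... fill the rule info, commit the action block ...
 *	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *	if (err)
 *		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 *
 * Teardown is the mirror image: mlxsw_sp_acl_rule_del() followed by
 * mlxsw_sp_acl_rule_destroy().
 */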

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				       mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}
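
/* The counters above are reported as deltas: each call returns the packets
 * and bytes accumulated since the previous call and then re-snapshots the
 * hardware counter. A hypothetical poller therefore just forwards the
 * values:
 *
 *	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
 *					  &lastuse, &used_hw_stats);
 *	if (err)
 *		return err;
 *	... report packets/bytes/lastuse to the flow offload core ...
 */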

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for rule activity updates. */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}