xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
3  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the names of the copyright holders nor the names of its
15  *    contributors may be used to endorse or promote products derived from
16  *    this software without specific prior written permission.
17  *
18  * Alternatively, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2 as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/list.h>
39 #include <linux/string.h>
40 #include <linux/rhashtable.h>
41 #include <linux/netdevice.h>
42 
43 #include "reg.h"
44 #include "core.h"
45 #include "resources.h"
46 #include "spectrum.h"
47 #include "core_acl_flex_keys.h"
48 #include "core_acl_flex_actions.h"
49 #include "spectrum_acl_flex_keys.h"
50 
51 struct mlxsw_sp_acl {
52 	struct mlxsw_afk *afk;
53 	struct mlxsw_afa *afa;
54 	const struct mlxsw_sp_acl_ops *ops;
55 	struct rhashtable ruleset_ht;
56 	unsigned long priv[0];
57 	/* priv has to be always the last item */
58 };
59 
/* Return the flexible-key manager owned by this ACL instance. */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}
64 
/* Hash table key identifying a ruleset: the netdevice it is bound to,
 * the bind direction and the profile ops it was created with. The whole
 * struct is hashed byte-wise (see mlxsw_sp_acl_ruleset_ht_params), so it
 * is zeroed before use to avoid comparing padding garbage.
 */
struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	const struct mlxsw_sp_acl_profile_ops *ops;
};
70 
71 struct mlxsw_sp_acl_ruleset {
72 	struct rhash_head ht_node; /* Member of acl HT */
73 	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
74 	struct rhashtable rule_ht;
75 	unsigned int ref_count;
76 	unsigned long priv[0];
77 	/* priv has to be always the last item */
78 };
79 
80 struct mlxsw_sp_acl_rule {
81 	struct rhash_head ht_node; /* Member of rule HT */
82 	unsigned long cookie; /* HT key */
83 	struct mlxsw_sp_acl_ruleset *ruleset;
84 	struct mlxsw_sp_acl_rule_info *rulei;
85 	unsigned long priv[0];
86 	/* priv has to be always the last item */
87 };
88 
/* Ruleset hash table layout: byte-wise key over the whole ht_key struct,
 * mapping (dev, ingress, profile ops) -> ruleset.
 */
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};
95 
/* Per-ruleset rule hash table layout: keyed by the caller's cookie. */
static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};
102 
103 static struct mlxsw_sp_acl_ruleset *
104 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
105 			    const struct mlxsw_sp_acl_profile_ops *ops)
106 {
107 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
108 	struct mlxsw_sp_acl_ruleset *ruleset;
109 	size_t alloc_size;
110 	int err;
111 
112 	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
113 	ruleset = kzalloc(alloc_size, GFP_KERNEL);
114 	if (!ruleset)
115 		return ERR_PTR(-ENOMEM);
116 	ruleset->ref_count = 1;
117 	ruleset->ht_key.ops = ops;
118 
119 	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
120 	if (err)
121 		goto err_rhashtable_init;
122 
123 	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
124 	if (err)
125 		goto err_ops_ruleset_add;
126 
127 	return ruleset;
128 
129 err_ops_ruleset_add:
130 	rhashtable_destroy(&ruleset->rule_ht);
131 err_rhashtable_init:
132 	kfree(ruleset);
133 	return ERR_PTR(err);
134 }
135 
/* Tear a ruleset down in reverse order of creation: profile-private part
 * first, then the rule hash table, then the memory itself.
 */
static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}
145 
/* Bind a ruleset to a netdevice/direction. The hash-table key fields must
 * be filled in before the rhashtable insertion, since they are hashed at
 * insert time. If the profile bind fails, the hash-table entry is rolled
 * back so the ruleset is never discoverable half-bound.
 */
static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;
	err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
	if (err)
		goto err_ops_ruleset_bind;
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}
170 
/* Reverse of mlxsw_sp_acl_ruleset_bind(): profile unbind first, then
 * removal from the ruleset hash table.
 */
static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}
181 
/* Take a reference on a ruleset. Plain (non-atomic) counter; callers are
 * presumably serialized by RTNL — NOTE(review): confirm locking context.
 */
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}
186 
/* Drop a reference; on the last put, unbind the ruleset from its device
 * and destroy it.
 */
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
195 
/* Look up the ruleset bound to (dev, ingress, profile), taking a
 * reference on it; if none exists, create and bind a fresh one (which
 * starts with one reference owned by the caller). On bind failure the
 * newly created ruleset is destroyed. Returns ERR_PTR() on failure.
 */
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	/* Zero the key first: it is hashed byte-wise, including padding. */
	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.ops = ops;
	ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
					 mlxsw_sp_acl_ruleset_ht_params);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}
233 
/* Release a reference obtained via mlxsw_sp_acl_ruleset_get(). */
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
239 
240 struct mlxsw_sp_acl_rule_info *
241 mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
242 {
243 	struct mlxsw_sp_acl_rule_info *rulei;
244 	int err;
245 
246 	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
247 	if (!rulei)
248 		return NULL;
249 	rulei->act_block = mlxsw_afa_block_create(acl->afa);
250 	if (IS_ERR(rulei->act_block)) {
251 		err = PTR_ERR(rulei->act_block);
252 		goto err_afa_block_create;
253 	}
254 	return rulei;
255 
256 err_afa_block_create:
257 	kfree(rulei);
258 	return ERR_PTR(err);
259 }
260 
/* Free a rule-info object and the action block it owns. */
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}
266 
/* Finalize the accumulated action block; no further actions may be
 * appended afterwards. Returns 0 or a negative errno from the commit.
 */
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}
271 
/* Record the rule's match priority for later placement by the flavour. */
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}
277 
/* Add a 32-bit key/mask match on the given flexible-key element. */
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}
285 
/* Add a buffer (len bytes) key/mask match on the given flexible-key
 * element, e.g. for MAC addresses.
 */
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}
294 
/* Append a "continue to next rule" action to the rule's action block. */
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}
299 
/* Append a jump to another rule group to the rule's action block. */
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}
305 
/* Append a drop action; returns 0 or a negative errno. */
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}
310 
/* Append a forward action. With a non-NULL out_dev, the device must be a
 * mlxsw port belonging to this ASIC instance (cross-ASIC forwarding is
 * rejected with -EINVAL); its local port number is used. With a NULL
 * out_dev, forward back out of the ingress port instead.
 * Returns 0 or a negative errno.
 */
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}
337 
338 struct mlxsw_sp_acl_rule *
339 mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
340 			 struct mlxsw_sp_acl_ruleset *ruleset,
341 			 unsigned long cookie)
342 {
343 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
344 	struct mlxsw_sp_acl_rule *rule;
345 	int err;
346 
347 	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
348 	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
349 	if (!rule) {
350 		err = -ENOMEM;
351 		goto err_alloc;
352 	}
353 	rule->cookie = cookie;
354 	rule->ruleset = ruleset;
355 
356 	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
357 	if (IS_ERR(rule->rulei)) {
358 		err = PTR_ERR(rule->rulei);
359 		goto err_rulei_create;
360 	}
361 	return rule;
362 
363 err_rulei_create:
364 	kfree(rule);
365 err_alloc:
366 	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
367 	return ERR_PTR(err);
368 }
369 
/* Reverse of mlxsw_sp_acl_rule_create(): free the rule-info and the rule,
 * then drop the ruleset reference the rule was holding (which may destroy
 * the ruleset, hence it is snapshotted before kfree()).
 */
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
379 
/* Offload the rule via the profile ops, then make it discoverable by
 * cookie in the ruleset's rule hash table. If the hash-table insert
 * fails, the hardware offload is rolled back.
 */
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}
402 
/* Reverse of mlxsw_sp_acl_rule_add(): remove the rule from the lookup
 * hash table first, then un-offload it via the profile ops.
 */
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}
413 
/* Find a previously added rule by the caller's cookie, or NULL. */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				       mlxsw_sp_acl_rule_ht_params);
}
422 
/* Return the rule's match/action description object. */
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}
428 
429 #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
430 
431 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
432 				     char *enc_actions, bool is_first)
433 {
434 	struct mlxsw_sp *mlxsw_sp = priv;
435 	char pefa_pl[MLXSW_REG_PEFA_LEN];
436 	u32 kvdl_index;
437 	int ret;
438 	int err;
439 
440 	/* The first action set of a TCAM entry is stored directly in TCAM,
441 	 * not KVD linear area.
442 	 */
443 	if (is_first)
444 		return 0;
445 
446 	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE);
447 	if (ret < 0)
448 		return ret;
449 	kvdl_index = ret;
450 	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
451 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
452 	if (err)
453 		goto err_pefa_write;
454 	*p_kvdl_index = kvdl_index;
455 	return 0;
456 
457 err_pefa_write:
458 	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
459 	return err;
460 }
461 
/* afa callback: release a KVD linear action set. The first set lives in
 * TCAM (see mlxsw_sp_act_kvdl_set_add()), so there is nothing to free.
 */
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
471 
472 static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
473 					   u8 local_port)
474 {
475 	struct mlxsw_sp *mlxsw_sp = priv;
476 	char ppbs_pl[MLXSW_REG_PPBS_LEN];
477 	u32 kvdl_index;
478 	int ret;
479 	int err;
480 
481 	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1);
482 	if (ret < 0)
483 		return ret;
484 	kvdl_index = ret;
485 	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
486 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
487 	if (err)
488 		goto err_ppbs_write;
489 	*p_kvdl_index = kvdl_index;
490 	return 0;
491 
492 err_ppbs_write:
493 	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
494 	return err;
495 }
496 
/* afa callback: release a forward-entry KVD linear index. */
static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
503 
/* Callbacks handed to the flexible-action core so it can place action
 * sets and forward entries in this ASIC's KVD linear area.
 */
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
};
510 
/* Initialize the ACL subsystem for this ASIC: allocate the state struct
 * (plus the TCAM flavour's private trailer), create the flexible key and
 * action managers, the ruleset hash table, and run the flavour init.
 * Errors unwind in exact reverse order. Returns 0 or a negative errno.
 */
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;

	/* Note the differing error conventions below: mlxsw_afk_create()
	 * returns NULL on failure, mlxsw_afa_create() returns ERR_PTR().
	 */
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;
	return 0;

err_acl_ops_init:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}
561 
/* Tear down the ACL subsystem in exact reverse order of
 * mlxsw_sp_acl_init().
 */
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	acl_ops->fini(mlxsw_sp, acl->priv);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afa_destroy(acl->afa);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}
573