xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c (revision a5d9265e017f081f0dc133c0e2f45103d027b874)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

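/* Each vregion is periodically rehashed to get rid of inefficiencies in the
 * underlying region layout. The interval below is the default period in
 * milliseconds. User-provided values below the minimum are rejected, with
 * the exception of 0, which disables the periodic rehash entirely.
 */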
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						 ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->fini(mlxsw_sp, tcam->priv);
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

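/* Map a TC-style priority (lower number wins) onto a HW priority (higher
 * number wins) by mirroring it around the top of the usable range. For
 * example, with cap_kvd_size of 1024, TC priority 0 becomes HW priority
 * 1023 and TC priority 1 becomes 1022.
 */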
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}

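/* Region and group IDs are handed out from plain bitmaps; the first clear
 * bit is taken on _get() and returned on _put(). -ENOBUFS signals that the
 * respective HW resource pool is exhausted.
 */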
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head vregion_list;
	unsigned int region_count;
	struct rhashtable vchunk_ht;
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
};

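/* A vregion is the stable object that vchunks and rules hang off; the HW
 * region backing it can be swapped out underneath during rehash. While a
 * migration is in flight, both the old region and the new one (region2)
 * exist and are listed in the group, so lookups keep working throughout.
 */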
struct mlxsw_sp_acl_tcam_vregion {
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct delayed_work rehash_dw;
	struct mlxsw_sp *mlxsw_sp;
	bool failed_rollback; /* Indicates failed rollback during migration */
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[0];
	/* priv always has to be the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[0];
	/* priv always has to be the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};

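/* Rewrite the ordered list of regions bound to a group in HW via the PAGT
 * register. During a rehash both the region being migrated to (region2)
 * and the original region are packed, region2 first, so lookups keep
 * hitting throughout the migration.
 */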
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(vregion, &group->vregion_list, list) {
		if (vregion->region2)
			mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
						   vregion->region2->id, true);
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   vregion->region->id, false);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	if (tmplt_elusage) {
		group->tmplt_elusage_set = true;
		memcpy(&group->tmplt_elusage, tmplt_elusage,
		       sizeof(group->tmplt_elusage));
	}
	INIT_LIST_HEAD(&group->vregion_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = rhashtable_init(&group->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->vchunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As the priority of a vregion, return the priority of its first
	 * vchunk.
	 */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->vregion->group;
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		return err;

	group->region_count++;
	return 0;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->vregion->group;

	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}

static int
mlxsw_sp_acl_tcam_group_vregion_attach(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &group->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) >
		    mlxsw_sp_acl_tcam_vregion_prio(vregion))
			break;
	}
	list_add_tail(&vregion->list, pos);
	vregion->group = group;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, vregion->region);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_vregion_detach(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

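/* Walk the group's priority-ordered vregion list and pick the vregion the
 * requested priority falls into. If one is found but its key does not
 * cover the requested element usage, *p_need_split is set. A NULL return
 * means no existing vregion fits and a new one has to be created.
 */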
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_group_vregion_find(struct mlxsw_sp_acl_tcam_group *group,
				     unsigned int priority,
				     struct mlxsw_afk_element_usage *elusage,
				     bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check whether the requested priority does not
		 * rather belong under one of the following vregions.
		 */
		if (pos->next != &group->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than that of the currently inspected
		 * vregion, we cannot use this vregion, so return NULL to
		 * indicate a new vregion has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than that of the currently inspected
		 * vregion, we cannot use this vregion either. There is
		 * still a chance that the next vregion could fit, so let
		 * it be processed and eventually break at the check above.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate whether the vregion needs to be split in order
		 * to add the requested priority. A split is needed when
		 * the requested element usage won't fit into the found
		 * vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up a pattern;
	 * just use the template.
	 */
	if (group->tmplt_elusage_set) {
		memcpy(out, &group->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

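/* Create a HW region backing a vregion: allocate an ID, let the flavor
 * specific ops associate it, allocate and enable it in HW (PTAR/PACL) and
 * finally run the flavor specific init. Errors unwind in reverse order.
 */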
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->vregion->group->tcam,
					region->id);
	kfree(region);
}

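/* Arm the periodic rehash work for a vregion. An interval of 0 means the
 * periodic rehash is administratively disabled, so nothing is scheduled.
 */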
static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash_dw,
			       msecs_to_jiffies(interval));
}

static int
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash_dw.work);

	/* TODO: Take rtnl lock here as the rest of the code counts on it
	 * now. Later, this should be replaced by a per-vregion lock.
	 */
	rtnl_lock();
	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
	rtnl_unlock();
	mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	list_add_tail(&vregion->tlist, &tcam->vregion_list);

	if (ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash_dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
	}

	return vregion;

err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	if (ops->region_rehash_hints_get)
		cancel_delayed_work_sync(&vregion->rehash_dw);
	list_del(&vregion->tlist);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

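/* Update the rehash interval for all vregions. A value of 0 disables the
 * periodic rehash and cancels any pending work; any other value below the
 * minimum is rejected. Non-zero updates kick the work immediately so the
 * new interval takes effect right away.
 */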
int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	rtnl_lock();
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash_dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash_dw);
	}
	rtnl_unlock();
	return 0;
}

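/* Tie a vchunk to a vregion. Either an existing vregion covering both the
 * priority and the needed key elements is reused, or a new one is created
 * and attached to the group. The unsupported middle case, where an
 * existing vregion would have to be split, is rejected with -EOPNOTSUPP.
 */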
static int
mlxsw_sp_acl_tcam_vchunk_assoc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool vregion_created = false;
	bool need_split;
	int err;

	vregion = mlxsw_sp_acl_tcam_group_vregion_find(group, priority, elusage,
						       &need_split);
	if (vregion && need_split) {
		/* According to priority, the vchunk should belong to an
		 * existing vregion. However, this vchunk needs elements
		 * that vregion does not contain. We would need to split
		 * the existing vregion into two and create a new vregion
		 * for this vchunk in between. This is currently not
		 * supported.
		 */
		return -EOPNOTSUPP;
	}
	if (!vregion) {
		struct mlxsw_afk_element_usage vregion_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &vregion_elusage);
		vregion = mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp,
							   group->tcam,
							   &vregion_elusage);
		if (IS_ERR(vregion))
			return PTR_ERR(vregion);
		vregion_created = true;
	}

	vchunk->vregion = vregion;
	list_add_tail(&vchunk->list, &vregion->vchunk_list);

	if (!vregion_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_vregion_attach(mlxsw_sp, group, vregion);
	if (err)
		goto err_group_vregion_attach;

	return 0;

err_group_vregion_attach:
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
	return err;
}

static void
mlxsw_sp_acl_tcam_vchunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	list_del(&vchunk->list);
	if (list_empty(&vregion->vchunk_list)) {
		mlxsw_sp_acl_tcam_group_vregion_detach(mlxsw_sp, vregion);
		mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
	}
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_group *group,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->group = group;
	vchunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_vchunk_assoc(mlxsw_sp, group, priority,
					     elusage, vchunk);
	if (err)
		goto err_vchunk_assoc;

	err = rhashtable_insert_fast(&group->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&group->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk);
err_vchunk_assoc:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_group *group = vchunk->group;

	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	rhashtable_remove_fast(&group->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk);
	kfree(vchunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&group->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, group,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, group, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}

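/* Move a single rule to a chunk in another region: create the new HW
 * entry first and only then destroy the old one, so the rule keeps
 * matching at every point of the migration.
 */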
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk2)
{
	struct mlxsw_sp_acl_tcam_entry *entry2;

	entry2 = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk2);
	if (IS_ERR(entry2))
		return PTR_ERR(entry2);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = entry2;
	return 0;
}

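/* Migrate all ventries of a vchunk into a chunk in the given region. On
 * failure the already migrated ventries are moved back, unless this call
 * itself is the rollback pass, in which case there is nothing left to
 * fall back to and the vregion is marked with failed_rollback.
 */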
static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     bool this_is_rollback)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk2;
	int err;
	int err2;

	chunk2 = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(chunk2)) {
		if (this_is_rollback)
			vchunk->vregion->failed_rollback = true;
		return PTR_ERR(chunk2);
	}
	vchunk->chunk2 = chunk2;
	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk2);
		if (err) {
			if (this_is_rollback) {
				vchunk->vregion->failed_rollback = true;
				return err;
			}
			goto rollback;
		}
	}
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	vchunk->chunk = chunk2;
	vchunk->chunk2 = NULL;
	return 0;

rollback:
	/* Migrate the entries back to the original chunk. If some entry
	 * migration fails, there is no good way to proceed. Mark the
	 * vregion with the failed_rollback flag.
	 */
	list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list,
					     list) {
		err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
							vchunk->chunk);
		if (err2) {
			vchunk->vregion->failed_rollback = true;
			goto err_rollback;
		}
	}

	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;

err_rollback:
	return err;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region2,
							   false);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list,
					     list) {
		mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
						     vregion->region, true);
	}
	return err;
}

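/* Move all vchunks of a vregion into a freshly created region built from
 * the rehash hints, then retire whichever region ended up unused. If even
 * the rollback failed, both regions are kept attached so that no rules
 * are lost.
 */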
static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  void *hints_priv)
{
	struct mlxsw_sp_acl_tcam_region *region2, *unused_region;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);

	region2 = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						  vregion, hints_priv);
	if (IS_ERR(region2))
		return PTR_ERR(region2);

	vregion->region2 = region2;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, region2);
	if (err)
		goto err_group_region_attach;

	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion);
	if (!vregion->failed_rollback) {
		if (!err) {
			/* In case of successful migration, region2 is used and
			 * the original is unused.
			 */
			unused_region = vregion->region;
			vregion->region = vregion->region2;
		} else {
			/* In case of failure during migration, the original
			 * region is still used.
			 */
			unused_region = vregion->region2;
		}
		vregion->region2 = NULL;
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	}
	return err;

err_group_region_attach:
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region2);
	return err;
}

static int
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
	if (vregion->failed_rollback)
		return -EBUSY;

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv)) {
		err = PTR_ERR(hints_priv);
		if (err != -EAGAIN)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
		return err;
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, hints_priv);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
		if (vregion->failed_rollback) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
								   vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to roll back after vregion migration failure\n");
		}
	}

	ops->region_rehash_hints_put(hints_priv);
	return err;
}

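/* Patterns describe the key element sets vregions are created with. When
 * a new vregion is needed, the first pattern that is a superset of the
 * requested element usage is picked, so that rules added later with a
 * compatible key can share the same vregion.
 */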
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->group,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					  mlxsw_sp_acl_tcam_patterns,
					  MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					  tmplt_elusage);
	if (err)
		return err;

	/* For most TCAM clients it would make sense to allocate a TCAM
	 * chunk only when the first rule is written. This is not the case
	 * for the multicast router, which must be bound to a specific ACL
	 * group ID that already exists in HW before the multicast router
	 * is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->group, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing the multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->group,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}
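
/* Usage sketch (illustrative only; the real callers live elsewhere in the
 * driver): a client such as the flower offload path is expected to look
 * up its profile ops and drive the ruleset through these callbacks,
 * roughly:
 *
 *	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp,
 *					    MLXSW_SP_ACL_PROFILE_FLOWER);
 *	err = ops->ruleset_add(mlxsw_sp, tcam, ruleset_priv, tmplt_elusage);
 *	err = ops->ruleset_bind(mlxsw_sp, ruleset_priv, mlxsw_sp_port, true);
 *	err = ops->rule_add(mlxsw_sp, ruleset_priv, rule_priv, rulei);
 *	...
 *	ops->rule_del(mlxsw_sp, rule_priv);
 *	ops->ruleset_unbind(mlxsw_sp, ruleset_priv, mlxsw_sp_port, true);
 *	ops->ruleset_del(mlxsw_sp, ruleset_priv);
 */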
1569