xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c (revision 4003c9e78778e93188a09d6043a74f7154449d43)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies.
3 
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/mlx5/fs.h>
7 
8 #include "lib/fs_chains.h"
9 #include "fs_ft_pool.h"
10 #include "en/mapping.h"
11 #include "fs_core.h"
12 #include "en_tc.h"
13 
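/*
 * fs_chains maps software (chain, prio, level) tuples onto hardware flow
 * tables.  Tables are created on demand, reference counted and tracked in
 * the rhashtables below, and connected to each other with catch-all miss
 * rules, so a packet that matches nothing in one table continues to the
 * next prio and finally to the end/default table.
 */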
14 #define chains_lock(chains) ((chains)->lock)
15 #define chains_ht(chains) ((chains)->chains_ht)
16 #define prios_ht(chains) ((chains)->prios_ht)
17 #define chains_default_ft(chains) ((chains)->chains_default_ft)
18 #define chains_end_ft(chains) ((chains)->chains_end_ft)
19 #define FT_TBL_SZ (64 * 1024)
20 
21 struct mlx5_fs_chains {
22 	struct mlx5_core_dev *dev;
23 
24 	struct rhashtable chains_ht;
25 	struct rhashtable prios_ht;
26 	/* Protects above chains_ht and prios_ht */
27 	struct mutex lock;
28 
29 	struct mlx5_flow_table *chains_default_ft;
30 	struct mlx5_flow_table *chains_end_ft;
31 	struct mapping_ctx *chains_mapping;
32 
33 	enum mlx5_flow_namespace_type ns;
34 	u32 group_num;
35 	u32 flags;
36 	int fs_base_prio;
37 	int fs_base_level;
38 };
39 
40 struct fs_chain {
41 	struct rhash_head node;
42 
43 	u32 chain;
44 
45 	int ref;
46 	int id;
47 
48 	struct mlx5_fs_chains *chains;
49 	struct list_head prios_list;
50 	struct mlx5_flow_handle *restore_rule;
51 	struct mlx5_modify_hdr *miss_modify_hdr;
52 };
53 
54 struct prio_key {
55 	u32 chain;
56 	u32 prio;
57 	u32 level;
58 };
59 
60 struct prio {
61 	struct rhash_head node;
62 	struct list_head list;
63 
64 	struct prio_key key;
65 
66 	int ref;
67 
68 	struct fs_chain *chain;
69 	struct mlx5_flow_table *ft;
70 	struct mlx5_flow_table *next_ft;
71 	struct mlx5_flow_group *miss_group;
72 	struct mlx5_flow_handle *miss_rule;
73 };
74 
75 static const struct rhashtable_params chain_params = {
76 	.head_offset = offsetof(struct fs_chain, node),
77 	.key_offset = offsetof(struct fs_chain, chain),
78 	.key_len = sizeof_field(struct fs_chain, chain),
79 	.automatic_shrinking = true,
80 };
81 
82 static const struct rhashtable_params prio_params = {
83 	.head_offset = offsetof(struct prio, node),
84 	.key_offset = offsetof(struct prio, key),
85 	.key_len = sizeof_field(struct prio, key),
86 	.automatic_shrinking = true,
87 };
88 
89 bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
90 {
91 	return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
92 }
93 
94 bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
95 {
96 	return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
97 }
98 
99 bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
100 {
101 	return mlx5_chains_prios_supported(chains) &&
102 	       mlx5_chains_ignore_flow_level_supported(chains);
103 }
104 
105 u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
106 {
107 	if (!mlx5_chains_prios_supported(chains))
108 		return 1;
109 
110 	if (mlx5_chains_ignore_flow_level_supported(chains))
111 		return UINT_MAX - 1;
112 
113 	/* We should get here only for eswitch case */
114 	return FDB_TC_MAX_CHAIN;
115 }
116 
117 u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
118 {
119 	return mlx5_chains_get_chain_range(chains) + 1;
120 }
121 
122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
123 {
124 	if (mlx5_chains_ignore_flow_level_supported(chains))
125 		return UINT_MAX;
126 
127 	if (!chains->dev->priv.eswitch ||
128 	    chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
129 		return 1;
130 
131 	/* We should get here only for eswitch case */
132 	return FDB_TC_MAX_PRIO;
133 }
134 
135 static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
136 {
137 	if (mlx5_chains_ignore_flow_level_supported(chains))
138 		return UINT_MAX;
139 
140 	/* Same value for FDB and NIC RX tables */
141 	return FDB_TC_LEVELS_PER_PRIO;
142 }
143 
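/* Replace the table that terminates the chain hierarchy.  Only the pointer
 * is updated here; miss rules of already connected prios are not re-pointed
 * by this helper.
 */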
144 void
145 mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
146 		       struct mlx5_flow_table *ft)
147 {
148 	chains_end_ft(chains) = ft;
149 }
150 
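/* Create the flow table backing one (chain, prio, level).  The root table
 * (chain 0, prio 1, level 0) is always created as an fs_core managed table
 * so it connects to the preceding fs_core prio; when ignore_flow_level is
 * supported, every other table is created unmanaged at level base + 1 and
 * is connected only through explicit miss rules.
 */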
151 static struct mlx5_flow_table *
152 mlx5_chains_create_table(struct mlx5_fs_chains *chains,
153 			 u32 chain, u32 prio, u32 level)
154 {
155 	struct mlx5_flow_table_attr ft_attr = {};
156 	struct mlx5_flow_namespace *ns;
157 	struct mlx5_flow_table *ft;
158 	int sz;
159 
160 	if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
161 		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
162 				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
163 
164 	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
165 	ft_attr.max_fte = sz;
166 
167 	/* Until ignore_flow_level is allowed on FT creation (and not just on
168 	 * FTEs), we use chains_default_ft(chains) as the table's next_ft.
169 	 * Instead, the caller should add an explicit miss rule if needed.
170 	 */
171 	ft_attr.next_ft = chains_default_ft(chains);
172 
173 	/* The root table (chain 0, prio 1, level 0) is required to be
174 	 * connected to the previous fs_core managed prio.
175 	 * We always create it, as a managed table, in order to align with
176 	 * fs_core logic.
177 	 */
178 	if (!mlx5_chains_ignore_flow_level_supported(chains) ||
179 	    (chain == 0 && prio == 1 && level == 0)) {
180 		ft_attr.level = chains->fs_base_level;
181 		ft_attr.prio = chains->fs_base_prio + prio - 1;
182 		ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
183 			mlx5_get_fdb_sub_ns(chains->dev, chain) :
184 			mlx5_get_flow_namespace(chains->dev, chains->ns);
185 	} else {
186 		ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
187 		ft_attr.prio = chains->fs_base_prio;
188 		/* Firmware doesn't allow us to create another level 0 table,
189 		 * so we create all unmanaged tables as level 1 (base + 1).
190 		 *
191 		 * To connect them, we use explicit miss rules with
192 		 * ignore_flow_level. Caller is responsible to create
193 		 * these rules (if needed).
194 		 */
195 		ft_attr.level = chains->fs_base_level + 1;
196 		ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
197 	}
198 
199 	if (!ns) {
200 		mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
201 		return ERR_PTR(-EOPNOTSUPP);
202 	}
203 
204 	ft_attr.autogroup.num_reserved_entries = 2;
205 	ft_attr.autogroup.max_num_groups = chains->group_num;
206 	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
207 	if (IS_ERR(ft)) {
208 		mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
209 			       (int)PTR_ERR(ft), chain, prio, level, sz);
210 		return ft;
211 	}
212 
213 	return ft;
214 }
215 
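/* Allocate a mapping id for the chain and build the modify header that
 * writes this id into the chain register on a miss.  For the FDB namespace
 * an eswitch restore rule is installed as well; for NIC RX the id is
 * written to reg_b and reaches software directly.
 */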
216 static int
217 create_chain_restore(struct fs_chain *chain)
218 {
219 	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
220 	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
221 	struct mlx5_fs_chains *chains = chain->chains;
222 	enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
223 	struct mlx5_modify_hdr *mod_hdr;
224 	u32 index;
225 	int err;
226 
227 	if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
228 	    !mlx5_chains_prios_supported(chains) ||
229 	    !chains->chains_mapping)
230 		return 0;
231 
232 	err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
233 	if (err)
234 		return err;
235 	if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
236 		/* we got the special default flow tag id, so we won't know
237 		 * if we actually marked the packet with the restore rule
238 		 * we create.
239 		 *
240 		 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
241 		 */
242 		err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
243 		mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
244 		if (err)
245 			return err;
246 	}
247 
248 	chain->id = index;
249 
250 	if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
251 		mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
252 		chain->restore_rule = esw_add_restore_rule(esw, chain->id);
253 		if (IS_ERR(chain->restore_rule)) {
254 			err = PTR_ERR(chain->restore_rule);
255 			goto err_rule;
256 		}
257 	} else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
258 		/* For NIC RX we don't need a restore rule
259 		 * since we write the metadata to reg_b
260 		 * that is passed to SW directly.
261 		 */
262 		mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
263 	} else {
264 		err = -EINVAL;
265 		goto err_rule;
266 	}
267 
268 	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
269 	MLX5_SET(set_action_in, modact, field,
270 		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
271 	MLX5_SET(set_action_in, modact, offset,
272 		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
273 	MLX5_SET(set_action_in, modact, length,
274 		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
275 		 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
276 	MLX5_SET(set_action_in, modact, data, chain->id);
277 	mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
278 					   1, modact);
279 	if (IS_ERR(mod_hdr)) {
280 		err = PTR_ERR(mod_hdr);
281 		goto err_mod_hdr;
282 	}
283 	chain->miss_modify_hdr = mod_hdr;
284 
285 	return 0;
286 
287 err_mod_hdr:
288 	if (!IS_ERR_OR_NULL(chain->restore_rule))
289 		mlx5_del_flow_rules(chain->restore_rule);
290 err_rule:
291 	/* Datapath can't find this mapping, so we can safely remove it */
292 	mapping_remove(chains->chains_mapping, chain->id);
293 	return err;
294 }
295 
296 static void destroy_chain_restore(struct fs_chain *chain)
297 {
298 	struct mlx5_fs_chains *chains = chain->chains;
299 
300 	if (!chain->miss_modify_hdr)
301 		return;
302 
303 	if (chain->restore_rule)
304 		mlx5_del_flow_rules(chain->restore_rule);
305 
306 	mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
307 	mapping_remove(chains->chains_mapping, chain->id);
308 }
309 
310 static struct fs_chain *
311 mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
312 {
313 	struct fs_chain *chain_s = NULL;
314 	int err;
315 
316 	chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
317 	if (!chain_s)
318 		return ERR_PTR(-ENOMEM);
319 
320 	chain_s->chains = chains;
321 	chain_s->chain = chain;
322 	INIT_LIST_HEAD(&chain_s->prios_list);
323 
324 	err = create_chain_restore(chain_s);
325 	if (err)
326 		goto err_restore;
327 
328 	err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
329 				     chain_params);
330 	if (err)
331 		goto err_insert;
332 
333 	return chain_s;
334 
335 err_insert:
336 	destroy_chain_restore(chain_s);
337 err_restore:
338 	kvfree(chain_s);
339 	return ERR_PTR(err);
340 }
341 
342 static void
343 mlx5_chains_destroy_chain(struct fs_chain *chain)
344 {
345 	struct mlx5_fs_chains *chains = chain->chains;
346 
347 	rhashtable_remove_fast(&chains_ht(chains), &chain->node,
348 			       chain_params);
349 
350 	destroy_chain_restore(chain);
351 	kvfree(chain);
352 }
353 
354 static struct fs_chain *
355 mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
356 {
357 	struct fs_chain *chain_s;
358 
359 	chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
360 					 chain_params);
361 	if (!chain_s) {
362 		chain_s = mlx5_chains_create_chain(chains, chain);
363 		if (IS_ERR(chain_s))
364 			return chain_s;
365 	}
366 
367 	chain_s->ref++;
368 
369 	return chain_s;
370 }
371 
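/* Add the catch-all rule forwarding anything not matched in @ft to
 * @next_ft.  When the miss goes to the end table of a mapped chain, the
 * chain id is also set via the miss modify header so software can restore
 * the chain a packet missed in.
 */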
372 static struct mlx5_flow_handle *
373 mlx5_chains_add_miss_rule(struct fs_chain *chain,
374 			  struct mlx5_flow_table *ft,
375 			  struct mlx5_flow_table *next_ft)
376 {
377 	struct mlx5_fs_chains *chains = chain->chains;
378 	struct mlx5_flow_destination dest = {};
379 	struct mlx5_flow_act act = {};
380 
381 	act.flags  = FLOW_ACT_NO_APPEND;
382 	if (mlx5_chains_ignore_flow_level_supported(chain->chains))
383 		act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
384 
385 	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
386 	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
387 	dest.ft = next_ft;
388 
389 	if (chains->chains_mapping && next_ft == chains_end_ft(chains) &&
390 	    chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
391 	    mlx5_chains_prios_supported(chains)) {
392 		act.modify_hdr = chain->miss_modify_hdr;
393 		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
394 	}
395 
396 	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
397 }
398 
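/* When a level 0 table is inserted or removed, every level of the previous
 * prio must miss to the new next table.  The new miss rules are added
 * first and the old ones deleted only after all of them succeeded, so a
 * failure can be rolled back without disconnecting the chain.
 */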
399 static int
400 mlx5_chains_update_prio_prevs(struct prio *prio,
401 			      struct mlx5_flow_table *next_ft)
402 {
403 	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
404 	struct fs_chain *chain = prio->chain;
405 	struct prio *pos;
406 	int n = 0, err;
407 
408 	if (prio->key.level)
409 		return 0;
410 
411 	/* Iterate in reverse order until reaching the level 0 rule of
412 	 * the previous priority, adding all the miss rules first, so we can
413 	 * revert them if any of them fails.
414 	 */
415 	pos = prio;
416 	list_for_each_entry_continue_reverse(pos,
417 					     &chain->prios_list,
418 					     list) {
419 		miss_rules[n] = mlx5_chains_add_miss_rule(chain,
420 							  pos->ft,
421 							  next_ft);
422 		if (IS_ERR(miss_rules[n])) {
423 			err = PTR_ERR(miss_rules[n]);
424 			goto err_prev_rule;
425 		}
426 
427 		n++;
428 		if (!pos->key.level)
429 			break;
430 	}
431 
432 	/* Success, delete old miss rules, and update the pointers. */
433 	n = 0;
434 	pos = prio;
435 	list_for_each_entry_continue_reverse(pos,
436 					     &chain->prios_list,
437 					     list) {
438 		mlx5_del_flow_rules(pos->miss_rule);
439 
440 		pos->miss_rule = miss_rules[n];
441 		pos->next_ft = next_ft;
442 
443 		n++;
444 		if (!pos->key.level)
445 			break;
446 	}
447 
448 	return 0;
449 
450 err_prev_rule:
451 	while (--n >= 0)
452 		mlx5_del_flow_rules(miss_rules[n]);
453 
454 	return err;
455 }
456 
457 static void
458 mlx5_chains_put_chain(struct fs_chain *chain)
459 {
460 	if (--chain->ref == 0)
461 		mlx5_chains_destroy_chain(chain);
462 }
463 
464 static struct prio *
465 mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
466 			u32 chain, u32 prio, u32 level)
467 {
468 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
469 	struct mlx5_flow_handle *miss_rule;
470 	struct mlx5_flow_group *miss_group;
471 	struct mlx5_flow_table *next_ft;
472 	struct mlx5_flow_table *ft;
473 	struct fs_chain *chain_s;
474 	struct list_head *pos;
475 	struct prio *prio_s;
476 	u32 *flow_group_in;
477 	int err;
478 
479 	chain_s = mlx5_chains_get_chain(chains, chain);
480 	if (IS_ERR(chain_s))
481 		return ERR_CAST(chain_s);
482 
483 	prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
484 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
485 	if (!prio_s || !flow_group_in) {
486 		err = -ENOMEM;
487 		goto err_alloc;
488 	}
489 
490 	/* Chain's prio list is sorted by prio and level.
491 	 * And all levels of some prio point to the next prio's level 0.
492 	 * Example list (prio, level):
493 	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
494 	 * In hardware, we will have the following pointers:
495 	 * (3,0) -> (5,0) -> (7,0) -> Slow path
496 	 * (3,1) -> (5,0)
497 	 * (5,1) -> (7,0)
498 	 * (6,1) -> (7,0)
499 	 */
500 
501 	/* Default miss for each chain: */
502 	next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
503 		  chains_default_ft(chains) :
504 		  chains_end_ft(chains);
505 	list_for_each(pos, &chain_s->prios_list) {
506 		struct prio *p = list_entry(pos, struct prio, list);
507 
508 		/* exit on first pos that is larger */
509 		if (prio < p->key.prio || (prio == p->key.prio &&
510 					   level < p->key.level)) {
511 			/* Get next level 0 table */
512 			next_ft = p->key.level == 0 ? p->ft : p->next_ft;
513 			break;
514 		}
515 	}
516 
517 	ft = mlx5_chains_create_table(chains, chain, prio, level);
518 	if (IS_ERR(ft)) {
519 		err = PTR_ERR(ft);
520 		goto err_create;
521 	}
522 
523 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
524 		 ft->max_fte - 2);
525 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
526 		 ft->max_fte - 1);
527 	miss_group = mlx5_create_flow_group(ft, flow_group_in);
528 	if (IS_ERR(miss_group)) {
529 		err = PTR_ERR(miss_group);
530 		goto err_group;
531 	}
532 
533 	/* Add miss rule to next_ft */
534 	miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
535 	if (IS_ERR(miss_rule)) {
536 		err = PTR_ERR(miss_rule);
537 		goto err_miss_rule;
538 	}
539 
540 	prio_s->miss_group = miss_group;
541 	prio_s->miss_rule = miss_rule;
542 	prio_s->next_ft = next_ft;
543 	prio_s->chain = chain_s;
544 	prio_s->key.chain = chain;
545 	prio_s->key.prio = prio;
546 	prio_s->key.level = level;
547 	prio_s->ft = ft;
548 
549 	err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
550 				     prio_params);
551 	if (err)
552 		goto err_insert;
553 
554 	list_add(&prio_s->list, pos->prev);
555 
556 	/* Table is ready, connect it */
557 	err = mlx5_chains_update_prio_prevs(prio_s, ft);
558 	if (err)
559 		goto err_update;
560 
561 	kvfree(flow_group_in);
562 	return prio_s;
563 
564 err_update:
565 	list_del(&prio_s->list);
566 	rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
567 			       prio_params);
568 err_insert:
569 	mlx5_del_flow_rules(miss_rule);
570 err_miss_rule:
571 	mlx5_destroy_flow_group(miss_group);
572 err_group:
573 	mlx5_destroy_flow_table(ft);
574 err_create:
575 err_alloc:
576 	kvfree(prio_s);
577 	kvfree(flow_group_in);
578 	mlx5_chains_put_chain(chain_s);
579 	return ERR_PTR(err);
580 }
581 
582 static void
583 mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
584 			 struct prio *prio)
585 {
586 	struct fs_chain *chain = prio->chain;
587 
588 	WARN_ON(mlx5_chains_update_prio_prevs(prio,
589 					      prio->next_ft));
590 
591 	list_del(&prio->list);
592 	rhashtable_remove_fast(&prios_ht(chains), &prio->node,
593 			       prio_params);
594 	mlx5_del_flow_rules(prio->miss_rule);
595 	mlx5_destroy_flow_group(prio->miss_group);
596 	mlx5_destroy_flow_table(prio->ft);
597 	mlx5_chains_put_chain(chain);
598 	kvfree(prio);
599 }
600 
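/* Get (creating it on first use) the flow table for (chain, prio, level)
 * and take a reference on it.  Lower levels of the same prio are created
 * implicitly so fs_core can connect the tables.  A typical caller pairs
 * this with mlx5_chains_put_table() on the same tuple, roughly:
 *
 *	ft = mlx5_chains_get_table(chains, chain, prio, 0);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	... add rules on ft ...
 *	mlx5_chains_put_table(chains, chain, prio, 0);
 */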
601 struct mlx5_flow_table *
602 mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
603 		      u32 level)
604 {
605 	struct mlx5_flow_table *prev_fts;
606 	struct prio *prio_s;
607 	struct prio_key key;
608 	int l = 0;
609 
610 	if ((chain > mlx5_chains_get_chain_range(chains) &&
611 	     chain != mlx5_chains_get_nf_ft_chain(chains)) ||
612 	    prio > mlx5_chains_get_prio_range(chains) ||
613 	    level > mlx5_chains_get_level_range(chains))
614 		return ERR_PTR(-EOPNOTSUPP);
615 
616 	/* create earlier levels for correct fs_core lookup when
617 	 * connecting tables.
618 	 */
619 	for (l = 0; l < level; l++) {
620 		prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
621 		if (IS_ERR(prev_fts)) {
622 			prio_s = ERR_CAST(prev_fts);
623 			goto err_get_prevs;
624 		}
625 	}
626 
627 	key.chain = chain;
628 	key.prio = prio;
629 	key.level = level;
630 
631 	mutex_lock(&chains_lock(chains));
632 	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
633 					prio_params);
634 	if (!prio_s) {
635 		prio_s = mlx5_chains_create_prio(chains, chain,
636 						 prio, level);
637 		if (IS_ERR(prio_s))
638 			goto err_create_prio;
639 	}
640 
641 	++prio_s->ref;
642 	mutex_unlock(&chains_lock(chains));
643 
644 	return prio_s->ft;
645 
646 err_create_prio:
647 	mutex_unlock(&chains_lock(chains));
648 err_get_prevs:
649 	while (--l >= 0)
650 		mlx5_chains_put_table(chains, chain, prio, l);
651 	return ERR_CAST(prio_s);
652 }
653 
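/* Drop the reference taken by mlx5_chains_get_table().  The prio table
 * (and the implicitly referenced lower levels) is destroyed once the last
 * reference goes away.
 */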
654 void
655 mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
656 		      u32 level)
657 {
658 	struct prio *prio_s;
659 	struct prio_key key;
660 
661 	key.chain = chain;
662 	key.prio = prio;
663 	key.level = level;
664 
665 	mutex_lock(&chains_lock(chains));
666 	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
667 					prio_params);
668 	if (!prio_s)
669 		goto err_get_prio;
670 
671 	if (--prio_s->ref == 0)
672 		mlx5_chains_destroy_prio(chains, prio_s);
673 	mutex_unlock(&chains_lock(chains));
674 
675 	while (level-- > 0)
676 		mlx5_chains_put_table(chains, chain, prio, level);
677 
678 	return;
679 
680 err_get_prio:
681 	mutex_unlock(&chains_lock(chains));
682 	WARN_ONCE(1,
683 		  "Couldn't find table: (chain: %d prio: %d level: %d)",
684 		  chain, prio, level);
685 }
686 
687 struct mlx5_flow_table *
688 mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
689 {
690 	return chains_end_ft(chains);
691 }
692 
693 struct mlx5_flow_table *
694 mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
695 {
696 	u32 chain, prio, level;
697 	int err;
698 
699 	if (!mlx5_chains_ignore_flow_level_supported(chains)) {
700 		err = -EOPNOTSUPP;
701 
702 		mlx5_core_warn(chains->dev,
703 			       "Couldn't create global flow table, ignore_flow_level not supported.");
704 		goto err_ignore;
705 	}
706 
707 	chain = mlx5_chains_get_chain_range(chains);
708 	prio = mlx5_chains_get_prio_range(chains);
709 	level = mlx5_chains_get_level_range(chains);
710 
711 	return mlx5_chains_create_table(chains, chain, prio, level);
712 
713 err_ignore:
714 	return ERR_PTR(err);
715 }
716 
717 void
718 mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
719 				 struct mlx5_flow_table *ft)
720 {
721 	mlx5_destroy_flow_table(ft);
722 }
723 
724 static struct mlx5_fs_chains *
725 mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
726 {
727 	struct mlx5_fs_chains *chains;
728 	int err;
729 
730 	chains = kzalloc(sizeof(*chains), GFP_KERNEL);
731 	if (!chains)
732 		return ERR_PTR(-ENOMEM);
733 
734 	chains->dev = dev;
735 	chains->flags = attr->flags;
736 	chains->ns = attr->ns;
737 	chains->group_num = attr->max_grp_num;
738 	chains->chains_mapping = attr->mapping;
739 	chains->fs_base_prio = attr->fs_base_prio;
740 	chains->fs_base_level = attr->fs_base_level;
741 	chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;
742 
743 	err = rhashtable_init(&chains_ht(chains), &chain_params);
744 	if (err)
745 		goto init_chains_ht_err;
746 
747 	err = rhashtable_init(&prios_ht(chains), &prio_params);
748 	if (err)
749 		goto init_prios_ht_err;
750 
751 	mutex_init(&chains_lock(chains));
752 
753 	return chains;
754 
755 init_prios_ht_err:
756 	rhashtable_destroy(&chains_ht(chains));
757 init_chains_ht_err:
758 	kfree(chains);
759 	return ERR_PTR(err);
760 }
761 
762 static void
763 mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
764 {
765 	mutex_destroy(&chains_lock(chains));
766 	rhashtable_destroy(&prios_ht(chains));
767 	rhashtable_destroy(&chains_ht(chains));
768 
769 	kfree(chains);
770 }
771 
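/* Create a chains context for @dev.  @attr selects the flow namespace,
 * feature flags, autogroup budget, chain mapping context, the fs_core
 * prio/level the tables hang off, and the default/end table that packets
 * miss to.
 */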
772 struct mlx5_fs_chains *
773 mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
774 {
775 	struct mlx5_fs_chains *chains;
776 
777 	chains = mlx5_chains_init(dev, attr);
778 
779 	return chains;
780 }
781 
782 void
783 mlx5_chains_destroy(struct mlx5_fs_chains *chains)
784 {
785 	mlx5_chains_cleanup(chains);
786 }
787 
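/* Map @chain to a compact id suitable for the chain register/restore
 * metadata.  This id is what restore rules and miss modify headers write;
 * release it with mlx5_chains_put_chain_mapping().
 */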
788 int
789 mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
790 			      u32 *chain_mapping)
791 {
792 	struct mapping_ctx *ctx = chains->chains_mapping;
793 	struct mlx5_mapped_obj mapped_obj = {};
794 
795 	mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
796 	mapped_obj.chain = chain;
797 	return mapping_add(ctx, &mapped_obj, chain_mapping);
798 }
799 
800 int
801 mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
802 {
803 	struct mapping_ctx *ctx = chains->chains_mapping;
804 
805 	return mapping_remove(ctx, chain_mapping);
806 }
807 
808 void
809 mlx5_chains_print_info(struct mlx5_fs_chains *chains)
810 {
811 	mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
812 }
813