xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies.
3 
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/mlx5/fs.h>
7 
8 #include "lib/fs_chains.h"
9 #include "fs_ft_pool.h"
10 #include "en/mapping.h"
11 #include "fs_core.h"
12 #include "en_tc.h"
13 
/* Field accessor shorthands for struct mlx5_fs_chains internals. */
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
#define chains_default_ft(chains) ((chains)->chains_default_ft)
#define chains_end_ft(chains) ((chains)->chains_end_ft)
/* Fixed table size used only for the special "nf" chain's table. */
#define FT_TBL_SZ (64 * 1024)
20 
/* Context managing dynamically created chain/prio flow tables for one
 * flow namespace (FDB or NIC RX).
 */
struct mlx5_fs_chains {
	struct mlx5_core_dev *dev;

	struct rhashtable chains_ht;	/* chain number -> struct fs_chain */
	struct rhashtable prios_ht;	/* struct prio_key -> struct prio */
	/* Protects above chains_ht and prios_ht */
	struct mutex lock;

	struct mlx5_flow_table *chains_default_ft; /* default next_ft for created tables */
	struct mlx5_flow_table *chains_end_ft;	   /* miss target after the last chain */
	struct mapping_ctx *chains_mapping;	   /* chain -> restore tag mapping */

	enum mlx5_flow_namespace_type ns;
	u32 group_num;		/* max autogroups per created table */
	u32 flags;		/* MLX5_CHAINS_* capability flags */
	int fs_base_prio;	/* base prio inside the fs_core namespace */
	int fs_base_level;	/* base level inside the fs_core namespace */
};
39 
/* Refcounted per-chain state, hashed by chain number in chains_ht. */
struct fs_chain {
	struct rhash_head node;

	u32 chain;	/* chain number; rhashtable key */

	int ref;	/* taken by each prio created on this chain */
	int id;		/* restore mapping id written to reg metadata on miss */

	struct mlx5_fs_chains *chains;
	struct list_head prios_list;	/* prios sorted by (prio, level) */
	struct mlx5_flow_handle *restore_rule;	/* FDB-only chain restore rule */
	struct mlx5_modify_hdr *miss_modify_hdr; /* writes chain id on miss */
};
53 
/* Hash key uniquely identifying one flow table: (chain, prio, level). */
struct prio_key {
	u32 chain;
	u32 prio;
	u32 level;
};
59 
/* Refcounted state for one created flow table at (chain, prio, level). */
struct prio {
	struct rhash_head node;		/* membership in prios_ht */
	struct list_head list;		/* position in chain->prios_list */

	struct prio_key key;

	int ref;	/* mlx5_chains_get_table() references */

	struct fs_chain *chain;		/* owning chain; holds a chain ref */
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *next_ft; /* current miss target */
	struct mlx5_flow_group *miss_group; /* group on the table's last 2 entries */
	struct mlx5_flow_handle *miss_rule; /* forwards misses to next_ft */
};
74 
/* chains_ht parameters: keyed by the raw u32 chain number. */
static const struct rhashtable_params chain_params = {
	.head_offset = offsetof(struct fs_chain, node),
	.key_offset = offsetof(struct fs_chain, chain),
	.key_len = sizeof_field(struct fs_chain, chain),
	.automatic_shrinking = true,
};
81 
/* prios_ht parameters: keyed by the whole struct prio_key. */
static const struct rhashtable_params prio_params = {
	.head_offset = offsetof(struct prio, node),
	.key_offset = offsetof(struct prio, key),
	.key_len = sizeof_field(struct prio, key),
	.automatic_shrinking = true,
};
88 
mlx5_chains_prios_supported(struct mlx5_fs_chains * chains)89 bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
90 {
91 	return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
92 }
93 
mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains * chains)94 bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
95 {
96 	return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
97 }
98 
mlx5_chains_backwards_supported(struct mlx5_fs_chains * chains)99 bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
100 {
101 	return mlx5_chains_prios_supported(chains) &&
102 	       mlx5_chains_ignore_flow_level_supported(chains);
103 }
104 
mlx5_chains_get_chain_range(struct mlx5_fs_chains * chains)105 u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
106 {
107 	if (!mlx5_chains_prios_supported(chains))
108 		return 1;
109 
110 	if (mlx5_chains_ignore_flow_level_supported(chains))
111 		return UINT_MAX - 1;
112 
113 	/* We should get here only for eswitch case */
114 	return FDB_TC_MAX_CHAIN;
115 }
116 
mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains * chains)117 u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
118 {
119 	return mlx5_chains_get_chain_range(chains) + 1;
120 }
121 
mlx5_chains_get_prio_range(struct mlx5_fs_chains * chains)122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
123 {
124 	if (mlx5_chains_ignore_flow_level_supported(chains))
125 		return UINT_MAX;
126 
127 	if (!chains->dev->priv.eswitch ||
128 	    chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
129 		return 1;
130 
131 	/* We should get here only for eswitch case */
132 	return FDB_TC_MAX_PRIO;
133 }
134 
mlx5_chains_get_level_range(struct mlx5_fs_chains * chains)135 static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
136 {
137 	if (mlx5_chains_ignore_flow_level_supported(chains))
138 		return UINT_MAX;
139 
140 	/* Same value for FDB and NIC RX tables */
141 	return FDB_TC_LEVELS_PER_PRIO;
142 }
143 
144 void
mlx5_chains_set_end_ft(struct mlx5_fs_chains * chains,struct mlx5_flow_table * ft)145 mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
146 		       struct mlx5_flow_table *ft)
147 {
148 	chains_end_ft(chains) = ft;
149 }
150 
/* Create one flow table for (chain, prio, level).
 *
 * The root table (and all tables when ignore_flow_level is unsupported)
 * is created as a managed table placed by fs_core using fs_base_prio /
 * fs_base_level; all other tables are unmanaged at fs_base_level + 1 and
 * must be connected by the caller via explicit miss rules.
 */
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
			 u32 chain, u32 prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int sz;

	if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	/* The "nf" chain gets a fixed large table; all others take their
	 * size from the flow table pool.
	 */
	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
		FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
	ft_attr.max_fte = sz;

	/* We use chains_default_ft(chains) as the table's next_ft till
	 * ignore_flow_level is allowed on FT creation and not just for FTEs.
	 * Instead caller should add an explicit miss rule if needed.
	 */
	ft_attr.next_ft = chains_default_ft(chains);

	/* The root table(chain 0, prio 1, level 0) is required to be
	 * connected to the previous fs_core managed prio.
	 * We always create it, as a managed table, in order to align with
	 * fs_core logic.
	 */
	if (!mlx5_chains_ignore_flow_level_supported(chains) ||
	    (chain == 0 && prio == 1 && level == 0)) {
		ft_attr.level = chains->fs_base_level;
		ft_attr.prio = chains->fs_base_prio + prio - 1;
		/* Only the FDB namespace is split into per-chain sub-namespaces. */
		ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
			mlx5_get_fdb_sub_ns(chains->dev, chain) :
			mlx5_get_flow_namespace(chains->dev, chains->ns);
	} else {
		ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
		ft_attr.prio = chains->fs_base_prio;
		/* Firmware doesn't allow us to create another level 0 table,
		 * so we create all unmanaged tables as level 1 (base + 1).
		 *
		 * To connect them, we use explicit miss rules with
		 * ignore_flow_level. Caller is responsible to create
		 * these rules (if needed).
		 */
		ft_attr.level = chains->fs_base_level + 1;
		ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
	}

	if (!ns) {
		mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* The last two entries are reserved for the per-prio miss group
	 * created in mlx5_chains_create_prio().
	 */
	ft_attr.autogroup.num_reserved_entries = 2;
	ft_attr.autogroup.max_num_groups = chains->group_num;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
			       (int)PTR_ERR(ft), chain, prio, level, sz);
		return ft;
	}

	return ft;
}
216 
/* Set up "chain restore" state for a chain: allocate a mapping id and
 * build the modify header that writes that id to register metadata on a
 * chain miss; for FDB also install the restore rule that maps the id
 * back on the slow path. Silently a no-op (returns 0) for the "nf"
 * chain, when prios are unsupported, or without a mapping context.
 */
static int
create_chain_restore(struct fs_chain *chain)
{
	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_fs_chains *chains = chain->chains;
	enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
	struct mlx5_modify_hdr *mod_hdr;
	u32 index;
	int err;

	if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
	    !mlx5_chains_prios_supported(chains) ||
	    !chains->chains_mapping)
		return 0;

	err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
	if (err)
		return err;
	if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
		/* we got the special default flow tag id, so we won't know
		 * if we actually marked the packet with the restore rule
		 * we create.
		 *
		 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
		 */
		/* Allocate a second id, then release the unusable default one. */
		err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
		mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
		if (err)
			return err;
	}

	chain->id = index;

	if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
		mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
		chain->restore_rule = esw_add_restore_rule(esw, chain->id);
		if (IS_ERR(chain->restore_rule)) {
			err = PTR_ERR(chain->restore_rule);
			goto err_rule;
		}
	} else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
		/* For NIC RX we don't need a restore rule
		 * since we write the metadata to reg_b
		 * that is passed to SW directly.
		 */
		mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
	} else {
		err = -EINVAL;
		goto err_rule;
	}

	/* Build a single SET action writing chain->id to the mapped register. */
	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
	MLX5_SET(set_action_in, modact, offset,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
	/* A 32-bit field write is encoded as length 0. */
	MLX5_SET(set_action_in, modact, length,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
		 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
	MLX5_SET(set_action_in, modact, data, chain->id);
	mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
					   1, modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		goto err_mod_hdr;
	}
	chain->miss_modify_hdr = mod_hdr;

	return 0;

err_mod_hdr:
	/* restore_rule is NULL for NIC RX, hence the IS_ERR_OR_NULL check. */
	if (!IS_ERR_OR_NULL(chain->restore_rule))
		mlx5_del_flow_rules(chain->restore_rule);
err_rule:
	/* Datapath can't find this mapping, so we can safely remove it */
	mapping_remove(chains->chains_mapping, chain->id);
	return err;
}
296 
/* Undo create_chain_restore(): delete the restore rule (if any), free
 * the modify header and release the chain mapping id, in that order.
 */
static void destroy_chain_restore(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	/* create_chain_restore() was a no-op for this chain. */
	if (!chain->miss_modify_hdr)
		return;

	/* Only FDB chains have a restore rule. */
	if (chain->restore_rule)
		mlx5_del_flow_rules(chain->restore_rule);

	mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
	mapping_remove(chains->chains_mapping, chain->id);
}
310 
311 static struct fs_chain *
mlx5_chains_create_chain(struct mlx5_fs_chains * chains,u32 chain)312 mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
313 {
314 	struct fs_chain *chain_s = NULL;
315 	int err;
316 
317 	chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
318 	if (!chain_s)
319 		return ERR_PTR(-ENOMEM);
320 
321 	chain_s->chains = chains;
322 	chain_s->chain = chain;
323 	INIT_LIST_HEAD(&chain_s->prios_list);
324 
325 	err = create_chain_restore(chain_s);
326 	if (err)
327 		goto err_restore;
328 
329 	err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
330 				     chain_params);
331 	if (err)
332 		goto err_insert;
333 
334 	return chain_s;
335 
336 err_insert:
337 	destroy_chain_restore(chain_s);
338 err_restore:
339 	kvfree(chain_s);
340 	return ERR_PTR(err);
341 }
342 
343 static void
mlx5_chains_destroy_chain(struct fs_chain * chain)344 mlx5_chains_destroy_chain(struct fs_chain *chain)
345 {
346 	struct mlx5_fs_chains *chains = chain->chains;
347 
348 	rhashtable_remove_fast(&chains_ht(chains), &chain->node,
349 			       chain_params);
350 
351 	destroy_chain_restore(chain);
352 	kvfree(chain);
353 }
354 
355 static struct fs_chain *
mlx5_chains_get_chain(struct mlx5_fs_chains * chains,u32 chain)356 mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
357 {
358 	struct fs_chain *chain_s;
359 
360 	chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
361 					 chain_params);
362 	if (!chain_s) {
363 		chain_s = mlx5_chains_create_chain(chains, chain);
364 		if (IS_ERR(chain_s))
365 			return chain_s;
366 	}
367 
368 	chain_s->ref++;
369 
370 	return chain_s;
371 }
372 
373 static struct mlx5_flow_handle *
mlx5_chains_add_miss_rule(struct fs_chain * chain,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)374 mlx5_chains_add_miss_rule(struct fs_chain *chain,
375 			  struct mlx5_flow_table *ft,
376 			  struct mlx5_flow_table *next_ft)
377 {
378 	struct mlx5_fs_chains *chains = chain->chains;
379 	struct mlx5_flow_destination dest = {};
380 	struct mlx5_flow_act act = {};
381 
382 	act.flags  = FLOW_ACT_NO_APPEND;
383 	if (mlx5_chains_ignore_flow_level_supported(chain->chains))
384 		act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
385 
386 	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
387 	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
388 	dest.ft = next_ft;
389 
390 	if (chains->chains_mapping && next_ft == chains_end_ft(chains) &&
391 	    chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
392 	    mlx5_chains_prios_supported(chains)) {
393 		act.modify_hdr = chain->miss_modify_hdr;
394 		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
395 	}
396 
397 	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
398 }
399 
/* Re-point the miss rules of the prios preceding @prio in the chain's
 * sorted list so they forward to @next_ft instead of their current
 * target. Only level 0 prios are cross-prio miss targets, so this is a
 * no-op for other levels.
 */
static int
mlx5_chains_update_prio_prevs(struct prio *prio,
			      struct mlx5_flow_table *next_ft)
{
	/* Big enough for all levels of one prio plus the previous level 0. */
	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
	struct fs_chain *chain = prio->chain;
	struct prio *pos;
	int n = 0, err;

	if (prio->key.level)
		return 0;

	/* Iterate in reverse order until reaching the level 0 rule of
	 * the previous priority, adding all the miss rules first, so we can
	 * revert them if any of them fails.
	 */
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		miss_rules[n] = mlx5_chains_add_miss_rule(chain,
							  pos->ft,
							  next_ft);
		if (IS_ERR(miss_rules[n])) {
			err = PTR_ERR(miss_rules[n]);
			goto err_prev_rule;
		}

		n++;
		if (!pos->key.level)
			break;
	}

	/* Success, delete old miss rules, and update the pointers. */
	n = 0;
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		mlx5_del_flow_rules(pos->miss_rule);

		pos->miss_rule = miss_rules[n];
		pos->next_ft = next_ft;

		n++;
		if (!pos->key.level)
			break;
	}

	return 0;

err_prev_rule:
	/* Revert the new rules added so far; old rules are still in place. */
	while (--n >= 0)
		mlx5_del_flow_rules(miss_rules[n]);

	return err;
}
457 
458 static void
mlx5_chains_put_chain(struct fs_chain * chain)459 mlx5_chains_put_chain(struct fs_chain *chain)
460 {
461 	if (--chain->ref == 0)
462 		mlx5_chains_destroy_chain(chain);
463 }
464 
/* Create the flow table for (chain, prio, level) along with its miss
 * group and miss rule, insert it into the prios hashtable and the
 * chain's sorted list, and re-point earlier prios' miss rules at the
 * new table. Called with chains_lock held; takes a chain reference.
 */
static struct prio *
mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
			u32 chain, u32 prio, u32 level)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_table *ft;
	struct fs_chain *chain_s;
	struct list_head *pos;
	struct prio *prio_s;
	u32 *flow_group_in;
	int err;

	chain_s = mlx5_chains_get_chain(chains, chain);
	if (IS_ERR(chain_s))
		return ERR_CAST(chain_s);

	prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!prio_s || !flow_group_in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	/* Chain's prio list is sorted by prio and level.
	 * And all levels of some prio point to the next prio's level 0.
	 * Example list (prio, level):
	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
	 * In hardware, we will we have the following pointers:
	 * (3,0) -> (5,0) -> (7,0) -> Slow path
	 * (3,1) -> (5,0)
	 * (5,1) -> (7,0)
	 * (6,1) -> (7,0)
	 */

	/* Default miss for each chain: */
	next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
		  chains_default_ft(chains) :
		  chains_end_ft(chains);
	/* Find the insertion point and the table this prio should miss to. */
	list_for_each(pos, &chain_s->prios_list) {
		struct prio *p = list_entry(pos, struct prio, list);

		/* exit on first pos that is larger */
		if (prio < p->key.prio || (prio == p->key.prio &&
					   level < p->key.level)) {
			/* Get next level 0 table */
			next_ft = p->key.level == 0 ? p->ft : p->next_ft;
			break;
		}
	}

	ft = mlx5_chains_create_table(chains, chain, prio, level);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_create;
	}

	/* Miss group occupies the two reserved entries at the table's end. */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		goto err_group;
	}

	/* Add miss rule to next_ft */
	miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		goto err_miss_rule;
	}

	prio_s->miss_group = miss_group;
	prio_s->miss_rule = miss_rule;
	prio_s->next_ft = next_ft;
	prio_s->chain = chain_s;
	prio_s->key.chain = chain;
	prio_s->key.prio = prio;
	prio_s->key.level = level;
	prio_s->ft = ft;

	err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
				     prio_params);
	if (err)
		goto err_insert;

	/* Insert before the first larger entry found above (or at tail). */
	list_add(&prio_s->list, pos->prev);

	/* Table is ready, connect it */
	err = mlx5_chains_update_prio_prevs(prio_s, ft);
	if (err)
		goto err_update;

	kvfree(flow_group_in);
	return prio_s;

err_update:
	list_del(&prio_s->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
			       prio_params);
err_insert:
	mlx5_del_flow_rules(miss_rule);
err_miss_rule:
	mlx5_destroy_flow_group(miss_group);
err_group:
	mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
	kvfree(prio_s);
	kvfree(flow_group_in);
	mlx5_chains_put_chain(chain_s);
	return ERR_PTR(err);
}
582 
/* Tear down a prio: first re-point earlier prios' miss rules past this
 * table (to prio->next_ft), then unlink it and destroy the HW objects
 * in reverse creation order. Called with chains_lock held.
 */
static void
mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
			 struct prio *prio)
{
	struct fs_chain *chain = prio->chain;

	/* Disconnecting an existing table is not expected to fail. */
	WARN_ON(mlx5_chains_update_prio_prevs(prio,
					      prio->next_ft));

	list_del(&prio->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio->node,
			       prio_params);
	mlx5_del_flow_rules(prio->miss_rule);
	mlx5_destroy_flow_group(prio->miss_group);
	mlx5_destroy_flow_table(prio->ft);
	mlx5_chains_put_chain(chain);
	kvfree(prio);
}
601 
/* Get (and reference) the flow table for (chain, prio, level), creating
 * it on first use. References to all lower levels of the same
 * (chain, prio) are taken implicitly so fs_core can connect the tables.
 * Returns the table or an ERR_PTR().
 */
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct mlx5_flow_table *prev_fts;
	struct prio *prio_s;
	struct prio_key key;
	int l = 0;

	/* Reject coordinates outside the supported ranges; the "nf" chain
	 * is the one allowed exception above the chain range.
	 */
	if ((chain > mlx5_chains_get_chain_range(chains) &&
	     chain != mlx5_chains_get_nf_ft_chain(chains)) ||
	    prio > mlx5_chains_get_prio_range(chains) ||
	    level > mlx5_chains_get_level_range(chains))
		return ERR_PTR(-EOPNOTSUPP);

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables.
	 */
	for (l = 0; l < level; l++) {
		prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
		if (IS_ERR(prev_fts)) {
			prio_s = ERR_CAST(prev_fts);
			goto err_get_prevs;
		}
	}

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s) {
		prio_s = mlx5_chains_create_prio(chains, chain,
						 prio, level);
		if (IS_ERR(prio_s))
			goto err_create_prio;
	}

	++prio_s->ref;
	mutex_unlock(&chains_lock(chains));

	return prio_s->ft;

err_create_prio:
	mutex_unlock(&chains_lock(chains));
err_get_prevs:
	/* Drop the lower-level references taken above. */
	while (--l >= 0)
		mlx5_chains_put_table(chains, chain, prio, l);
	return ERR_CAST(prio_s);
}
654 
/* Release a reference taken by mlx5_chains_get_table(). The last put
 * destroys the prio and its table, then the implicitly-taken references
 * on the lower levels are released recursively.
 */
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct prio *prio_s;
	struct prio_key key;

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s)
		goto err_get_prio;

	if (--prio_s->ref == 0)
		mlx5_chains_destroy_prio(chains, prio_s);
	mutex_unlock(&chains_lock(chains));

	/* Release the lower levels obtained by mlx5_chains_get_table(). */
	while (level-- > 0)
		mlx5_chains_put_table(chains, chain, prio, level);

	return;

err_get_prio:
	mutex_unlock(&chains_lock(chains));
	WARN_ONCE(1,
		  "Couldn't find table: (chain: %d prio: %d level: %d)",
		  chain, prio, level);
}
687 
688 struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains * chains)689 mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
690 {
691 	return chains_end_ft(chains);
692 }
693 
694 struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains * chains)695 mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
696 {
697 	u32 chain, prio, level;
698 	int err;
699 
700 	if (!mlx5_chains_ignore_flow_level_supported(chains)) {
701 		err = -EOPNOTSUPP;
702 
703 		mlx5_core_warn(chains->dev,
704 			       "Couldn't create global flow table, ignore_flow_level not supported.");
705 		goto err_ignore;
706 	}
707 
708 	chain = mlx5_chains_get_chain_range(chains);
709 	prio = mlx5_chains_get_prio_range(chains);
710 	level = mlx5_chains_get_level_range(chains);
711 
712 	return mlx5_chains_create_table(chains, chain, prio, level);
713 
714 err_ignore:
715 	return ERR_PTR(err);
716 }
717 
/* Destroy a table created by mlx5_chains_create_global_table(); no
 * other chains state is associated with it.
 */
void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
				 struct mlx5_flow_table *ft)
{
	mlx5_destroy_flow_table(ft);
}
724 
725 static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev * dev,struct mlx5_chains_attr * attr)726 mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
727 {
728 	struct mlx5_fs_chains *chains;
729 	int err;
730 
731 	chains = kzalloc(sizeof(*chains), GFP_KERNEL);
732 	if (!chains)
733 		return ERR_PTR(-ENOMEM);
734 
735 	chains->dev = dev;
736 	chains->flags = attr->flags;
737 	chains->ns = attr->ns;
738 	chains->group_num = attr->max_grp_num;
739 	chains->chains_mapping = attr->mapping;
740 	chains->fs_base_prio = attr->fs_base_prio;
741 	chains->fs_base_level = attr->fs_base_level;
742 	chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;
743 
744 	err = rhashtable_init(&chains_ht(chains), &chain_params);
745 	if (err)
746 		goto init_chains_ht_err;
747 
748 	err = rhashtable_init(&prios_ht(chains), &prio_params);
749 	if (err)
750 		goto init_prios_ht_err;
751 
752 	mutex_init(&chains_lock(chains));
753 
754 	return chains;
755 
756 init_prios_ht_err:
757 	rhashtable_destroy(&chains_ht(chains));
758 init_chains_ht_err:
759 	kfree(chains);
760 	return ERR_PTR(err);
761 }
762 
763 static void
mlx5_chains_cleanup(struct mlx5_fs_chains * chains)764 mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
765 {
766 	mutex_destroy(&chains_lock(chains));
767 	rhashtable_destroy(&prios_ht(chains));
768 	rhashtable_destroy(&chains_ht(chains));
769 
770 	kfree(chains);
771 }
772 
/* Public constructor; thin wrapper around mlx5_chains_init(). */
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	return mlx5_chains_init(dev, attr);
}
782 
/* Public destructor; thin wrapper around mlx5_chains_cleanup(). */
void
mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_cleanup(chains);
}
788 
789 int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains * chains,u32 chain,u32 * chain_mapping)790 mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
791 			      u32 *chain_mapping)
792 {
793 	struct mapping_ctx *ctx = chains->chains_mapping;
794 	struct mlx5_mapped_obj mapped_obj = {};
795 
796 	mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
797 	mapped_obj.chain = chain;
798 	return mapping_add(ctx, &mapped_obj, chain_mapping);
799 }
800 
801 int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains * chains,u32 chain_mapping)802 mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
803 {
804 	struct mapping_ctx *ctx = chains->chains_mapping;
805 
806 	return mapping_remove(ctx, chain_mapping);
807 }
808 
809 void
mlx5_chains_print_info(struct mlx5_fs_chains * chains)810 mlx5_chains_print_info(struct mlx5_fs_chains *chains)
811 {
812 	mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
813 }
814