xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c (revision 4ff71af020ae59ae2d83b174646fc2ad9fcd4dc4)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3 
4 #include <linux/mlx5/vport.h>
5 #include <mlx5_core.h>
6 #include <fs_core.h>
7 #include <fs_cmd.h>
8 #include "fs_hws_pools.h"
9 #include "mlx5hws.h"
10 
11 #define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
12 #define MLX5HWS_CTX_QUEUE_SIZE 256
13 
14 static struct mlx5hws_action *
15 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
16 static void
17 mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
18 			unsigned long index);
19 static void
20 mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
21 			unsigned long index);
22 
/* Pre-create the shared HWS actions reused by all rules in this namespace
 * (tag, VLAN pop/push, drop, L2 decap, remove-header-vlan), set up the
 * insert-header and L3-tunnel-to-L2 pattern pools, and initialize the
 * xarrays that cache per-resource actions on demand.
 *
 * Returns 0 on success or a negative errno; on failure everything created
 * so far is torn down in reverse order via the goto chain below.
 */
static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
					 struct mlx5_fs_hws_context *fs_ctx)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
	enum mlx5hws_action_type action_type;
	int err = -ENOSPC;

	hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
	if (!hws_pool->tag_action)
		return err;
	hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
	if (!hws_pool->pop_vlan_action)
		goto destroy_tag;
	hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
	if (!hws_pool->push_vlan_action)
		goto destroy_pop_vlan;
	hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
	if (!hws_pool->drop_action)
		goto destroy_push_vlan;
	/* L2 decap needs no header data, hence the empty reformat_hdr. */
	action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
	hws_pool->decapl2_action =
		mlx5hws_action_create_reformat(ctx, action_type, 1,
					       &reformat_hdr, 0, flags);
	if (!hws_pool->decapl2_action)
		goto destroy_drop;
	hws_pool->remove_hdr_vlan_action =
		mlx5_fs_create_action_remove_header_vlan(ctx);
	if (!hws_pool->remove_hdr_vlan_action)
		goto destroy_decapl2;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_INSERT_HEADER);
	if (err)
		goto destroy_remove_hdr;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
	if (err)
		goto cleanup_insert_hdr;
	/* Lazily-populated caches; entries are created on first use. */
	xa_init(&hws_pool->el2tol3tnl_pools);
	xa_init(&hws_pool->el2tol2tnl_pools);
	xa_init(&hws_pool->mh_pools);
	xa_init(&hws_pool->table_dests);
	xa_init(&hws_pool->vport_dests);
	xa_init(&hws_pool->vport_vhca_dests);
	xa_init(&hws_pool->aso_meters);
	xa_init(&hws_pool->sample_dests);
	return 0;

cleanup_insert_hdr:
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
destroy_remove_hdr:
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
destroy_decapl2:
	mlx5hws_action_destroy(hws_pool->decapl2_action);
destroy_drop:
	mlx5hws_action_destroy(hws_pool->drop_action);
destroy_push_vlan:
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
destroy_pop_vlan:
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
destroy_tag:
	mlx5hws_action_destroy(hws_pool->tag_action);
	return err;
}
89 
/* Tear down everything mlx5_fs_init_hws_actions_pool() created, in the
 * reverse order: first drain the lazily-populated caches (sampler/meter
 * data, vport and table destination actions, modify-header and reformat
 * pools), then the pattern pools, then the shared pre-created actions.
 */
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5_fs_hws_data *fs_hws_data;
	struct mlx5hws_action *action;
	struct mlx5_fs_pool *pool;
	unsigned long i;

	/* Cached sampler/meter entries hold no action here any more; only
	 * the bookkeeping structs themselves need freeing.
	 */
	xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->sample_dests);
	xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->aso_meters);
	xa_for_each(&hws_pool->vport_vhca_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_vhca_dests);
	xa_for_each(&hws_pool->vport_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_dests);
	/* table_dests entries are destroyed with their flow tables. */
	xa_destroy(&hws_pool->table_dests);
	xa_for_each(&hws_pool->mh_pools, i, pool)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
	xa_destroy(&hws_pool->mh_pools);
	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
	xa_destroy(&hws_pool->el2tol2tnl_pools);
	xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
	xa_destroy(&hws_pool->el2tol3tnl_pools);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
	mlx5hws_action_destroy(hws_pool->decapl2_action);
	mlx5hws_action_destroy(hws_pool->drop_action);
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
	mlx5hws_action_destroy(hws_pool->tag_action);
}
129 
mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace * ns)130 static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
131 {
132 	struct mlx5hws_context_attr hws_ctx_attr = {};
133 	int err;
134 
135 	hws_ctx_attr.queues = min_t(int, num_online_cpus(),
136 				    MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
137 	hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
138 
139 	ns->fs_hws_context.hws_ctx =
140 		mlx5hws_context_open(ns->dev, &hws_ctx_attr);
141 	if (!ns->fs_hws_context.hws_ctx) {
142 		mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
143 		return -EINVAL;
144 	}
145 	err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
146 	if (err) {
147 		mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
148 		mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
149 		return err;
150 	}
151 	return 0;
152 }
153 
mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace * ns)154 static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
155 {
156 	mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
157 	return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
158 }
159 
/* Pair (or unpair, when @peer_ns is NULL) this namespace's HWS context
 * with a peer device's context for cross-vhca steering.
 */
static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id)
{
	struct mlx5hws_context *peer_ctx;

	peer_ctx = peer_ns ? peer_ns->fs_hws_context.hws_ctx : NULL;
	mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
				 peer_vhca_id);
	return 0;
}
172 
mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)173 static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
174 				       struct mlx5_flow_table *ft,
175 				       struct mlx5_flow_table *next_ft)
176 {
177 	struct mlx5hws_table *next_tbl;
178 	int err;
179 
180 	if (!ns->fs_hws_context.hws_ctx)
181 		return -EINVAL;
182 
183 	/* if no change required, return */
184 	if (!next_ft && !ft->fs_hws_table.miss_ft_set)
185 		return 0;
186 
187 	next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
188 	err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
189 	if (err) {
190 		mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
191 		return err;
192 	}
193 	ft->fs_hws_table.miss_ft_set = !!next_tbl;
194 	return 0;
195 }
196 
mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)197 static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
198 					      struct mlx5_flow_table *ft)
199 {
200 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
201 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
202 	struct mlx5hws_action *dest_ft_action;
203 	struct xarray *dests_xa;
204 	int err;
205 
206 	dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
207 							      ft->id, flags);
208 	if (!dest_ft_action) {
209 		mlx5_core_err(ns->dev, "Failed creating dest table action\n");
210 		return -ENOMEM;
211 	}
212 
213 	dests_xa = &fs_ctx->hws_pool.table_dests;
214 	err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
215 	if (err)
216 		mlx5hws_action_destroy(dest_ft_action);
217 	return err;
218 }
219 
mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)220 static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
221 					      struct mlx5_flow_table *ft)
222 {
223 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
224 	struct mlx5hws_action *dest_ft_action;
225 	struct xarray *dests_xa;
226 	int err;
227 
228 	dests_xa = &fs_ctx->hws_pool.table_dests;
229 	dest_ft_action = xa_erase(dests_xa, ft->id);
230 	if (!dest_ft_action) {
231 		mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
232 		return -ENOENT;
233 	}
234 
235 	err = mlx5hws_action_destroy(dest_ft_action);
236 	if (err)
237 		mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
238 	return err;
239 }
240 
/* Create a flow table. FW-terminated tables are delegated to the FW
 * command interface; HWS-native tables are supported for FDB only.
 * In both cases a shared dest-table action is cached so rules can
 * forward to this table.
 */
static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_table_attr *ft_attr,
					  struct mlx5_flow_table *next_ft)
{
	struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
	struct mlx5hws_table_attr tbl_attr = {};
	struct mlx5hws_table *tbl;
	int err;

	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
								   next_ft);
		if (err)
			return err;
		err = mlx5_fs_add_flow_table_dest_action(ns, ft);
		if (err)
			mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
		return err;
	}

	if (ns->table_type != FS_FT_FDB) {
		mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
			      ns->table_type);
		return -EOPNOTSUPP;
	}

	tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
	tbl_attr.level = ft_attr->level;
	tbl_attr.uid = ft_attr->uid;
	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
		return -EINVAL;
	}

	ft->fs_hws_table.hws_table = tbl;
	ft->id = mlx5hws_table_get_id(tbl);

	/* Chain to the next table via the default miss, if requested. */
	if (next_ft) {
		err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
		if (err)
			goto destroy_table;
	}

	/* HWS tables have no fixed FTE capacity. */
	ft->max_fte = INT_MAX;

	err = mlx5_fs_add_flow_table_dest_action(ns, ft);
	if (err)
		goto clear_ft_miss;
	return 0;

clear_ft_miss:
	mlx5_fs_set_ft_default_miss(ns, ft, NULL);
destroy_table:
	mlx5hws_table_destroy(tbl);
	ft->fs_hws_table.hws_table = NULL;
	return err;
}
300 
/* Destroy a flow table. Teardown is best-effort: intermediate failures
 * are logged but do not stop the remaining steps, and the last error
 * (if any) is returned.
 */
static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft)
{
	int err;

	err = mlx5_fs_del_flow_table_dest_action(ns, ft);
	if (err)
		mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);

	/* FW-terminated tables are owned by the FW command interface. */
	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	/* Unhook the default miss before destroying the table itself. */
	err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
	if (err)
		mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);

	err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
	if (err)
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);

	return err;
}
323 
mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)324 static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
325 					  struct mlx5_flow_table *ft,
326 					  struct mlx5_flow_table *next_ft)
327 {
328 	if (mlx5_fs_cmd_is_fw_term_table(ft))
329 		return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
330 
331 	return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
332 }
333 
/* Root FT updates are always delegated to the FW commands, even in HWS
 * mode.
 */
static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       u32 underlay_qpn,
				       bool disconnect)
{
	const struct mlx5_flow_cmds *fw_cmds = mlx5_fs_cmd_get_fw_cmds();

	return fw_cmds->update_root_ft(ns, ft, underlay_qpn, disconnect);
}
342 
/* Create a flow group. For HWS tables a group is backed by a BWC matcher
 * built from the group's match criteria; the group's start_flow_index is
 * used as the matcher priority.
 */
static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft, u32 *in,
					  struct mlx5_flow_group *fg)
{
	struct mlx5hws_bwc_matcher *matcher;
	struct mlx5hws_match_parameters mask;
	u8 match_criteria_enable;
	u32 priority;

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);

	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);
	match_criteria_enable = MLX5_GET(create_flow_group_in, in,
					 match_criteria_enable);
	priority = MLX5_GET(create_flow_group_in, in, start_flow_index);

	matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
					     priority, match_criteria_enable,
					     &mask);
	if (!matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_hws_matcher.matcher = matcher;
	return 0;
}
372 
mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg)373 static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
374 					   struct mlx5_flow_table *ft,
375 					   struct mlx5_flow_group *fg)
376 {
377 	if (mlx5_fs_cmd_is_fw_term_table(ft))
378 		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
379 
380 	return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
381 }
382 
383 static struct mlx5hws_action *
mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)384 mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
385 			   struct mlx5_flow_rule *dst)
386 {
387 	return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
388 }
389 
390 static struct mlx5hws_action *
mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)391 mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
392 				  struct mlx5_flow_rule *dst)
393 {
394 	u32 table_num = dst->dest_attr.ft_num;
395 
396 	return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
397 }
398 
399 static struct mlx5hws_action *
mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)400 mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
401 				     struct mlx5_flow_rule *dst)
402 {
403 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
404 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
405 	u32 table_num = dst->dest_attr.ft_num;
406 
407 	return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
408 }
409 
410 static struct mlx5hws_action *
mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst,bool is_dest_type_uplink)411 mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
412 			      struct mlx5_flow_rule *dst,
413 			      bool is_dest_type_uplink)
414 {
415 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
416 	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
417 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
418 	struct mlx5hws_action *dest;
419 	struct xarray *dests_xa;
420 	bool vhca_id_valid;
421 	unsigned long idx;
422 	u16 vport_num;
423 	int err;
424 
425 	vhca_id_valid = is_dest_type_uplink ||
426 			(dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
427 	vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
428 	if (vhca_id_valid) {
429 		dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
430 		idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
431 	} else {
432 		dests_xa = &fs_ctx->hws_pool.vport_dests;
433 		idx = vport_num;
434 	}
435 dest_load:
436 	dest = xa_load(dests_xa, idx);
437 	if (dest)
438 		return dest;
439 
440 	dest = mlx5hws_action_create_dest_vport(ctx, vport_num,	vhca_id_valid,
441 						dest_attr->vport.vhca_id, flags);
442 
443 	err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
444 	if (err) {
445 		mlx5hws_action_destroy(dest);
446 		dest = NULL;
447 
448 		if (err == -EBUSY)
449 			/* xarray entry was already stored by another thread */
450 			goto dest_load;
451 	}
452 
453 	return dest;
454 }
455 
456 static struct mlx5hws_action *
mlx5_fs_create_dest_action_range(struct mlx5hws_context * ctx,struct mlx5_flow_rule * dst)457 mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
458 				 struct mlx5_flow_rule *dst)
459 {
460 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
461 	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
462 
463 	return mlx5hws_action_create_dest_match_range(ctx,
464 						      dest_attr->range.field,
465 						      dest_attr->range.hit_ft,
466 						      dest_attr->range.miss_ft,
467 						      dest_attr->range.min,
468 						      dest_attr->range.max,
469 						      flags);
470 }
471 
/* Look up the mlx5_fs_hws_data cached at @index in @cache_xa, allocating
 * and inserting a fresh entry (refcount 0, no action yet) if none exists.
 * The HWS action itself is created lazily by mlx5_fs_get_hws_action().
 * Returns the entry, or NULL on allocation/insertion failure.
 */
static struct mlx5_fs_hws_data *
mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
{
	struct mlx5_fs_hws_data *fs_hws_data;
	int err;

	/* Hold the xarray lock across load+insert so concurrent callers
	 * cannot insert duplicate entries; allocation under the spinlock
	 * must therefore be GFP_ATOMIC.
	 */
	xa_lock(cache_xa);
	fs_hws_data = xa_load(cache_xa, index);
	if (!fs_hws_data) {
		fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
		if (!fs_hws_data) {
			xa_unlock(cache_xa);
			return NULL;
		}
		refcount_set(&fs_hws_data->hws_action_refcount, 0);
		mutex_init(&fs_hws_data->lock);
		err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
		if (err) {
			kfree(fs_hws_data);
			xa_unlock(cache_xa);
			return NULL;
		}
	}
	xa_unlock(cache_xa);

	return fs_hws_data;
}
499 
500 static struct mlx5hws_action *
mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)501 mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
502 			     struct mlx5_exe_aso *exe_aso)
503 {
504 	struct mlx5_fs_hws_create_action_ctx create_ctx;
505 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
506 	struct mlx5_fs_hws_data *meter_hws_data;
507 	u32 id = exe_aso->base_id;
508 	struct xarray *meters_xa;
509 
510 	meters_xa = &fs_ctx->hws_pool.aso_meters;
511 	meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
512 	if (!meter_hws_data)
513 		return NULL;
514 
515 	create_ctx.hws_ctx = ctx;
516 	create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
517 	create_ctx.id = id;
518 	create_ctx.return_reg_id = exe_aso->return_reg_id;
519 
520 	return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
521 }
522 
mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)523 static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
524 					 struct mlx5_exe_aso *exe_aso)
525 {
526 	struct mlx5_fs_hws_data *meter_hws_data;
527 	struct xarray *meters_xa;
528 
529 	meters_xa = &fs_ctx->hws_pool.aso_meters;
530 	meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
531 	if (!meter_hws_data)
532 		return;
533 	return mlx5_fs_put_hws_action(meter_hws_data);
534 }
535 
536 static struct mlx5hws_action *
mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)537 mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
538 				struct mlx5_flow_rule *dst)
539 {
540 	struct mlx5_fs_hws_create_action_ctx create_ctx;
541 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
542 	struct mlx5_fs_hws_data *sampler_hws_data;
543 	u32 id = dst->dest_attr.sampler_id;
544 	struct xarray *sampler_xa;
545 
546 	sampler_xa = &fs_ctx->hws_pool.sample_dests;
547 	sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
548 	if (!sampler_hws_data)
549 		return NULL;
550 
551 	create_ctx.hws_ctx = ctx;
552 	create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
553 	create_ctx.id = id;
554 
555 	return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
556 }
557 
/* Drop one reference on the cached sampler action for @sampler_id, if it
 * exists.
 */
static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
					    u32 sampler_id)
{
	struct mlx5_fs_hws_data *sampler_hws_data;

	sampler_hws_data = xa_load(&fs_ctx->hws_pool.sample_dests, sampler_id);
	if (sampler_hws_data)
		mlx5_fs_put_hws_action(sampler_hws_data);
}
571 
572 static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context * ctx,struct mlx5hws_action_dest_attr * dests,u32 num_of_dests)573 mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
574 				 struct mlx5hws_action_dest_attr *dests,
575 				 u32 num_of_dests)
576 {
577 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
578 
579 	return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
580 						flags);
581 }
582 
583 static struct mlx5hws_action *
mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context * fs_ctx)584 mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
585 {
586 	return fs_ctx->hws_pool.push_vlan_action;
587 }
588 
mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan * vlan)589 static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
590 {
591 	u16 n_ethtype = vlan->ethtype;
592 	u8 prio = vlan->prio;
593 	u16 vid = vlan->vid;
594 
595 	return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
596 }
597 
598 static struct mlx5hws_action *
mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context * fs_ctx)599 mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
600 {
601 	return fs_ctx->hws_pool.pop_vlan_action;
602 }
603 
604 static struct mlx5hws_action *
mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context * fs_ctx)605 mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
606 {
607 	return fs_ctx->hws_pool.decapl2_action;
608 }
609 
610 static struct mlx5hws_action *
mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context * fs_ctx)611 mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
612 {
613 	return fs_ctx->hws_pool.drop_action;
614 }
615 
616 static struct mlx5hws_action *
mlx5_fs_get_action_tag(struct mlx5_fs_hws_context * fs_ctx)617 mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
618 {
619 	return fs_ctx->hws_pool.tag_action;
620 }
621 
622 static struct mlx5hws_action *
mlx5_fs_create_action_last(struct mlx5hws_context * ctx)623 mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
624 {
625 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
626 
627 	return mlx5hws_action_create_last(ctx, flags);
628 }
629 
630 static struct mlx5hws_action *
mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx * create_ctx)631 mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
632 {
633 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
634 
635 	switch (create_ctx->actions_type) {
636 	case MLX5HWS_ACTION_TYP_CTR:
637 		return mlx5hws_action_create_counter(create_ctx->hws_ctx,
638 						     create_ctx->id, flags);
639 	case MLX5HWS_ACTION_TYP_ASO_METER:
640 		return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
641 						       create_ctx->id,
642 						       create_ctx->return_reg_id,
643 						       flags);
644 	case MLX5HWS_ACTION_TYP_SAMPLER:
645 		return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
646 							  create_ctx->id, flags);
647 	default:
648 		return NULL;
649 	}
650 }
651 
/* Take a reference on the cached HWS action, creating it on first use
 * via @create_ctx. Fast path: refcount_inc_not_zero with no lock; slow
 * path: double-checked under fs_hws_data->lock so only one caller
 * performs the creation. Returns the action, or NULL if creation failed.
 */
struct mlx5hws_action *
mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
		       struct mlx5_fs_hws_create_action_ctx *create_ctx)
{
	/* try avoid locking if not necessary */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
		return fs_hws_data->hws_action;

	mutex_lock(&fs_hws_data->lock);
	/* Re-check under the lock: another thread may have created it. */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return fs_hws_data->hws_action;
	}
	fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
	if (!fs_hws_data->hws_action) {
		mutex_unlock(&fs_hws_data->lock);
		return NULL;
	}
	/* Publish the action with an initial reference for this caller. */
	refcount_set(&fs_hws_data->hws_action_refcount, 1);
	mutex_unlock(&fs_hws_data->lock);

	return fs_hws_data->hws_action;
}
675 
/* Drop a reference on the cached HWS action; the last reference destroys
 * the action (the fs_hws_data entry itself stays cached for reuse).
 * Mirrors the locking scheme of mlx5_fs_get_hws_action().
 */
void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
{
	if (!fs_hws_data)
		return;

	/* try avoid locking if not necessary */
	if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
		return;

	/* Possibly the last reference: take the lock so the drop-to-zero
	 * and destruction are atomic with respect to concurrent getters.
	 */
	mutex_lock(&fs_hws_data->lock);
	if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return;
	}
	mlx5hws_action_destroy(fs_hws_data->hws_action);
	fs_hws_data->hws_action = NULL;
	mutex_unlock(&fs_hws_data->lock);
}
694 
mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action * fs_action)695 static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
696 				      struct mlx5_fs_hws_rule_action *fs_action)
697 {
698 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
699 
700 	switch (mlx5hws_action_get_type(fs_action->action)) {
701 	case MLX5HWS_ACTION_TYP_CTR:
702 		mlx5_fc_put_hws_action(fs_action->counter);
703 		break;
704 	case MLX5HWS_ACTION_TYP_ASO_METER:
705 		mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
706 		break;
707 	case MLX5HWS_ACTION_TYP_SAMPLER:
708 		mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
709 		break;
710 	default:
711 		mlx5hws_action_destroy(fs_action->action);
712 	}
713 }
714 
715 static void
mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action ** fs_actions,int * num_fs_actions)716 mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
717 			   struct mlx5_fs_hws_rule_action **fs_actions,
718 			   int *num_fs_actions)
719 {
720 	int i;
721 
722 	/* Free in reverse order to handle action dependencies */
723 	for (i = *num_fs_actions - 1; i >= 0; i--)
724 		mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
725 	*num_fs_actions = 0;
726 	kfree(*fs_actions);
727 	*fs_actions = NULL;
728 }
729 
/* Splits an FTE's actions into cached, rule, and destination actions.
 * The cached and destination actions are saved on the FTE's hws rule.
 * The rule actions are returned as a parameter, together with their count.
 * We want to support a rule with 32 destinations, which means we need to
 * account for 32 destinations, plus usually a counter, plus one more
 * action for a multi-destination flow table.
 * 32 is a SW limitation for the array size and must be kept; the HWS
 * limitation is 16M STEs per matcher.
 */
738 #define MLX5_FLOW_CONTEXT_ACTION_MAX 34
mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte,struct mlx5hws_rule_action ** ractions)739 static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
740 				       struct mlx5_flow_table *ft,
741 				       struct mlx5_flow_group *group,
742 				       struct fs_fte *fte,
743 				       struct mlx5hws_rule_action **ractions)
744 {
745 	struct mlx5_flow_act *fte_action = &fte->act_dests.action;
746 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
747 	struct mlx5hws_action_dest_attr *dest_actions;
748 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
749 	struct mlx5_fs_hws_rule_action *fs_actions;
750 	struct mlx5_core_dev *dev = ns->dev;
751 	struct mlx5hws_action *dest_action;
752 	struct mlx5hws_action *tmp_action;
753 	struct mlx5_fs_hws_pr *pr_data;
754 	struct mlx5_fs_hws_mh *mh_data;
755 	bool delay_encap_set = false;
756 	struct mlx5_flow_rule *dst;
757 	int num_dest_actions = 0;
758 	int num_fs_actions = 0;
759 	int num_actions = 0;
760 	int err;
761 
762 	*ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
763 			    GFP_KERNEL);
764 	if (!*ractions) {
765 		err = -ENOMEM;
766 		goto out_err;
767 	}
768 
769 	fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
770 			     sizeof(*fs_actions), GFP_KERNEL);
771 	if (!fs_actions) {
772 		err = -ENOMEM;
773 		goto free_actions_alloc;
774 	}
775 
776 	dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
777 			       sizeof(*dest_actions), GFP_KERNEL);
778 	if (!dest_actions) {
779 		err = -ENOMEM;
780 		goto free_fs_actions_alloc;
781 	}
782 
783 	/* The order of the actions are must to be kept, only the following
784 	 * order is supported by HW steering:
785 	 * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
786 	 *      -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
787 	 *      -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
788 	 */
789 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
790 		tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
791 		if (!tmp_action) {
792 			err = -ENOMEM;
793 			goto free_dest_actions_alloc;
794 		}
795 		(*ractions)[num_actions++].action = tmp_action;
796 	}
797 
798 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
799 		int reformat_type = fte_action->pkt_reformat->reformat_type;
800 
801 		if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
802 			mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
803 			err = -EINVAL;
804 			goto free_actions;
805 		}
806 
807 		if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
808 			pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
809 			(*ractions)[num_actions].reformat.offset = pr_data->offset;
810 			(*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
811 			(*ractions)[num_actions].reformat.data = pr_data->data;
812 			(*ractions)[num_actions++].action =
813 				fte_action->pkt_reformat->fs_hws_action.hws_action;
814 		} else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
815 			(*ractions)[num_actions++].action =
816 				fte_action->pkt_reformat->fs_hws_action.hws_action;
817 		} else {
818 			delay_encap_set = true;
819 		}
820 	}
821 
822 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
823 		tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
824 		if (!tmp_action) {
825 			err = -ENOMEM;
826 			goto free_actions;
827 		}
828 		(*ractions)[num_actions++].action = tmp_action;
829 	}
830 
831 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
832 		tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
833 		if (!tmp_action) {
834 			err = -ENOMEM;
835 			goto free_actions;
836 		}
837 		(*ractions)[num_actions++].action = tmp_action;
838 	}
839 
840 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
841 		mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
842 		(*ractions)[num_actions].modify_header.offset = mh_data->offset;
843 		(*ractions)[num_actions].modify_header.data = mh_data->data;
844 		(*ractions)[num_actions++].action =
845 			fte_action->modify_hdr->fs_hws_action.hws_action;
846 	}
847 
848 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
849 		tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
850 		if (!tmp_action) {
851 			err = -ENOMEM;
852 			goto free_actions;
853 		}
854 		(*ractions)[num_actions].push_vlan.vlan_hdr =
855 			htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
856 		(*ractions)[num_actions++].action = tmp_action;
857 	}
858 
859 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
860 		tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
861 		if (!tmp_action) {
862 			err = -ENOMEM;
863 			goto free_actions;
864 		}
865 		(*ractions)[num_actions].push_vlan.vlan_hdr =
866 			htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
867 		(*ractions)[num_actions++].action = tmp_action;
868 	}
869 
870 	if (delay_encap_set) {
871 		pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
872 		(*ractions)[num_actions].reformat.offset = pr_data->offset;
873 		(*ractions)[num_actions].reformat.data = pr_data->data;
874 		(*ractions)[num_actions++].action =
875 			fte_action->pkt_reformat->fs_hws_action.hws_action;
876 	}
877 
878 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
879 		list_for_each_entry(dst, &fte->node.children, node.list) {
880 			struct mlx5_fc *counter;
881 
882 			if (dst->dest_attr.type !=
883 			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
884 				continue;
885 
886 			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
887 				err = -EOPNOTSUPP;
888 				goto free_actions;
889 			}
890 
891 			counter = dst->dest_attr.counter;
892 			tmp_action = mlx5_fc_get_hws_action(ctx, counter);
893 			if (!tmp_action) {
894 				err = -EINVAL;
895 				goto free_actions;
896 			}
897 
898 			(*ractions)[num_actions].counter.offset =
899 				mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
900 			(*ractions)[num_actions++].action = tmp_action;
901 			fs_actions[num_fs_actions].action = tmp_action;
902 			fs_actions[num_fs_actions++].counter = counter;
903 		}
904 	}
905 
906 	if (fte->act_dests.flow_context.flow_tag) {
907 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
908 			err = -EOPNOTSUPP;
909 			goto free_actions;
910 		}
911 		tmp_action = mlx5_fs_get_action_tag(fs_ctx);
912 		if (!tmp_action) {
913 			err = -ENOMEM;
914 			goto free_actions;
915 		}
916 		(*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
917 		(*ractions)[num_actions++].action = tmp_action;
918 	}
919 
920 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
921 		if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
922 		    num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
923 			err = -EOPNOTSUPP;
924 			goto free_actions;
925 		}
926 
927 		tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
928 							  &fte_action->exe_aso);
929 		if (!tmp_action) {
930 			err = -ENOMEM;
931 			goto free_actions;
932 		}
933 		(*ractions)[num_actions].aso_meter.offset =
934 			fte_action->exe_aso.flow_meter.meter_idx;
935 		(*ractions)[num_actions].aso_meter.init_color =
936 			fte_action->exe_aso.flow_meter.init_color;
937 		(*ractions)[num_actions++].action = tmp_action;
938 		fs_actions[num_fs_actions].action = tmp_action;
939 		fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
940 	}
941 
942 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
943 		dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
944 		if (!dest_action) {
945 			err = -ENOMEM;
946 			goto free_actions;
947 		}
948 		dest_actions[num_dest_actions++].dest = dest_action;
949 	}
950 
951 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
952 		list_for_each_entry(dst, &fte->node.children, node.list) {
953 			struct mlx5_flow_destination *attr = &dst->dest_attr;
954 			bool type_uplink =
955 				attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
956 
957 			if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
958 			    num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
959 				err = -EOPNOTSUPP;
960 				goto free_actions;
961 			}
962 			if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
963 				continue;
964 
965 			switch (attr->type) {
966 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
967 				dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
968 				if (dst->dest_attr.ft->flags &
969 				    MLX5_FLOW_TABLE_UPLINK_VPORT)
970 					dest_actions[num_dest_actions].is_wire_ft = true;
971 				break;
972 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
973 				dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
974 										dst);
975 				if (dest_action)
976 					break;
977 				dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
978 										   dst);
979 				fs_actions[num_fs_actions++].action = dest_action;
980 				break;
981 			case MLX5_FLOW_DESTINATION_TYPE_RANGE:
982 				dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
983 				fs_actions[num_fs_actions++].action = dest_action;
984 				break;
985 			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
986 			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
987 				dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
988 									    type_uplink);
989 				break;
990 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
991 				dest_action =
992 					mlx5_fs_get_dest_action_sampler(fs_ctx,
993 									dst);
994 				fs_actions[num_fs_actions].action = dest_action;
995 				fs_actions[num_fs_actions++].sampler_id =
996 							dst->dest_attr.sampler_id;
997 				break;
998 			default:
999 				err = -EOPNOTSUPP;
1000 				goto free_actions;
1001 			}
1002 			if (!dest_action) {
1003 				err = -ENOMEM;
1004 				goto free_actions;
1005 			}
1006 			dest_actions[num_dest_actions++].dest = dest_action;
1007 		}
1008 	}
1009 
1010 	if (num_dest_actions == 1) {
1011 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1012 			err = -EOPNOTSUPP;
1013 			goto free_actions;
1014 		}
1015 		(*ractions)[num_actions++].action = dest_actions->dest;
1016 	} else if (num_dest_actions > 1) {
1017 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1018 		    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1019 			err = -EOPNOTSUPP;
1020 			goto free_actions;
1021 		}
1022 		tmp_action =
1023 			mlx5_fs_create_action_dest_array(ctx, dest_actions,
1024 							 num_dest_actions);
1025 		if (!tmp_action) {
1026 			err = -EOPNOTSUPP;
1027 			goto free_actions;
1028 		}
1029 		fs_actions[num_fs_actions++].action = tmp_action;
1030 		(*ractions)[num_actions++].action = tmp_action;
1031 	}
1032 
1033 	if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1034 	    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1035 		err = -EOPNOTSUPP;
1036 		goto free_actions;
1037 	}
1038 
1039 	tmp_action = mlx5_fs_create_action_last(ctx);
1040 	if (!tmp_action) {
1041 		err = -ENOMEM;
1042 		goto free_actions;
1043 	}
1044 	fs_actions[num_fs_actions++].action = tmp_action;
1045 	(*ractions)[num_actions++].action = tmp_action;
1046 
1047 	kfree(dest_actions);
1048 
1049 	/* Actions created specifically for this rule will be destroyed
1050 	 * once rule is deleted.
1051 	 */
1052 	fte->fs_hws_rule.num_fs_actions = num_fs_actions;
1053 	fte->fs_hws_rule.hws_fs_actions = fs_actions;
1054 
1055 	return 0;
1056 
1057 free_actions:
1058 	mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
1059 free_dest_actions_alloc:
1060 	kfree(dest_actions);
1061 free_fs_actions_alloc:
1062 	kfree(fs_actions);
1063 free_actions_alloc:
1064 	kfree(*ractions);
1065 	*ractions = NULL;
1066 out_err:
1067 	return err;
1068 }
1069 
mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte)1070 static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
1071 				   struct mlx5_flow_table *ft,
1072 				   struct mlx5_flow_group *group,
1073 				   struct fs_fte *fte)
1074 {
1075 	struct mlx5hws_match_parameters params;
1076 	struct mlx5hws_rule_action *ractions;
1077 	struct mlx5hws_bwc_rule *rule;
1078 	int err = 0;
1079 
1080 	if (mlx5_fs_cmd_is_fw_term_table(ft))
1081 		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
1082 
1083 	err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
1084 	if (err)
1085 		goto out_err;
1086 
1087 	params.match_sz = sizeof(fte->val);
1088 	params.match_buf = fte->val;
1089 
1090 	rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
1091 				       fte->act_dests.flow_context.flow_source,
1092 				       ractions);
1093 	kfree(ractions);
1094 	if (!rule) {
1095 		err = -EINVAL;
1096 		goto free_actions;
1097 	}
1098 
1099 	fte->fs_hws_rule.bwc_rule = rule;
1100 	return 0;
1101 
1102 free_actions:
1103 	mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
1104 				   &fte->fs_hws_rule.num_fs_actions);
1105 out_err:
1106 	mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
1107 	return err;
1108 }
1109 
mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct fs_fte * fte)1110 static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
1111 				   struct mlx5_flow_table *ft,
1112 				   struct fs_fte *fte)
1113 {
1114 	struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
1115 	int err;
1116 
1117 	if (mlx5_fs_cmd_is_fw_term_table(ft))
1118 		return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
1119 
1120 	err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
1121 	rule->bwc_rule = NULL;
1122 
1123 	mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
1124 				   &rule->num_fs_actions);
1125 
1126 	return err;
1127 }
1128 
mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,int modify_mask,struct fs_fte * fte)1129 static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
1130 				   struct mlx5_flow_table *ft,
1131 				   struct mlx5_flow_group *group,
1132 				   int modify_mask,
1133 				   struct fs_fte *fte)
1134 {
1135 	int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
1136 		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
1137 		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1138 	struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
1139 	struct mlx5hws_rule_action *ractions;
1140 	int saved_num_fs_actions;
1141 	int ret;
1142 
1143 	if (mlx5_fs_cmd_is_fw_term_table(ft))
1144 		return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
1145 							     modify_mask, fte);
1146 
1147 	if ((modify_mask & ~allowed_mask) != 0)
1148 		return -EINVAL;
1149 
1150 	saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
1151 	saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
1152 
1153 	ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
1154 	if (ret)
1155 		return ret;
1156 
1157 	ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
1158 	kfree(ractions);
1159 	if (ret)
1160 		goto restore_actions;
1161 
1162 	mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
1163 				   &saved_num_fs_actions);
1164 	return ret;
1165 
1166 restore_actions:
1167 	mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
1168 				   &fte->fs_hws_rule.num_fs_actions);
1169 	fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
1170 	fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
1171 	return ret;
1172 }
1173 
1174 static struct mlx5hws_action *
mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context * ctx)1175 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
1176 {
1177 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
1178 	struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
1179 
1180 	/* MAC anchor not supported in HWS reformat, use VLAN anchor */
1181 	remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
1182 	remove_hdr_vlan.offset = 0;
1183 	remove_hdr_vlan.size = sizeof(struct vlan_hdr);
1184 	return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
1185 }
1186 
1187 static struct mlx5hws_action *
mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_pkt_reformat_params * params)1188 mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
1189 				      struct mlx5_pkt_reformat_params *params)
1190 {
1191 	if (!params ||
1192 	    params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
1193 	    params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
1194 	    params->size != sizeof(struct vlan_hdr))
1195 		return NULL;
1196 
1197 	return fs_ctx->hws_pool.remove_hdr_vlan_action;
1198 }
1199 
1200 static int
mlx5_fs_verify_insert_header_params(struct mlx5_core_dev * mdev,struct mlx5_pkt_reformat_params * params)1201 mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
1202 				    struct mlx5_pkt_reformat_params *params)
1203 {
1204 	if ((!params->data && params->size) || (params->data && !params->size) ||
1205 	    MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
1206 	    MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
1207 		mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
1208 		return -EINVAL;
1209 	}
1210 	if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
1211 	    params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
1212 	    params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
1213 		mlx5_core_err(mdev, "Only vlan insert header supported\n");
1214 		return -EOPNOTSUPP;
1215 	}
1216 	return 0;
1217 }
1218 
1219 static int
mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev * dev,struct mlx5_pkt_reformat_params * params)1220 mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
1221 				  struct mlx5_pkt_reformat_params *params)
1222 {
1223 	if (params->param_0 || params->param_1) {
1224 		mlx5_core_err(dev, "Invalid reformat params\n");
1225 		return -EINVAL;
1226 	}
1227 	return 0;
1228 }
1229 
1230 static struct mlx5_fs_pool *
mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev * dev,struct xarray * pr_pools,enum mlx5hws_action_type reformat_type,size_t size)1231 mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
1232 			  enum mlx5hws_action_type reformat_type, size_t size)
1233 {
1234 	struct mlx5_fs_pool *pr_pool;
1235 	unsigned long index = size;
1236 	int err;
1237 
1238 	pr_pool = xa_load(pr_pools, index);
1239 	if (pr_pool)
1240 		return pr_pool;
1241 
1242 	pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
1243 	if (!pr_pool)
1244 		return ERR_PTR(-ENOMEM);
1245 	err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
1246 	if (err)
1247 		goto free_pr_pool;
1248 	err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
1249 	if (err)
1250 		goto cleanup_pr_pool;
1251 	return pr_pool;
1252 
1253 cleanup_pr_pool:
1254 	mlx5_fs_hws_pr_pool_cleanup(pr_pool);
1255 free_pr_pool:
1256 	kfree(pr_pool);
1257 	return ERR_PTR(err);
1258 }
1259 
/* Unlink a packet-reformat pool from @pr_pools and free its resources. */
static void
mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
			unsigned long index)
{
	xa_erase(pr_pools, index);
	mlx5_fs_hws_pr_pool_cleanup(pool);
	kfree(pool);
}
1268 
/* Allocate a packet-reformat action backed by HWS.
 *
 * Encap/decap and insert-header reformats are served from per-type
 * packet-reformat pools (bulks of HWS actions); REMOVE_HDR maps to the
 * single shared VLAN remove-header action. On success the HWS action
 * (and, for pooled types, the acquired pool entry plus a private copy of
 * the reformat data) is stored in @pkt_reformat->fs_hws_action.
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_pkt_reformat_params *params,
				   enum mlx5_flow_namespace_type namespace,
				   struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
	struct mlx5_fs_hws_actions_pool *hws_pool;
	struct mlx5hws_action *hws_action = NULL;
	struct mlx5_fs_hws_pr *pr_data = NULL;
	struct mlx5_fs_pool *pr_pool = NULL;
	struct mlx5_core_dev *dev = ns->dev;
	u8 hdr_idx = 0;
	int err;

	if (!params)
		return -EINVAL;

	hws_pool = &fs_ctx->hws_pool;

	/* Pick the pool (or shared action) matching the reformat type. */
	switch (params->type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		if (mlx5_fs_verify_encap_decap_params(dev, params))
			return -EINVAL;
		/* Encap pools are keyed by reformat data size. */
		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
						    params->size);
		if (IS_ERR(pr_pool))
			return PTR_ERR(pr_pool);
		break;
	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		if (mlx5_fs_verify_encap_decap_params(dev, params))
			return -EINVAL;
		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
						    params->size);
		if (IS_ERR(pr_pool))
			return PTR_ERR(pr_pool);
		break;
	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		if (mlx5_fs_verify_encap_decap_params(dev, params))
			return -EINVAL;
		pr_pool = &hws_pool->dl3tnltol2_pool;
		/* Header index selects between a bare MAC header and a
		 * MAC+VLAN header, based on the requested size.
		 */
		hdr_idx = params->size == ETH_HLEN ?
			  MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
			  MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
		break;
	case MLX5_REFORMAT_TYPE_INSERT_HDR:
		err = mlx5_fs_verify_insert_header_params(dev, params);
		if (err)
			return err;
		pr_pool = &hws_pool->insert_hdr_pool;
		break;
	case MLX5_REFORMAT_TYPE_REMOVE_HDR:
		/* Shared action; no pool entry is acquired for this type. */
		hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
		if (!hws_action)
			mlx5_core_err(dev, "Only vlan remove header supported\n");
		break;
	default:
		mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
			      params->type);
		return -EOPNOTSUPP;
	}

	if (pr_pool) {
		pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
		if (IS_ERR_OR_NULL(pr_data))
			return !pr_data ? -EINVAL : PTR_ERR(pr_data);
		hws_action = pr_data->bulk->hws_action;
		if (!hws_action) {
			mlx5_core_err(dev,
				      "Failed allocating packet-reformat action\n");
			err = -EINVAL;
			goto release_pr;
		}
		/* Keep a private copy of the reformat data; it is needed
		 * for rule insertion and for lazily allocating a FW
		 * reformat ID later on.
		 */
		pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
		if (!pr_data->data) {
			err = -ENOMEM;
			goto release_pr;
		}
		pr_data->hdr_idx = hdr_idx;
		pr_data->data_size = params->size;
		pkt_reformat->fs_hws_action.pr_data = pr_data;
	}

	mutex_init(&pkt_reformat->fs_hws_action.lock);
	pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
	pkt_reformat->fs_hws_action.hws_action = hws_action;
	return 0;

release_pr:
	if (pr_pool && pr_data)
		mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
	return err;
}
1366 
mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat * pkt_reformat)1367 static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
1368 						 struct mlx5_pkt_reformat *pkt_reformat)
1369 {
1370 	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
1371 	struct mlx5_core_dev *dev = ns->dev;
1372 	struct mlx5_fs_hws_pr *pr_data;
1373 	struct mlx5_fs_pool *pr_pool;
1374 
1375 	if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
1376 		struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
1377 
1378 		fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
1379 		mlx5_fs_cmd_get_fw_cmds()->
1380 			packet_reformat_dealloc(ns, &fw_pkt_reformat);
1381 		pkt_reformat->fs_hws_action.fw_reformat_id = 0;
1382 	}
1383 
1384 	if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
1385 		return;
1386 
1387 	if (!pkt_reformat->fs_hws_action.pr_data) {
1388 		mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1389 		return;
1390 	}
1391 	pr_data = pkt_reformat->fs_hws_action.pr_data;
1392 
1393 	switch (pkt_reformat->reformat_type) {
1394 	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1395 	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1396 	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1397 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1398 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1399 						    pr_data->data_size);
1400 		break;
1401 	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1402 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1403 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1404 						    pr_data->data_size);
1405 		break;
1406 	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1407 		pr_pool = &hws_pool->dl3tnltol2_pool;
1408 		break;
1409 	case MLX5_REFORMAT_TYPE_INSERT_HDR:
1410 		pr_pool = &hws_pool->insert_hdr_pool;
1411 		break;
1412 	default:
1413 		mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
1414 		return;
1415 	}
1416 	if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
1417 		mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1418 		return;
1419 	}
1420 	kfree(pr_data->data);
1421 	mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1422 	pkt_reformat->fs_hws_action.pr_data = NULL;
1423 }
1424 
1425 static struct mlx5_fs_pool *
mlx5_fs_create_mh_pool(struct mlx5_core_dev * dev,struct mlx5hws_action_mh_pattern * pattern,struct xarray * mh_pools,unsigned long index)1426 mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
1427 		       struct mlx5hws_action_mh_pattern *pattern,
1428 		       struct xarray *mh_pools, unsigned long index)
1429 {
1430 	struct mlx5_fs_pool *pool;
1431 	int err;
1432 
1433 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1434 	if (!pool)
1435 		return ERR_PTR(-ENOMEM);
1436 	err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
1437 	if (err)
1438 		goto free_pool;
1439 	err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
1440 	if (err)
1441 		goto cleanup_pool;
1442 	return pool;
1443 
1444 cleanup_pool:
1445 	mlx5_fs_hws_mh_pool_cleanup(pool);
1446 free_pool:
1447 	kfree(pool);
1448 	return ERR_PTR(err);
1449 }
1450 
/* Unlink a modify-header pool from @mh_pools and free its resources. */
static void
mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
			unsigned long index)
{
	xa_erase(mh_pools, index);
	mlx5_fs_hws_mh_pool_cleanup(pool);
	kfree(pool);
}
1459 
/* Allocate a modify-header action from an HWS modify-header pool.
 *
 * Pools are per action pattern: the existing pools are scanned for a
 * matching pattern; if none matches, a new pool is created and inserted
 * at the next free slot. A private copy of the action data is kept for
 * later rule insertion.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					    u8 namespace, u8 num_actions,
					    void *modify_actions,
					    struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
	struct mlx5hws_action_mh_pattern pattern = {};
	struct mlx5_fs_hws_mh *mh_data = NULL;
	struct mlx5hws_action *hws_action;
	struct mlx5_fs_pool *pool;
	unsigned long i, cnt = 0;
	bool known_pattern;
	int err;

	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	pattern.data = modify_actions;

	/* Look for an existing pool serving this exact pattern; cnt ends
	 * up as the index used if a new pool has to be created.
	 */
	known_pattern = false;
	xa_for_each(&hws_pool->mh_pools, i, pool) {
		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
			known_pattern = true;
			break;
		}
		cnt++;
	}

	if (!known_pattern) {
		pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
					      &hws_pool->mh_pools, cnt);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
	}
	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
	if (IS_ERR(mh_data)) {
		err = PTR_ERR(mh_data);
		goto destroy_pool;
	}
	hws_action = mh_data->bulk->hws_action;
	/* Private copy of the modify-header data for rule insertion. */
	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
	if (!mh_data->data) {
		err = -ENOMEM;
		goto release_mh;
	}
	modify_hdr->fs_hws_action.mh_data = mh_data;
	modify_hdr->fs_hws_action.fs_pool = pool;
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
	modify_hdr->fs_hws_action.hws_action = hws_action;

	return 0;

release_mh:
	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
destroy_pool:
	/* Only tear down a pool that was created by this call. */
	if (!known_pattern)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
	return err;
}
1517 
mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)1518 static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
1519 					       struct mlx5_modify_hdr *modify_hdr)
1520 {
1521 	struct mlx5_fs_hws_mh *mh_data;
1522 	struct mlx5_fs_pool *pool;
1523 
1524 	if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
1525 		mlx5_core_err(ns->dev, "Failed release modify-header\n");
1526 		return;
1527 	}
1528 
1529 	mh_data = modify_hdr->fs_hws_action.mh_data;
1530 	kfree(mh_data->data);
1531 	pool = modify_hdr->fs_hws_action.fs_pool;
1532 	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
1533 	modify_hdr->fs_hws_action.mh_data = NULL;
1534 }
1535 
1536 int
mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat * pkt_reformat,u32 * reformat_id)1537 mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
1538 				       u32 *reformat_id)
1539 {
1540 	enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
1541 	struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
1542 	u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
1543 	struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
1544 	struct mlx5_pkt_reformat_params params = { 0 };
1545 	struct mlx5_flow_root_namespace *ns;
1546 	struct mlx5_core_dev *dev;
1547 	int ret;
1548 
1549 	mutex_lock(lock);
1550 
1551 	if (*id != 0) {
1552 		*reformat_id = *id;
1553 		ret = 0;
1554 		goto unlock;
1555 	}
1556 
1557 	dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
1558 	if (!dev) {
1559 		ret = -EINVAL;
1560 		goto unlock;
1561 	}
1562 
1563 	ns = mlx5_get_root_namespace(dev, ns_type);
1564 	if (!ns) {
1565 		ret = -EINVAL;
1566 		goto unlock;
1567 	}
1568 
1569 	params.type = pkt_reformat->reformat_type;
1570 	params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
1571 	params.data = pkt_reformat->fs_hws_action.pr_data->data;
1572 
1573 	ret = mlx5_fs_cmd_get_fw_cmds()->
1574 		packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
1575 	if (ret)
1576 		goto unlock;
1577 
1578 	*id = fw_pkt_reformat.id;
1579 	*reformat_id = *id;
1580 	ret = 0;
1581 
1582 unlock:
1583 	mutex_unlock(lock);
1584 
1585 	return ret;
1586 }
1587 
/* Match definers are not supported in HWS steering mode. */
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
					     u16 format_id, u32 *match_mask)
{
	return -EOPNOTSUPP;
}
1593 
/* Match definers are not supported in HWS steering mode. */
static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					      int definer_id)
{
	return -EOPNOTSUPP;
}
1599 
mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace * ns,enum fs_flow_table_type ft_type)1600 static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
1601 					 enum fs_flow_table_type ft_type)
1602 {
1603 	if (ft_type != FS_FT_FDB)
1604 		return 0;
1605 
1606 	return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
1607 	       MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
1608 	       MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
1609 }
1610 
/* True when the device supports hardware steering (HWS). */
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5hws_is_supported(dev);
}
1615 
/* Flow-steering command interface backed by hardware steering (HWS). */
static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
	.create_flow_table = mlx5_cmd_hws_create_flow_table,
	.destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_hws_modify_flow_table,
	.update_root_ft = mlx5_cmd_hws_update_root_ft,
	.create_flow_group = mlx5_cmd_hws_create_flow_group,
	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
	.create_fte = mlx5_cmd_hws_create_fte,
	.delete_fte = mlx5_cmd_hws_delete_fte,
	.update_fte = mlx5_cmd_hws_update_fte,
	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_hws_create_match_definer,
	.destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
	.create_ns = mlx5_cmd_hws_create_ns,
	.destroy_ns = mlx5_cmd_hws_destroy_ns,
	.set_peer = mlx5_cmd_hws_set_peer,
	.get_capabilities = mlx5_cmd_hws_get_capabilities,
};
1637 
/* Return the HWS implementation of the flow-steering command interface. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
{
	return &mlx5_flow_cmds_hws;
}
1642