1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3
4 #include <linux/mlx5/vport.h>
5 #include <mlx5_core.h>
6 #include <fs_core.h>
7 #include <fs_cmd.h>
8 #include "fs_hws_pools.h"
9 #include "mlx5hws.h"
10
11 #define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
12 #define MLX5HWS_CTX_QUEUE_SIZE 256
13
14 static struct mlx5hws_action *
15 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
16 static void
17 mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
18 unsigned long index);
19 static void
20 mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
21 unsigned long index);
22
/* Initialize the pool of HWS actions shared by all rules in this namespace.
 *
 * Creates the singleton shared actions (tag, pop/push vlan, drop, L2 decap,
 * remove-header-vlan), then the insert-header and L3-tunnel-decap
 * packet-reformat pools, and finally the xarrays that lazily cache
 * per-id actions.
 *
 * Creation order matters: the goto ladder tears down, in reverse order,
 * exactly what was created before the failure point.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
					 struct mlx5_fs_hws_context *fs_ctx)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
	enum mlx5hws_action_type action_type;
	int err = -ENOSPC;

	hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
	if (!hws_pool->tag_action)
		return err;
	hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
	if (!hws_pool->pop_vlan_action)
		goto destroy_tag;
	hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
	if (!hws_pool->push_vlan_action)
		goto destroy_pop_vlan;
	hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
	if (!hws_pool->drop_action)
		goto destroy_push_vlan;
	action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
	/* L2-to-L2 decap carries no header data, hence the empty reformat_hdr */
	hws_pool->decapl2_action =
		mlx5hws_action_create_reformat(ctx, action_type, 1,
					       &reformat_hdr, 0, flags);
	if (!hws_pool->decapl2_action)
		goto destroy_drop;
	hws_pool->remove_hdr_vlan_action =
		mlx5_fs_create_action_remove_header_vlan(ctx);
	if (!hws_pool->remove_hdr_vlan_action)
		goto destroy_decapl2;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_INSERT_HEADER);
	if (err)
		goto destroy_remove_hdr;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
	if (err)
		goto cleanup_insert_hdr;
	/* Lazily-populated caches keyed by table/vport/meter/sampler ids */
	xa_init(&hws_pool->el2tol3tnl_pools);
	xa_init(&hws_pool->el2tol2tnl_pools);
	xa_init(&hws_pool->mh_pools);
	xa_init(&hws_pool->table_dests);
	xa_init(&hws_pool->vport_dests);
	xa_init(&hws_pool->vport_vhca_dests);
	xa_init(&hws_pool->aso_meters);
	xa_init(&hws_pool->sample_dests);
	return 0;

cleanup_insert_hdr:
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
destroy_remove_hdr:
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
destroy_decapl2:
	mlx5hws_action_destroy(hws_pool->decapl2_action);
destroy_drop:
	mlx5hws_action_destroy(hws_pool->drop_action);
destroy_push_vlan:
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
destroy_pop_vlan:
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
destroy_tag:
	mlx5hws_action_destroy(hws_pool->tag_action);
	return err;
}
89
/* Release every action and cache created by mlx5_fs_init_hws_actions_pool().
 *
 * Each cache xarray is drained (its entries freed or destroyed) before the
 * xarray itself is destroyed; teardown runs in the reverse of creation
 * order.  table_dests entries are not destroyed here — they are removed
 * one by one when their flow tables are destroyed, so only the xarray
 * itself is torn down.
 */
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5_fs_hws_data *fs_hws_data;
	struct mlx5hws_action *action;
	struct mlx5_fs_pool *pool;
	unsigned long i;

	xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->sample_dests);
	xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->aso_meters);
	xa_for_each(&hws_pool->vport_vhca_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_vhca_dests);
	xa_for_each(&hws_pool->vport_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_dests);
	xa_destroy(&hws_pool->table_dests);
	xa_for_each(&hws_pool->mh_pools, i, pool)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
	xa_destroy(&hws_pool->mh_pools);
	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
	xa_destroy(&hws_pool->el2tol2tnl_pools);
	xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
	xa_destroy(&hws_pool->el2tol3tnl_pools);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
	mlx5hws_action_destroy(hws_pool->decapl2_action);
	mlx5hws_action_destroy(hws_pool->drop_action);
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
	mlx5hws_action_destroy(hws_pool->tag_action);
}
129
mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace * ns)130 static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
131 {
132 struct mlx5hws_context_attr hws_ctx_attr = {};
133 int err;
134
135 hws_ctx_attr.queues = min_t(int, num_online_cpus(),
136 MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
137 hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
138
139 ns->fs_hws_context.hws_ctx =
140 mlx5hws_context_open(ns->dev, &hws_ctx_attr);
141 if (!ns->fs_hws_context.hws_ctx) {
142 mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
143 return -EINVAL;
144 }
145 err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
146 if (err) {
147 mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
148 mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
149 return err;
150 }
151 return 0;
152 }
153
mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace * ns)154 static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
155 {
156 mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
157 return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
158 }
159
/* Pair (or unpair, when peer_ns is NULL) this namespace's HWS context with
 * a peer device's context for cross-vhca steering.  Always succeeds.
 */
static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id)
{
	struct mlx5hws_context *peer_ctx;

	peer_ctx = peer_ns ? peer_ns->fs_hws_context.hws_ctx : NULL;
	mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
				 peer_vhca_id);
	return 0;
}
172
mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)173 static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
174 struct mlx5_flow_table *ft,
175 struct mlx5_flow_table *next_ft)
176 {
177 struct mlx5hws_table *next_tbl;
178 int err;
179
180 if (!ns->fs_hws_context.hws_ctx)
181 return -EINVAL;
182
183 /* if no change required, return */
184 if (!next_ft && !ft->fs_hws_table.miss_ft_set)
185 return 0;
186
187 next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
188 err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
189 if (err) {
190 mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
191 return err;
192 }
193 ft->fs_hws_table.miss_ft_set = !!next_tbl;
194 return 0;
195 }
196
mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)197 static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
198 struct mlx5_flow_table *ft)
199 {
200 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
201 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
202 struct mlx5hws_action *dest_ft_action;
203 struct xarray *dests_xa;
204 int err;
205
206 dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
207 ft->id, flags);
208 if (!dest_ft_action) {
209 mlx5_core_err(ns->dev, "Failed creating dest table action\n");
210 return -ENOMEM;
211 }
212
213 dests_xa = &fs_ctx->hws_pool.table_dests;
214 err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
215 if (err)
216 mlx5hws_action_destroy(dest_ft_action);
217 return err;
218 }
219
mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)220 static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
221 struct mlx5_flow_table *ft)
222 {
223 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
224 struct mlx5hws_action *dest_ft_action;
225 struct xarray *dests_xa;
226 int err;
227
228 dests_xa = &fs_ctx->hws_pool.table_dests;
229 dest_ft_action = xa_erase(dests_xa, ft->id);
230 if (!dest_ft_action) {
231 mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
232 return -ENOENT;
233 }
234
235 err = mlx5hws_action_destroy(dest_ft_action);
236 if (err)
237 mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
238 return err;
239 }
240
/* Create a flow table either through the FW command path (for FW-terminated
 * tables) or as a native HWS table, then cache a dest-table action for it.
 *
 * For HWS tables: only the FDB table type is supported; the default-miss is
 * wired to @next_ft when given, and max_fte is unbounded (INT_MAX) since HWS
 * matchers grow on demand.  The goto ladder unwinds miss setup and table
 * creation on failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_table_attr *ft_attr,
					  struct mlx5_flow_table *next_ft)
{
	struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
	struct mlx5hws_table_attr tbl_attr = {};
	struct mlx5hws_table *tbl;
	int err;

	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
								   next_ft);
		if (err)
			return err;
		err = mlx5_fs_add_flow_table_dest_action(ns, ft);
		if (err)
			mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
		return err;
	}

	if (ns->table_type != FS_FT_FDB) {
		mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
			      ns->table_type);
		return -EOPNOTSUPP;
	}

	tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
	tbl_attr.level = ft_attr->level;
	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
		return -EINVAL;
	}

	/* The HWS table id becomes the fs_core table id */
	ft->fs_hws_table.hws_table = tbl;
	ft->id = mlx5hws_table_get_id(tbl);

	if (next_ft) {
		err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
		if (err)
			goto destroy_table;
	}

	ft->max_fte = INT_MAX;

	err = mlx5_fs_add_flow_table_dest_action(ns, ft);
	if (err)
		goto clear_ft_miss;
	return 0;

clear_ft_miss:
	mlx5_fs_set_ft_default_miss(ns, ft, NULL);
destroy_table:
	mlx5hws_table_destroy(tbl);
	ft->fs_hws_table.hws_table = NULL;
	return err;
}
299
/* Destroy a flow table created by mlx5_cmd_hws_create_flow_table().
 *
 * Teardown is deliberately best-effort: each step logs its own failure and
 * destruction continues, so a partial failure does not leak the remaining
 * resources.  FW-terminated tables are handed back to the FW command path
 * after their cached dest action is removed.
 *
 * Return: 0 on success, last error code otherwise.
 */
static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft)
{
	int err;

	err = mlx5_fs_del_flow_table_dest_action(ns, ft);
	if (err)
		mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	/* Disconnect from the next table before destroying the HWS table */
	err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
	if (err)
		mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);

	err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
	if (err)
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);

	return err;
}
322
mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)323 static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
324 struct mlx5_flow_table *ft,
325 struct mlx5_flow_table *next_ft)
326 {
327 if (mlx5_fs_cmd_is_fw_term_table(ft))
328 return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
329
330 return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
331 }
332
/* Root-table updates always go through the FW command interface. */
static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       u32 underlay_qpn,
				       bool disconnect)
{
	const struct mlx5_flow_cmds *fw_cmds = mlx5_fs_cmd_get_fw_cmds();

	return fw_cmds->update_root_ft(ns, ft, underlay_qpn, disconnect);
}
341
/* Translate a flow-group creation into a HWS backward-compatible matcher.
 * The group's match criteria become the matcher mask and the group's start
 * flow index doubles as the matcher priority.
 * Return: 0 on success, negative errno on failure.
 */
static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft, u32 *in,
					  struct mlx5_flow_group *fg)
{
	struct mlx5hws_bwc_matcher *bwc_matcher;
	struct mlx5hws_match_parameters params;
	u8 criteria_enable;
	u32 prio;

	/* FW-terminated tables are handled by the FW command path */
	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);

	params.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	params.match_sz = sizeof(fg->mask.match_criteria);

	criteria_enable = MLX5_GET(create_flow_group_in, in,
				   match_criteria_enable);
	prio = MLX5_GET(create_flow_group_in, in, start_flow_index);
	bwc_matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
						 prio, criteria_enable,
						 &params);
	if (!bwc_matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_hws_matcher.matcher = bwc_matcher;
	return 0;
}
371
mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg)372 static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
373 struct mlx5_flow_table *ft,
374 struct mlx5_flow_group *fg)
375 {
376 if (mlx5_fs_cmd_is_fw_term_table(ft))
377 return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
378
379 return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
380 }
381
382 static struct mlx5hws_action *
mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)383 mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
384 struct mlx5_flow_rule *dst)
385 {
386 return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
387 }
388
389 static struct mlx5hws_action *
mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)390 mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
391 struct mlx5_flow_rule *dst)
392 {
393 u32 table_num = dst->dest_attr.ft_num;
394
395 return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
396 }
397
398 static struct mlx5hws_action *
mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)399 mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
400 struct mlx5_flow_rule *dst)
401 {
402 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
403 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
404 u32 table_num = dst->dest_attr.ft_num;
405
406 return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
407 }
408
409 static struct mlx5hws_action *
mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst,bool is_dest_type_uplink)410 mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
411 struct mlx5_flow_rule *dst,
412 bool is_dest_type_uplink)
413 {
414 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
415 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
416 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
417 struct mlx5hws_action *dest;
418 struct xarray *dests_xa;
419 bool vhca_id_valid;
420 unsigned long idx;
421 u16 vport_num;
422 int err;
423
424 vhca_id_valid = is_dest_type_uplink ||
425 (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
426 vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
427 if (vhca_id_valid) {
428 dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
429 idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
430 } else {
431 dests_xa = &fs_ctx->hws_pool.vport_dests;
432 idx = vport_num;
433 }
434 dest_load:
435 dest = xa_load(dests_xa, idx);
436 if (dest)
437 return dest;
438
439 dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
440 dest_attr->vport.vhca_id, flags);
441
442 err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
443 if (err) {
444 mlx5hws_action_destroy(dest);
445 dest = NULL;
446
447 if (err == -EBUSY)
448 /* xarray entry was already stored by another thread */
449 goto dest_load;
450 }
451
452 return dest;
453 }
454
455 static struct mlx5hws_action *
mlx5_fs_create_dest_action_range(struct mlx5hws_context * ctx,struct mlx5_flow_rule * dst)456 mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
457 struct mlx5_flow_rule *dst)
458 {
459 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
460 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
461
462 return mlx5hws_action_create_dest_match_range(ctx,
463 dest_attr->range.field,
464 dest_attr->range.hit_ft,
465 dest_attr->range.miss_ft,
466 dest_attr->range.min,
467 dest_attr->range.max,
468 flags);
469 }
470
/* Get the mlx5_fs_hws_data cache entry at @index, allocating and inserting
 * a fresh zero-refcount entry if none exists yet.
 *
 * The whole load-or-insert runs under xa_lock so two callers racing on the
 * same index cannot both allocate; the allocation therefore has to be
 * GFP_ATOMIC.  Entries are freed only at pool cleanup.
 *
 * Return: the cache entry, or NULL on allocation/insertion failure.
 */
static struct mlx5_fs_hws_data *
mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
{
	struct mlx5_fs_hws_data *fs_hws_data;
	int err;

	xa_lock(cache_xa);
	fs_hws_data = xa_load(cache_xa, index);
	if (!fs_hws_data) {
		/* atomic context: xa_lock is held across the allocation */
		fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
		if (!fs_hws_data) {
			xa_unlock(cache_xa);
			return NULL;
		}
		refcount_set(&fs_hws_data->hws_action_refcount, 0);
		mutex_init(&fs_hws_data->lock);
		err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
		if (err) {
			kfree(fs_hws_data);
			xa_unlock(cache_xa);
			return NULL;
		}
	}
	xa_unlock(cache_xa);

	return fs_hws_data;
}
498
499 static struct mlx5hws_action *
mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)500 mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
501 struct mlx5_exe_aso *exe_aso)
502 {
503 struct mlx5_fs_hws_create_action_ctx create_ctx;
504 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
505 struct mlx5_fs_hws_data *meter_hws_data;
506 u32 id = exe_aso->base_id;
507 struct xarray *meters_xa;
508
509 meters_xa = &fs_ctx->hws_pool.aso_meters;
510 meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
511 if (!meter_hws_data)
512 return NULL;
513
514 create_ctx.hws_ctx = ctx;
515 create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
516 create_ctx.id = id;
517 create_ctx.return_reg_id = exe_aso->return_reg_id;
518
519 return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
520 }
521
mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)522 static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
523 struct mlx5_exe_aso *exe_aso)
524 {
525 struct mlx5_fs_hws_data *meter_hws_data;
526 struct xarray *meters_xa;
527
528 meters_xa = &fs_ctx->hws_pool.aso_meters;
529 meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
530 if (!meter_hws_data)
531 return;
532 return mlx5_fs_put_hws_action(meter_hws_data);
533 }
534
535 static struct mlx5hws_action *
mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)536 mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
537 struct mlx5_flow_rule *dst)
538 {
539 struct mlx5_fs_hws_create_action_ctx create_ctx;
540 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
541 struct mlx5_fs_hws_data *sampler_hws_data;
542 u32 id = dst->dest_attr.sampler_id;
543 struct xarray *sampler_xa;
544
545 sampler_xa = &fs_ctx->hws_pool.sample_dests;
546 sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
547 if (!sampler_hws_data)
548 return NULL;
549
550 create_ctx.hws_ctx = ctx;
551 create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
552 create_ctx.id = id;
553
554 return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
555 }
556
/* Drop a reference on the shared sampler action taken by
 * mlx5_fs_get_dest_action_sampler().  A missing cache entry means there is
 * nothing to release.
 */
static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
					    u32 sampler_id)
{
	struct mlx5_fs_hws_data *sampler_hws_data;

	sampler_hws_data = xa_load(&fs_ctx->hws_pool.sample_dests, sampler_id);
	if (sampler_hws_data)
		mlx5_fs_put_hws_action(sampler_hws_data);
}
570
571 static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context * ctx,struct mlx5hws_action_dest_attr * dests,u32 num_of_dests,bool ignore_flow_level,u32 flow_source)572 mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
573 struct mlx5hws_action_dest_attr *dests,
574 u32 num_of_dests, bool ignore_flow_level,
575 u32 flow_source)
576 {
577 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
578
579 return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
580 ignore_flow_level,
581 flow_source, flags);
582 }
583
584 static struct mlx5hws_action *
mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context * fs_ctx)585 mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
586 {
587 return fs_ctx->hws_pool.push_vlan_action;
588 }
589
mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan * vlan)590 static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
591 {
592 u16 n_ethtype = vlan->ethtype;
593 u8 prio = vlan->prio;
594 u16 vid = vlan->vid;
595
596 return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
597 }
598
599 static struct mlx5hws_action *
mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context * fs_ctx)600 mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
601 {
602 return fs_ctx->hws_pool.pop_vlan_action;
603 }
604
605 static struct mlx5hws_action *
mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context * fs_ctx)606 mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
607 {
608 return fs_ctx->hws_pool.decapl2_action;
609 }
610
611 static struct mlx5hws_action *
mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context * fs_ctx)612 mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
613 {
614 return fs_ctx->hws_pool.drop_action;
615 }
616
617 static struct mlx5hws_action *
mlx5_fs_get_action_tag(struct mlx5_fs_hws_context * fs_ctx)618 mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
619 {
620 return fs_ctx->hws_pool.tag_action;
621 }
622
623 static struct mlx5hws_action *
mlx5_fs_create_action_last(struct mlx5hws_context * ctx)624 mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
625 {
626 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
627
628 return mlx5hws_action_create_last(ctx, flags);
629 }
630
631 static struct mlx5hws_action *
mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx * create_ctx)632 mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
633 {
634 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
635
636 switch (create_ctx->actions_type) {
637 case MLX5HWS_ACTION_TYP_CTR:
638 return mlx5hws_action_create_counter(create_ctx->hws_ctx,
639 create_ctx->id, flags);
640 case MLX5HWS_ACTION_TYP_ASO_METER:
641 return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
642 create_ctx->id,
643 create_ctx->return_reg_id,
644 flags);
645 case MLX5HWS_ACTION_TYP_SAMPLER:
646 return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
647 create_ctx->id, flags);
648 default:
649 return NULL;
650 }
651 }
652
/* Take a reference on the cached action in @fs_hws_data, creating it on
 * first use from @create_ctx.
 *
 * Double-checked pattern: the lock-free refcount_inc_not_zero() fast path
 * succeeds while the action is live; otherwise the mutex serializes
 * creation, and the refcount is re-checked under the lock in case another
 * thread created the action meanwhile.  refcount is set to 1 only after a
 * successful create, so concurrent fast paths cannot observe a
 * half-initialized action.
 *
 * Return: the action, or NULL on creation failure.
 */
struct mlx5hws_action *
mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
		       struct mlx5_fs_hws_create_action_ctx *create_ctx)
{
	/* try avoid locking if not necessary */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
		return fs_hws_data->hws_action;

	mutex_lock(&fs_hws_data->lock);
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return fs_hws_data->hws_action;
	}
	fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
	if (!fs_hws_data->hws_action) {
		mutex_unlock(&fs_hws_data->lock);
		return NULL;
	}
	refcount_set(&fs_hws_data->hws_action_refcount, 1);
	mutex_unlock(&fs_hws_data->lock);

	return fs_hws_data->hws_action;
}
676
/* Drop a reference taken by mlx5_fs_get_hws_action(); destroy the action
 * when the last reference goes away.
 *
 * Mirror of the get path: refcount_dec_not_one() handles the common
 * not-last-reference case without the mutex; the final decrement and the
 * destroy are serialized under the mutex so a concurrent get either
 * revives the action before the drop or creates a fresh one afterwards.
 */
void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
{
	if (!fs_hws_data)
		return;

	/* try avoid locking if not necessary */
	if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
		return;

	mutex_lock(&fs_hws_data->lock);
	if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return;
	}
	mlx5hws_action_destroy(fs_hws_data->hws_action);
	fs_hws_data->hws_action = NULL;
	mutex_unlock(&fs_hws_data->lock);
}
695
mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action * fs_action)696 static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
697 struct mlx5_fs_hws_rule_action *fs_action)
698 {
699 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
700
701 switch (mlx5hws_action_get_type(fs_action->action)) {
702 case MLX5HWS_ACTION_TYP_CTR:
703 mlx5_fc_put_hws_action(fs_action->counter);
704 break;
705 case MLX5HWS_ACTION_TYP_ASO_METER:
706 mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
707 break;
708 case MLX5HWS_ACTION_TYP_SAMPLER:
709 mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
710 break;
711 default:
712 mlx5hws_action_destroy(fs_action->action);
713 }
714 }
715
716 static void
mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action ** fs_actions,int * num_fs_actions)717 mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
718 struct mlx5_fs_hws_rule_action **fs_actions,
719 int *num_fs_actions)
720 {
721 int i;
722
723 /* Free in reverse order to handle action dependencies */
724 for (i = *num_fs_actions - 1; i >= 0; i--)
725 mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
726 *num_fs_actions = 0;
727 kfree(*fs_actions);
728 *fs_actions = NULL;
729 }
730
731 /* Splits FTE's actions into cached, rule and destination actions.
732 * The cached and destination actions are saved on the fte hws rule.
733 * The rule actions are returned as a parameter, together with their count.
734 * We want to support a rule with 32 destinations, which means we need to
735 * account for 32 destinations plus usually a counter plus one more action
736 * for a multi-destination flow table.
737 * 32 is SW limitation for array size, keep. HWS limitation is 16M STEs per matcher
738 */
739 #define MLX5_FLOW_CONTEXT_ACTION_MAX 34
mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte,struct mlx5hws_rule_action ** ractions)740 static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
741 struct mlx5_flow_table *ft,
742 struct mlx5_flow_group *group,
743 struct fs_fte *fte,
744 struct mlx5hws_rule_action **ractions)
745 {
746 struct mlx5_flow_act *fte_action = &fte->act_dests.action;
747 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
748 struct mlx5hws_action_dest_attr *dest_actions;
749 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
750 struct mlx5_fs_hws_rule_action *fs_actions;
751 struct mlx5_core_dev *dev = ns->dev;
752 struct mlx5hws_action *dest_action;
753 struct mlx5hws_action *tmp_action;
754 struct mlx5_fs_hws_pr *pr_data;
755 struct mlx5_fs_hws_mh *mh_data;
756 bool delay_encap_set = false;
757 struct mlx5_flow_rule *dst;
758 int num_dest_actions = 0;
759 int num_fs_actions = 0;
760 int num_actions = 0;
761 int err;
762
763 *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
764 GFP_KERNEL);
765 if (!*ractions) {
766 err = -ENOMEM;
767 goto out_err;
768 }
769
770 fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
771 sizeof(*fs_actions), GFP_KERNEL);
772 if (!fs_actions) {
773 err = -ENOMEM;
774 goto free_actions_alloc;
775 }
776
777 dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
778 sizeof(*dest_actions), GFP_KERNEL);
779 if (!dest_actions) {
780 err = -ENOMEM;
781 goto free_fs_actions_alloc;
782 }
783
784 /* The order of the actions are must to be kept, only the following
785 * order is supported by HW steering:
786 * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
787 * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
788 * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
789 */
790 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
791 tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
792 if (!tmp_action) {
793 err = -ENOMEM;
794 goto free_dest_actions_alloc;
795 }
796 (*ractions)[num_actions++].action = tmp_action;
797 }
798
799 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
800 int reformat_type = fte_action->pkt_reformat->reformat_type;
801
802 if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
803 mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
804 err = -EINVAL;
805 goto free_actions;
806 }
807
808 if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
809 pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
810 (*ractions)[num_actions].reformat.offset = pr_data->offset;
811 (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
812 (*ractions)[num_actions].reformat.data = pr_data->data;
813 (*ractions)[num_actions++].action =
814 fte_action->pkt_reformat->fs_hws_action.hws_action;
815 } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
816 (*ractions)[num_actions++].action =
817 fte_action->pkt_reformat->fs_hws_action.hws_action;
818 } else {
819 delay_encap_set = true;
820 }
821 }
822
823 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
824 tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
825 if (!tmp_action) {
826 err = -ENOMEM;
827 goto free_actions;
828 }
829 (*ractions)[num_actions++].action = tmp_action;
830 }
831
832 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
833 tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
834 if (!tmp_action) {
835 err = -ENOMEM;
836 goto free_actions;
837 }
838 (*ractions)[num_actions++].action = tmp_action;
839 }
840
841 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
842 mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
843 (*ractions)[num_actions].modify_header.offset = mh_data->offset;
844 (*ractions)[num_actions].modify_header.data = mh_data->data;
845 (*ractions)[num_actions++].action =
846 fte_action->modify_hdr->fs_hws_action.hws_action;
847 }
848
849 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
850 tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
851 if (!tmp_action) {
852 err = -ENOMEM;
853 goto free_actions;
854 }
855 (*ractions)[num_actions].push_vlan.vlan_hdr =
856 htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
857 (*ractions)[num_actions++].action = tmp_action;
858 }
859
860 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
861 tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
862 if (!tmp_action) {
863 err = -ENOMEM;
864 goto free_actions;
865 }
866 (*ractions)[num_actions].push_vlan.vlan_hdr =
867 htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
868 (*ractions)[num_actions++].action = tmp_action;
869 }
870
871 if (delay_encap_set) {
872 pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
873 (*ractions)[num_actions].reformat.offset = pr_data->offset;
874 (*ractions)[num_actions].reformat.data = pr_data->data;
875 (*ractions)[num_actions++].action =
876 fte_action->pkt_reformat->fs_hws_action.hws_action;
877 }
878
879 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
880 list_for_each_entry(dst, &fte->node.children, node.list) {
881 struct mlx5_fc *counter;
882
883 if (dst->dest_attr.type !=
884 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
885 continue;
886
887 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
888 err = -EOPNOTSUPP;
889 goto free_actions;
890 }
891
892 counter = dst->dest_attr.counter;
893 tmp_action = mlx5_fc_get_hws_action(ctx, counter);
894 if (!tmp_action) {
895 err = -EINVAL;
896 goto free_actions;
897 }
898
899 (*ractions)[num_actions].counter.offset =
900 mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
901 (*ractions)[num_actions++].action = tmp_action;
902 fs_actions[num_fs_actions].action = tmp_action;
903 fs_actions[num_fs_actions++].counter = counter;
904 }
905 }
906
907 if (fte->act_dests.flow_context.flow_tag) {
908 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
909 err = -EOPNOTSUPP;
910 goto free_actions;
911 }
912 tmp_action = mlx5_fs_get_action_tag(fs_ctx);
913 if (!tmp_action) {
914 err = -ENOMEM;
915 goto free_actions;
916 }
917 (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
918 (*ractions)[num_actions++].action = tmp_action;
919 }
920
921 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
922 if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
923 num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
924 err = -EOPNOTSUPP;
925 goto free_actions;
926 }
927
928 tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
929 &fte_action->exe_aso);
930 if (!tmp_action) {
931 err = -ENOMEM;
932 goto free_actions;
933 }
934 (*ractions)[num_actions].aso_meter.offset =
935 fte_action->exe_aso.flow_meter.meter_idx;
936 (*ractions)[num_actions].aso_meter.init_color =
937 fte_action->exe_aso.flow_meter.init_color;
938 (*ractions)[num_actions++].action = tmp_action;
939 fs_actions[num_fs_actions].action = tmp_action;
940 fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
941 }
942
943 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
944 dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
945 if (!dest_action) {
946 err = -ENOMEM;
947 goto free_actions;
948 }
949 dest_actions[num_dest_actions++].dest = dest_action;
950 }
951
952 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
953 list_for_each_entry(dst, &fte->node.children, node.list) {
954 struct mlx5_flow_destination *attr = &dst->dest_attr;
955 bool type_uplink =
956 attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
957
958 if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
959 num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
960 err = -EOPNOTSUPP;
961 goto free_actions;
962 }
963 if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
964 continue;
965
966 switch (attr->type) {
967 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
968 dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
969 if (dst->dest_attr.ft->flags &
970 MLX5_FLOW_TABLE_UPLINK_VPORT)
971 dest_actions[num_dest_actions].is_wire_ft = true;
972 break;
973 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
974 dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
975 dst);
976 if (dest_action)
977 break;
978 dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
979 dst);
980 fs_actions[num_fs_actions++].action = dest_action;
981 break;
982 case MLX5_FLOW_DESTINATION_TYPE_RANGE:
983 dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
984 fs_actions[num_fs_actions++].action = dest_action;
985 break;
986 case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
987 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
988 dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
989 type_uplink);
990 break;
991 case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
992 dest_action =
993 mlx5_fs_get_dest_action_sampler(fs_ctx,
994 dst);
995 fs_actions[num_fs_actions].action = dest_action;
996 fs_actions[num_fs_actions++].sampler_id =
997 dst->dest_attr.sampler_id;
998 break;
999 default:
1000 err = -EOPNOTSUPP;
1001 goto free_actions;
1002 }
1003 if (!dest_action) {
1004 err = -ENOMEM;
1005 goto free_actions;
1006 }
1007 dest_actions[num_dest_actions++].dest = dest_action;
1008 }
1009 }
1010
1011 if (num_dest_actions == 1) {
1012 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1013 err = -EOPNOTSUPP;
1014 goto free_actions;
1015 }
1016 (*ractions)[num_actions++].action = dest_actions->dest;
1017 } else if (num_dest_actions > 1) {
1018 u32 flow_source = fte->act_dests.flow_context.flow_source;
1019 bool ignore_flow_level;
1020
1021 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1022 num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1023 err = -EOPNOTSUPP;
1024 goto free_actions;
1025 }
1026 ignore_flow_level =
1027 !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
1028 tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
1029 num_dest_actions,
1030 ignore_flow_level,
1031 flow_source);
1032 if (!tmp_action) {
1033 err = -EOPNOTSUPP;
1034 goto free_actions;
1035 }
1036 fs_actions[num_fs_actions++].action = tmp_action;
1037 (*ractions)[num_actions++].action = tmp_action;
1038 }
1039
1040 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1041 num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1042 err = -EOPNOTSUPP;
1043 goto free_actions;
1044 }
1045
1046 tmp_action = mlx5_fs_create_action_last(ctx);
1047 if (!tmp_action) {
1048 err = -ENOMEM;
1049 goto free_actions;
1050 }
1051 fs_actions[num_fs_actions++].action = tmp_action;
1052 (*ractions)[num_actions++].action = tmp_action;
1053
1054 kfree(dest_actions);
1055
1056 /* Actions created specifically for this rule will be destroyed
1057 * once rule is deleted.
1058 */
1059 fte->fs_hws_rule.num_fs_actions = num_fs_actions;
1060 fte->fs_hws_rule.hws_fs_actions = fs_actions;
1061
1062 return 0;
1063
1064 free_actions:
1065 mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
1066 free_dest_actions_alloc:
1067 kfree(dest_actions);
1068 free_fs_actions_alloc:
1069 kfree(fs_actions);
1070 free_actions_alloc:
1071 kfree(*ractions);
1072 *ractions = NULL;
1073 out_err:
1074 return err;
1075 }
1076
/* Insert an FTE as an HWS rule on the group's BWC matcher.
 *
 * FW-terminated tables are delegated to the FW command set. Otherwise the
 * FTE actions are translated to an HWS rule-action array, the rule is
 * created, and the array is freed (per-rule actions created during
 * translation are stashed on the FTE and live until the rule is deleted).
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft,
				   struct mlx5_flow_group *group,
				   struct fs_fte *fte)
{
	struct mlx5hws_match_parameters params;
	struct mlx5hws_rule_action *ractions;
	struct mlx5hws_bwc_rule *rule;
	int err = 0;

	/* FW-owned termination tables go through the FW command path. */
	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);

	err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
	if (err)
		goto out_err;

	params.match_sz = sizeof(fte->val);
	params.match_buf = fte->val;

	rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
				       fte->act_dests.flow_context.flow_source,
				       ractions);
	/* ractions is only needed for rule creation; free on both paths. */
	kfree(ractions);
	if (!rule) {
		err = -EINVAL;
		goto free_actions;
	}

	fte->fs_hws_rule.bwc_rule = rule;
	return 0;

free_actions:
	/* Release per-rule actions stashed on the FTE by the translate step. */
	mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
				   &fte->fs_hws_rule.num_fs_actions);
out_err:
	mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
	return err;
}
1116
mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct fs_fte * fte)1117 static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
1118 struct mlx5_flow_table *ft,
1119 struct fs_fte *fte)
1120 {
1121 struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
1122 int err;
1123
1124 if (mlx5_fs_cmd_is_fw_term_table(ft))
1125 return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
1126
1127 err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
1128 rule->bwc_rule = NULL;
1129
1130 mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
1131 &rule->num_fs_actions);
1132
1133 return err;
1134 }
1135
/* Update an existing HWS rule in place.
 *
 * Only action/destination/counter modifications are supported (see
 * allowed_mask); any other modify bit is rejected with -EINVAL. The
 * previous per-rule actions are saved so they can be restored if the
 * in-place action update fails.
 */
static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft,
				   struct mlx5_flow_group *group,
				   int modify_mask,
				   struct fs_fte *fte)
{
	int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			   BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
			   BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
	struct mlx5hws_rule_action *ractions;
	int saved_num_fs_actions;
	int ret;

	/* FW-owned termination tables go through the FW command path. */
	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
							     modify_mask, fte);

	if ((modify_mask & ~allowed_mask) != 0)
		return -EINVAL;

	/* Save the current per-rule actions; translating the FTE below
	 * overwrites fte->fs_hws_rule with a freshly built set.
	 */
	saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
	saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;

	ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
	if (ret)
		return ret;

	ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
	/* ractions is only needed for the update call itself. */
	kfree(ractions);
	if (ret)
		goto restore_actions;

	/* New actions are live on the rule; release the saved old set. */
	mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
				   &saved_num_fs_actions);
	return ret;

restore_actions:
	/* Drop the newly created actions and put the old set back. */
	mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
				   &fte->fs_hws_rule.num_fs_actions);
	fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
	fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
	return ret;
}
1180
1181 static struct mlx5hws_action *
mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context * ctx)1182 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
1183 {
1184 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
1185 struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
1186
1187 /* MAC anchor not supported in HWS reformat, use VLAN anchor */
1188 remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
1189 remove_hdr_vlan.offset = 0;
1190 remove_hdr_vlan.size = sizeof(struct vlan_hdr);
1191 return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
1192 }
1193
1194 static struct mlx5hws_action *
mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_pkt_reformat_params * params)1195 mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
1196 struct mlx5_pkt_reformat_params *params)
1197 {
1198 if (!params ||
1199 params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
1200 params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
1201 params->size != sizeof(struct vlan_hdr))
1202 return NULL;
1203
1204 return fs_ctx->hws_pool.remove_hdr_vlan_action;
1205 }
1206
1207 static int
mlx5_fs_verify_insert_header_params(struct mlx5_core_dev * mdev,struct mlx5_pkt_reformat_params * params)1208 mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
1209 struct mlx5_pkt_reformat_params *params)
1210 {
1211 if ((!params->data && params->size) || (params->data && !params->size) ||
1212 MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
1213 MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
1214 mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
1215 return -EINVAL;
1216 }
1217 if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
1218 params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
1219 params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
1220 mlx5_core_err(mdev, "Only vlan insert header supported\n");
1221 return -EOPNOTSUPP;
1222 }
1223 return 0;
1224 }
1225
1226 static int
mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev * dev,struct mlx5_pkt_reformat_params * params)1227 mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
1228 struct mlx5_pkt_reformat_params *params)
1229 {
1230 if (params->param_0 || params->param_1) {
1231 mlx5_core_err(dev, "Invalid reformat params\n");
1232 return -EINVAL;
1233 }
1234 return 0;
1235 }
1236
1237 static struct mlx5_fs_pool *
mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev * dev,struct xarray * pr_pools,enum mlx5hws_action_type reformat_type,size_t size)1238 mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
1239 enum mlx5hws_action_type reformat_type, size_t size)
1240 {
1241 struct mlx5_fs_pool *pr_pool;
1242 unsigned long index = size;
1243 int err;
1244
1245 pr_pool = xa_load(pr_pools, index);
1246 if (pr_pool)
1247 return pr_pool;
1248
1249 pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
1250 if (!pr_pool)
1251 return ERR_PTR(-ENOMEM);
1252 err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
1253 if (err)
1254 goto free_pr_pool;
1255 err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
1256 if (err)
1257 goto cleanup_pr_pool;
1258 return pr_pool;
1259
1260 cleanup_pr_pool:
1261 mlx5_fs_hws_pr_pool_cleanup(pr_pool);
1262 free_pr_pool:
1263 kfree(pr_pool);
1264 return ERR_PTR(err);
1265 }
1266
/* Unlink a packet-reformat pool from its xarray and release it. */
static void
mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
			unsigned long index)
{
	xa_erase(pr_pools, index);
	mlx5_fs_hws_pr_pool_cleanup(pool);
	kfree(pool);
}
1275
1276 static int
mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat_params * params,enum mlx5_flow_namespace_type namespace,struct mlx5_pkt_reformat * pkt_reformat)1277 mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
1278 struct mlx5_pkt_reformat_params *params,
1279 enum mlx5_flow_namespace_type namespace,
1280 struct mlx5_pkt_reformat *pkt_reformat)
1281 {
1282 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
1283 struct mlx5_fs_hws_actions_pool *hws_pool;
1284 struct mlx5hws_action *hws_action = NULL;
1285 struct mlx5_fs_hws_pr *pr_data = NULL;
1286 struct mlx5_fs_pool *pr_pool = NULL;
1287 struct mlx5_core_dev *dev = ns->dev;
1288 u8 hdr_idx = 0;
1289 int err;
1290
1291 if (!params)
1292 return -EINVAL;
1293
1294 hws_pool = &fs_ctx->hws_pool;
1295
1296 switch (params->type) {
1297 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1298 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1299 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1300 if (mlx5_fs_verify_encap_decap_params(dev, params))
1301 return -EINVAL;
1302 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1303 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1304 params->size);
1305 if (IS_ERR(pr_pool))
1306 return PTR_ERR(pr_pool);
1307 break;
1308 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1309 if (mlx5_fs_verify_encap_decap_params(dev, params))
1310 return -EINVAL;
1311 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
1312 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
1313 params->size);
1314 if (IS_ERR(pr_pool))
1315 return PTR_ERR(pr_pool);
1316 break;
1317 case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1318 if (mlx5_fs_verify_encap_decap_params(dev, params))
1319 return -EINVAL;
1320 pr_pool = &hws_pool->dl3tnltol2_pool;
1321 hdr_idx = params->size == ETH_HLEN ?
1322 MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
1323 MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
1324 break;
1325 case MLX5_REFORMAT_TYPE_INSERT_HDR:
1326 err = mlx5_fs_verify_insert_header_params(dev, params);
1327 if (err)
1328 return err;
1329 pr_pool = &hws_pool->insert_hdr_pool;
1330 break;
1331 case MLX5_REFORMAT_TYPE_REMOVE_HDR:
1332 hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
1333 if (!hws_action)
1334 mlx5_core_err(dev, "Only vlan remove header supported\n");
1335 break;
1336 default:
1337 mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
1338 params->type);
1339 return -EOPNOTSUPP;
1340 }
1341
1342 if (pr_pool) {
1343 pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
1344 if (IS_ERR_OR_NULL(pr_data))
1345 return !pr_data ? -EINVAL : PTR_ERR(pr_data);
1346 hws_action = pr_data->bulk->hws_action;
1347 if (!hws_action) {
1348 mlx5_core_err(dev,
1349 "Failed allocating packet-reformat action\n");
1350 err = -EINVAL;
1351 goto release_pr;
1352 }
1353 pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
1354 if (!pr_data->data) {
1355 err = -ENOMEM;
1356 goto release_pr;
1357 }
1358 pr_data->hdr_idx = hdr_idx;
1359 pr_data->data_size = params->size;
1360 pkt_reformat->fs_hws_action.pr_data = pr_data;
1361 }
1362
1363 mutex_init(&pkt_reformat->fs_hws_action.lock);
1364 pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
1365 pkt_reformat->fs_hws_action.hws_action = hws_action;
1366 return 0;
1367
1368 release_pr:
1369 if (pr_pool && pr_data)
1370 mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1371 return err;
1372 }
1373
mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat * pkt_reformat)1374 static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
1375 struct mlx5_pkt_reformat *pkt_reformat)
1376 {
1377 struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
1378 struct mlx5_core_dev *dev = ns->dev;
1379 struct mlx5_fs_hws_pr *pr_data;
1380 struct mlx5_fs_pool *pr_pool;
1381
1382 if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
1383 struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
1384
1385 fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
1386 mlx5_fs_cmd_get_fw_cmds()->
1387 packet_reformat_dealloc(ns, &fw_pkt_reformat);
1388 pkt_reformat->fs_hws_action.fw_reformat_id = 0;
1389 }
1390
1391 if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
1392 return;
1393
1394 if (!pkt_reformat->fs_hws_action.pr_data) {
1395 mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1396 return;
1397 }
1398 pr_data = pkt_reformat->fs_hws_action.pr_data;
1399
1400 switch (pkt_reformat->reformat_type) {
1401 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1402 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1403 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1404 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1405 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1406 pr_data->data_size);
1407 break;
1408 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1409 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1410 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1411 pr_data->data_size);
1412 break;
1413 case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1414 pr_pool = &hws_pool->dl3tnltol2_pool;
1415 break;
1416 case MLX5_REFORMAT_TYPE_INSERT_HDR:
1417 pr_pool = &hws_pool->insert_hdr_pool;
1418 break;
1419 default:
1420 mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
1421 return;
1422 }
1423 if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
1424 mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1425 return;
1426 }
1427 kfree(pr_data->data);
1428 mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1429 pkt_reformat->fs_hws_action.pr_data = NULL;
1430 }
1431
1432 static struct mlx5_fs_pool *
mlx5_fs_create_mh_pool(struct mlx5_core_dev * dev,struct mlx5hws_action_mh_pattern * pattern,struct xarray * mh_pools,unsigned long index)1433 mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
1434 struct mlx5hws_action_mh_pattern *pattern,
1435 struct xarray *mh_pools, unsigned long index)
1436 {
1437 struct mlx5_fs_pool *pool;
1438 int err;
1439
1440 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1441 if (!pool)
1442 return ERR_PTR(-ENOMEM);
1443 err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
1444 if (err)
1445 goto free_pool;
1446 err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
1447 if (err)
1448 goto cleanup_pool;
1449 return pool;
1450
1451 cleanup_pool:
1452 mlx5_fs_hws_mh_pool_cleanup(pool);
1453 free_pool:
1454 kfree(pool);
1455 return ERR_PTR(err);
1456 }
1457
/* Unlink a modify-header pool from its xarray and release it. */
static void
mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
			unsigned long index)
{
	xa_erase(mh_pools, index);
	mlx5_fs_hws_mh_pool_cleanup(pool);
	kfree(pool);
}
1466
/* Allocate an HWS modify-header action.
 *
 * Modify-header actions are pooled by action pattern: existing pools are
 * scanned for a matching pattern, and a new pool is created when none
 * matches. The caller's action list is copied into the pool element
 * (mh_data) for later use at rule creation.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					    u8 namespace, u8 num_actions,
					    void *modify_actions,
					    struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
	struct mlx5hws_action_mh_pattern pattern = {};
	struct mlx5_fs_hws_mh *mh_data = NULL;
	struct mlx5hws_action *hws_action;
	struct mlx5_fs_pool *pool;
	unsigned long i, cnt = 0;
	bool known_pattern;
	int err;

	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	pattern.data = modify_actions;

	/* Scan existing pools for this pattern; count entries visited. */
	known_pattern = false;
	xa_for_each(&hws_pool->mh_pools, i, pool) {
		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
			known_pattern = true;
			break;
		}
		cnt++;
	}

	if (!known_pattern) {
		/* NOTE(review): cnt is the number of entries visited, not a
		 * guaranteed-free xarray index; if a pool at a lower index was
		 * ever destroyed, this insert could collide with a live entry
		 * - verify against mlx5_fs_destroy_mh_pool() usage.
		 */
		pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
					      &hws_pool->mh_pools, cnt);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
	}
	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
	if (IS_ERR(mh_data)) {
		err = PTR_ERR(mh_data);
		goto destroy_pool;
	}
	hws_action = mh_data->bulk->hws_action;
	/* Private copy of the modify actions, used when building rules. */
	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
	if (!mh_data->data) {
		err = -ENOMEM;
		goto release_mh;
	}
	modify_hdr->fs_hws_action.mh_data = mh_data;
	modify_hdr->fs_hws_action.fs_pool = pool;
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
	modify_hdr->fs_hws_action.hws_action = hws_action;

	return 0;

release_mh:
	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
destroy_pool:
	/* Only tear down a pool that was created by this very call. */
	if (!known_pattern)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
	return err;
}
1524
mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)1525 static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
1526 struct mlx5_modify_hdr *modify_hdr)
1527 {
1528 struct mlx5_fs_hws_mh *mh_data;
1529 struct mlx5_fs_pool *pool;
1530
1531 if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
1532 mlx5_core_err(ns->dev, "Failed release modify-header\n");
1533 return;
1534 }
1535
1536 mh_data = modify_hdr->fs_hws_action.mh_data;
1537 kfree(mh_data->data);
1538 pool = modify_hdr->fs_hws_action.fs_pool;
1539 mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
1540 modify_hdr->fs_hws_action.mh_data = NULL;
1541 }
1542
/* Lazily allocate (and cache) a FW reformat ID for an HWS packet-reformat.
 *
 * Some consumers need a FW object ID rather than an HWS action. The ID is
 * created once through the FW command path, cached in fw_reformat_id under
 * the per-reformat lock, and released in
 * mlx5_cmd_hws_packet_reformat_dealloc().
 *
 * Returns 0 and sets *reformat_id on success, negative errno otherwise.
 */
int
mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
				       u32 *reformat_id)
{
	enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
	struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
	u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
	struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
	struct mlx5_pkt_reformat_params params = { 0 };
	struct mlx5_flow_root_namespace *ns;
	struct mlx5_core_dev *dev;
	int ret;

	mutex_lock(lock);

	/* Already allocated on a previous call; 0 means "not yet". */
	if (*id != 0) {
		*reformat_id = *id;
		ret = 0;
		goto unlock;
	}

	dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
	if (!dev) {
		ret = -EINVAL;
		goto unlock;
	}

	ns = mlx5_get_root_namespace(dev, ns_type);
	if (!ns) {
		ret = -EINVAL;
		goto unlock;
	}

	/* NOTE(review): assumes pr_data is set, i.e. this is not a
	 * REMOVE_HDR reformat (which has no pr_data) - confirm callers.
	 */
	params.type = pkt_reformat->reformat_type;
	params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
	params.data = pkt_reformat->fs_hws_action.pr_data->data;

	ret = mlx5_fs_cmd_get_fw_cmds()->
		packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
	if (ret)
		goto unlock;

	*id = fw_pkt_reformat.id;
	*reformat_id = *id;
	ret = 0;

unlock:
	mutex_unlock(lock);

	return ret;
}
1594
/* Match definers are not exposed through the HWS steering backend. */
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
					     u16 format_id, u32 *match_mask)
{
	return -EOPNOTSUPP;
}
1600
/* Counterpart of the unsupported create; nothing can exist to destroy. */
static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					      int definer_id)
{
	return -EOPNOTSUPP;
}
1606
mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace * ns,enum fs_flow_table_type ft_type)1607 static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
1608 enum fs_flow_table_type ft_type)
1609 {
1610 if (ft_type != FS_FT_FDB)
1611 return 0;
1612
1613 return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
1614 MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
1615 MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
1616 }
1617
/* Report whether the device supports HW-managed steering (HWS). */
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5hws_is_supported(dev);
}
1622
/* Flow-steering command interface implemented on top of HWS. */
static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
	.create_flow_table = mlx5_cmd_hws_create_flow_table,
	.destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_hws_modify_flow_table,
	.update_root_ft = mlx5_cmd_hws_update_root_ft,
	.create_flow_group = mlx5_cmd_hws_create_flow_group,
	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
	.create_fte = mlx5_cmd_hws_create_fte,
	.delete_fte = mlx5_cmd_hws_delete_fte,
	.update_fte = mlx5_cmd_hws_update_fte,
	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_hws_create_match_definer,
	.destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
	.create_ns = mlx5_cmd_hws_create_ns,
	.destroy_ns = mlx5_cmd_hws_destroy_ns,
	.set_peer = mlx5_cmd_hws_set_peer,
	.get_capabilities = mlx5_cmd_hws_get_capabilities,
};
1644
/* Return the HWS-backed flow command set. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
{
	return &mlx5_flow_cmds_hws;
}
1649