1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3
4 #include <linux/mlx5/vport.h>
5 #include <mlx5_core.h>
6 #include <fs_core.h>
7 #include <fs_cmd.h>
8 #include "fs_hws_pools.h"
9 #include "mlx5hws.h"
10
11 #define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
12 #define MLX5HWS_CTX_QUEUE_SIZE 256
13
14 static struct mlx5hws_action *
15 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
16 static void
17 mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
18 unsigned long index);
19 static void
20 mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
21 unsigned long index);
22
static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
					 struct mlx5_fs_hws_context *fs_ctx)
{
	/* Pre-create the fixed set of shared HWS actions and pools used by
	 * all rules of this namespace.  On any failure, everything created
	 * so far is torn down in reverse order via the goto chain below.
	 */
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
	enum mlx5hws_action_type action_type;
	int err = -ENOSPC;	/* returned when an action create yields NULL */

	hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
	if (!hws_pool->tag_action)
		return err;
	hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
	if (!hws_pool->pop_vlan_action)
		goto destroy_tag;
	hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
	if (!hws_pool->push_vlan_action)
		goto destroy_pop_vlan;
	hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
	if (!hws_pool->drop_action)
		goto destroy_push_vlan;
	/* L2-tunnel-to-L2 decap: passes a single zeroed reformat header. */
	action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
	hws_pool->decapl2_action =
		mlx5hws_action_create_reformat(ctx, action_type, 1,
					       &reformat_hdr, 0, flags);
	if (!hws_pool->decapl2_action)
		goto destroy_drop;
	hws_pool->remove_hdr_vlan_action =
		mlx5_fs_create_action_remove_header_vlan(ctx);
	if (!hws_pool->remove_hdr_vlan_action)
		goto destroy_decapl2;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_INSERT_HEADER);
	if (err)
		goto destroy_remove_hdr;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
	if (err)
		goto cleanup_insert_hdr;
	/* Lazily-populated caches; entries are added on first use. */
	xa_init(&hws_pool->el2tol3tnl_pools);
	xa_init(&hws_pool->el2tol2tnl_pools);
	xa_init(&hws_pool->mh_pools);
	xa_init(&hws_pool->table_dests);
	xa_init(&hws_pool->vport_dests);
	xa_init(&hws_pool->vport_vhca_dests);
	xa_init(&hws_pool->aso_meters);
	xa_init(&hws_pool->sample_dests);
	return 0;

	/* Unwind in strict reverse order of creation. */
cleanup_insert_hdr:
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
destroy_remove_hdr:
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
destroy_decapl2:
	mlx5hws_action_destroy(hws_pool->decapl2_action);
destroy_drop:
	mlx5hws_action_destroy(hws_pool->drop_action);
destroy_push_vlan:
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
destroy_pop_vlan:
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
destroy_tag:
	mlx5hws_action_destroy(hws_pool->tag_action);
	return err;
}
89
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
	/* Tear down everything mlx5_fs_init_hws_actions_pool() created, in
	 * reverse order: cached per-id entries first, then the pr pools,
	 * then the shared actions.
	 */
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5_fs_hws_data *fs_hws_data;
	struct mlx5hws_action *action;
	struct mlx5_fs_pool *pool;
	unsigned long i;

	xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->sample_dests);
	xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->aso_meters);
	xa_for_each(&hws_pool->vport_vhca_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_vhca_dests);
	xa_for_each(&hws_pool->vport_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_dests);
	/* table_dests entries are removed when their tables are destroyed;
	 * only the xarray itself is released here.
	 */
	xa_destroy(&hws_pool->table_dests);
	xa_for_each(&hws_pool->mh_pools, i, pool)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
	xa_destroy(&hws_pool->mh_pools);
	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
	xa_destroy(&hws_pool->el2tol2tnl_pools);
	xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
	xa_destroy(&hws_pool->el2tol3tnl_pools);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
	mlx5hws_action_destroy(hws_pool->decapl2_action);
	mlx5hws_action_destroy(hws_pool->drop_action);
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
	mlx5hws_action_destroy(hws_pool->tag_action);
}
129
mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace * ns)130 static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
131 {
132 struct mlx5hws_context_attr hws_ctx_attr = {};
133 int err;
134
135 hws_ctx_attr.queues = min_t(int, num_online_cpus(),
136 MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
137 hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
138
139 ns->fs_hws_context.hws_ctx =
140 mlx5hws_context_open(ns->dev, &hws_ctx_attr);
141 if (!ns->fs_hws_context.hws_ctx) {
142 mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
143 return -EINVAL;
144 }
145 err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
146 if (err) {
147 mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
148 mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
149 return err;
150 }
151 return 0;
152 }
153
mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace * ns)154 static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
155 {
156 mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
157 return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
158 }
159
static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id)
{
	/* A NULL peer_ns clears the pairing. */
	struct mlx5hws_context *peer_ctx;

	peer_ctx = peer_ns ? peer_ns->fs_hws_context.hws_ctx : NULL;
	mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
				 peer_vhca_id);
	return 0;
}
172
mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)173 static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
174 struct mlx5_flow_table *ft,
175 struct mlx5_flow_table *next_ft)
176 {
177 struct mlx5hws_table *next_tbl;
178 int err;
179
180 if (!ns->fs_hws_context.hws_ctx)
181 return -EINVAL;
182
183 /* if no change required, return */
184 if (!next_ft && !ft->fs_hws_table.miss_ft_set)
185 return 0;
186
187 next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
188 err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
189 if (err) {
190 mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
191 return err;
192 }
193 ft->fs_hws_table.miss_ft_set = !!next_tbl;
194 return 0;
195 }
196
mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)197 static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
198 struct mlx5_flow_table *ft)
199 {
200 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
201 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
202 struct mlx5hws_action *dest_ft_action;
203 struct xarray *dests_xa;
204 int err;
205
206 dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
207 ft->id, flags);
208 if (!dest_ft_action) {
209 mlx5_core_err(ns->dev, "Failed creating dest table action\n");
210 return -ENOMEM;
211 }
212
213 dests_xa = &fs_ctx->hws_pool.table_dests;
214 err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
215 if (err)
216 mlx5hws_action_destroy(dest_ft_action);
217 return err;
218 }
219
mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)220 static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
221 struct mlx5_flow_table *ft)
222 {
223 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
224 struct mlx5hws_action *dest_ft_action;
225 struct xarray *dests_xa;
226 int err;
227
228 dests_xa = &fs_ctx->hws_pool.table_dests;
229 dest_ft_action = xa_erase(dests_xa, ft->id);
230 if (!dest_ft_action) {
231 mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
232 return -ENOENT;
233 }
234
235 err = mlx5hws_action_destroy(dest_ft_action);
236 if (err)
237 mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
238 return err;
239 }
240
static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_table_attr *ft_attr,
					  struct mlx5_flow_table *next_ft)
{
	struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
	struct mlx5hws_table_attr tbl_attr = {};
	struct mlx5hws_table *tbl;
	int err;

	/* FW-terminated tables are created via the FW command path, but
	 * still get a cached dest-table action so HWS rules can target them.
	 */
	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
								   next_ft);
		if (err)
			return err;
		err = mlx5_fs_add_flow_table_dest_action(ns, ft);
		if (err)
			mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
		return err;
	}

	/* This HWS path only handles FDB tables. */
	if (ns->table_type != FS_FT_FDB) {
		mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
			      ns->table_type);
		return -EOPNOTSUPP;
	}

	tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
	tbl_attr.level = ft_attr->level;
	tbl_attr.uid = ft_attr->uid;
	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
		return -EINVAL;
	}

	/* The fs-core table id is taken from the created HWS table. */
	ft->fs_hws_table.hws_table = tbl;
	ft->id = mlx5hws_table_get_id(tbl);

	if (next_ft) {
		err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
		if (err)
			goto destroy_table;
	}

	/* HWS tables have no fixed FTE capacity. */
	ft->max_fte = INT_MAX;

	err = mlx5_fs_add_flow_table_dest_action(ns, ft);
	if (err)
		goto clear_ft_miss;
	return 0;

clear_ft_miss:
	mlx5_fs_set_ft_default_miss(ns, ft, NULL);
destroy_table:
	mlx5hws_table_destroy(tbl);
	ft->fs_hws_table.hws_table = NULL;
	return err;
}
300
static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft)
{
	int err;

	/* Drop the cached dest-table action first; a failure is logged but
	 * does not abort the teardown.
	 */
	err = mlx5_fs_del_flow_table_dest_action(ns, ft);
	if (err)
		mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);

	/* FW-terminated tables are destroyed via the FW command path. */
	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	/* Disconnect the default-miss link before destroying the table. */
	err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
	if (err)
		mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);

	err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
	if (err)
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);

	return err;
}
323
mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)324 static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
325 struct mlx5_flow_table *ft,
326 struct mlx5_flow_table *next_ft)
327 {
328 if (mlx5_fs_cmd_is_fw_term_table(ft))
329 return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
330
331 return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
332 }
333
static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       u32 underlay_qpn,
				       bool disconnect)
{
	/* Root-FT updates always go through the FW command interface. */
	const struct mlx5_flow_cmds *fw_cmds = mlx5_fs_cmd_get_fw_cmds();

	return fw_cmds->update_root_ft(ns, ft, underlay_qpn, disconnect);
}
342
static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft, u32 *in,
					  struct mlx5_flow_group *fg)
{
	/* Each flow group maps to one HWS bwc matcher, built from the group
	 * match criteria; start_flow_index is reused as the priority.
	 */
	struct mlx5hws_bwc_matcher *bwc_matcher;
	struct mlx5hws_match_parameters mask;
	u8 match_criteria_enable;
	u32 priority;

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
								    fg);

	match_criteria_enable = MLX5_GET(create_flow_group_in, in,
					 match_criteria_enable);
	priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);

	bwc_matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
						 priority,
						 match_criteria_enable, &mask);
	if (!bwc_matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_hws_matcher.matcher = bwc_matcher;
	return 0;
}
372
mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg)373 static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
374 struct mlx5_flow_table *ft,
375 struct mlx5_flow_group *fg)
376 {
377 if (mlx5_fs_cmd_is_fw_term_table(ft))
378 return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
379
380 return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
381 }
382
383 static struct mlx5hws_action *
mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)384 mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
385 struct mlx5_flow_rule *dst)
386 {
387 return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
388 }
389
390 static struct mlx5hws_action *
mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)391 mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
392 struct mlx5_flow_rule *dst)
393 {
394 u32 table_num = dst->dest_attr.ft_num;
395
396 return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
397 }
398
399 static struct mlx5hws_action *
mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)400 mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
401 struct mlx5_flow_rule *dst)
402 {
403 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
404 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
405 u32 table_num = dst->dest_attr.ft_num;
406
407 return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
408 }
409
410 static struct mlx5hws_action *
mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst,bool is_dest_type_uplink)411 mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
412 struct mlx5_flow_rule *dst,
413 bool is_dest_type_uplink)
414 {
415 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
416 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
417 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
418 struct mlx5hws_action *dest;
419 struct xarray *dests_xa;
420 bool vhca_id_valid;
421 unsigned long idx;
422 u16 vport_num;
423 int err;
424
425 vhca_id_valid = is_dest_type_uplink ||
426 (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
427 vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
428 if (vhca_id_valid) {
429 dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
430 idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
431 } else {
432 dests_xa = &fs_ctx->hws_pool.vport_dests;
433 idx = vport_num;
434 }
435 dest_load:
436 dest = xa_load(dests_xa, idx);
437 if (dest)
438 return dest;
439
440 dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
441 dest_attr->vport.vhca_id, flags);
442
443 err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
444 if (err) {
445 mlx5hws_action_destroy(dest);
446 dest = NULL;
447
448 if (err == -EBUSY)
449 /* xarray entry was already stored by another thread */
450 goto dest_load;
451 }
452
453 return dest;
454 }
455
456 static struct mlx5hws_action *
mlx5_fs_create_dest_action_range(struct mlx5hws_context * ctx,struct mlx5_flow_rule * dst)457 mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
458 struct mlx5_flow_rule *dst)
459 {
460 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
461 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
462
463 return mlx5hws_action_create_dest_match_range(ctx,
464 dest_attr->range.field,
465 dest_attr->range.hit_ft,
466 dest_attr->range.miss_ft,
467 dest_attr->range.min,
468 dest_attr->range.max,
469 flags);
470 }
471
static struct mlx5_fs_hws_data *
mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
{
	/* Load-or-create the mlx5_fs_hws_data cache entry at @index.
	 * The xarray lock serializes the lookup and insert, so the
	 * allocation must be GFP_ATOMIC (spinlock held).  The entry's
	 * action itself is created later via mlx5_fs_get_hws_action().
	 */
	struct mlx5_fs_hws_data *fs_hws_data;
	int err;

	xa_lock(cache_xa);
	fs_hws_data = xa_load(cache_xa, index);
	if (!fs_hws_data) {
		fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
		if (!fs_hws_data) {
			xa_unlock(cache_xa);
			return NULL;
		}
		/* refcount 0 == no action created yet */
		refcount_set(&fs_hws_data->hws_action_refcount, 0);
		mutex_init(&fs_hws_data->lock);
		err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
		if (err) {
			kfree(fs_hws_data);
			xa_unlock(cache_xa);
			return NULL;
		}
	}
	xa_unlock(cache_xa);

	return fs_hws_data;
}
499
500 static struct mlx5hws_action *
mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)501 mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
502 struct mlx5_exe_aso *exe_aso)
503 {
504 struct mlx5_fs_hws_create_action_ctx create_ctx;
505 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
506 struct mlx5_fs_hws_data *meter_hws_data;
507 u32 id = exe_aso->base_id;
508 struct xarray *meters_xa;
509
510 meters_xa = &fs_ctx->hws_pool.aso_meters;
511 meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
512 if (!meter_hws_data)
513 return NULL;
514
515 create_ctx.hws_ctx = ctx;
516 create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
517 create_ctx.id = id;
518 create_ctx.return_reg_id = exe_aso->return_reg_id;
519
520 return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
521 }
522
mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)523 static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
524 struct mlx5_exe_aso *exe_aso)
525 {
526 struct mlx5_fs_hws_data *meter_hws_data;
527 struct xarray *meters_xa;
528
529 meters_xa = &fs_ctx->hws_pool.aso_meters;
530 meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
531 if (!meter_hws_data)
532 return;
533 return mlx5_fs_put_hws_action(meter_hws_data);
534 }
535
536 static struct mlx5hws_action *
mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)537 mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
538 struct mlx5_flow_rule *dst)
539 {
540 struct mlx5_fs_hws_create_action_ctx create_ctx;
541 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
542 struct mlx5_fs_hws_data *sampler_hws_data;
543 u32 id = dst->dest_attr.sampler_id;
544 struct xarray *sampler_xa;
545
546 sampler_xa = &fs_ctx->hws_pool.sample_dests;
547 sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
548 if (!sampler_hws_data)
549 return NULL;
550
551 create_ctx.hws_ctx = ctx;
552 create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
553 create_ctx.id = id;
554
555 return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
556 }
557
static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
					    u32 sampler_id)
{
	/* Drop one reference on the cached sampler action, if present. */
	struct mlx5_fs_hws_data *sampler_hws_data;

	sampler_hws_data = xa_load(&fs_ctx->hws_pool.sample_dests, sampler_id);
	if (sampler_hws_data)
		mlx5_fs_put_hws_action(sampler_hws_data);
}
571
572 static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context * ctx,struct mlx5hws_action_dest_attr * dests,u32 num_of_dests,bool ignore_flow_level)573 mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
574 struct mlx5hws_action_dest_attr *dests,
575 u32 num_of_dests, bool ignore_flow_level)
576 {
577 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
578
579 return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
580 ignore_flow_level, flags);
581 }
582
583 static struct mlx5hws_action *
mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context * fs_ctx)584 mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
585 {
586 return fs_ctx->hws_pool.push_vlan_action;
587 }
588
mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan * vlan)589 static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
590 {
591 u16 n_ethtype = vlan->ethtype;
592 u8 prio = vlan->prio;
593 u16 vid = vlan->vid;
594
595 return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
596 }
597
598 static struct mlx5hws_action *
mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context * fs_ctx)599 mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
600 {
601 return fs_ctx->hws_pool.pop_vlan_action;
602 }
603
604 static struct mlx5hws_action *
mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context * fs_ctx)605 mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
606 {
607 return fs_ctx->hws_pool.decapl2_action;
608 }
609
610 static struct mlx5hws_action *
mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context * fs_ctx)611 mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
612 {
613 return fs_ctx->hws_pool.drop_action;
614 }
615
616 static struct mlx5hws_action *
mlx5_fs_get_action_tag(struct mlx5_fs_hws_context * fs_ctx)617 mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
618 {
619 return fs_ctx->hws_pool.tag_action;
620 }
621
622 static struct mlx5hws_action *
mlx5_fs_create_action_last(struct mlx5hws_context * ctx)623 mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
624 {
625 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
626
627 return mlx5hws_action_create_last(ctx, flags);
628 }
629
630 static struct mlx5hws_action *
mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx * create_ctx)631 mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
632 {
633 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
634
635 switch (create_ctx->actions_type) {
636 case MLX5HWS_ACTION_TYP_CTR:
637 return mlx5hws_action_create_counter(create_ctx->hws_ctx,
638 create_ctx->id, flags);
639 case MLX5HWS_ACTION_TYP_ASO_METER:
640 return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
641 create_ctx->id,
642 create_ctx->return_reg_id,
643 flags);
644 case MLX5HWS_ACTION_TYP_SAMPLER:
645 return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
646 create_ctx->id, flags);
647 default:
648 return NULL;
649 }
650 }
651
struct mlx5hws_action *
mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
		       struct mlx5_fs_hws_create_action_ctx *create_ctx)
{
	/* Take a reference on the cached action, creating it on first use.
	 * refcount 0 means "no action exists"; creation is serialized by
	 * fs_hws_data->lock with a re-check after acquiring it.
	 */
	/* try avoid locking if not necessary */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
		return fs_hws_data->hws_action;

	mutex_lock(&fs_hws_data->lock);
	/* Re-check under the lock: another thread may have created it. */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return fs_hws_data->hws_action;
	}
	fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
	if (!fs_hws_data->hws_action) {
		mutex_unlock(&fs_hws_data->lock);
		return NULL;
	}
	/* First reference; refcount becomes visible to the fast path above. */
	refcount_set(&fs_hws_data->hws_action_refcount, 1);
	mutex_unlock(&fs_hws_data->lock);

	return fs_hws_data->hws_action;
}
675
void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
{
	/* Drop one reference on the cached action; the last reference
	 * destroys it under fs_hws_data->lock so a concurrent
	 * mlx5_fs_get_hws_action() cannot race with the destroy.
	 */
	if (!fs_hws_data)
		return;

	/* try avoid locking if not necessary */
	if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
		return;

	mutex_lock(&fs_hws_data->lock);
	if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
		/* Someone re-referenced it while we waited for the lock. */
		mutex_unlock(&fs_hws_data->lock);
		return;
	}
	mlx5hws_action_destroy(fs_hws_data->hws_action);
	fs_hws_data->hws_action = NULL;
	mutex_unlock(&fs_hws_data->lock);
}
694
mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action * fs_action)695 static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
696 struct mlx5_fs_hws_rule_action *fs_action)
697 {
698 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
699
700 switch (mlx5hws_action_get_type(fs_action->action)) {
701 case MLX5HWS_ACTION_TYP_CTR:
702 mlx5_fc_put_hws_action(fs_action->counter);
703 break;
704 case MLX5HWS_ACTION_TYP_ASO_METER:
705 mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
706 break;
707 case MLX5HWS_ACTION_TYP_SAMPLER:
708 mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
709 break;
710 default:
711 mlx5hws_action_destroy(fs_action->action);
712 }
713 }
714
715 static void
mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action ** fs_actions,int * num_fs_actions)716 mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
717 struct mlx5_fs_hws_rule_action **fs_actions,
718 int *num_fs_actions)
719 {
720 int i;
721
722 /* Free in reverse order to handle action dependencies */
723 for (i = *num_fs_actions - 1; i >= 0; i--)
724 mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
725 *num_fs_actions = 0;
726 kfree(*fs_actions);
727 *fs_actions = NULL;
728 }
729
730 /* Splits FTE's actions into cached, rule and destination actions.
731 * The cached and destination actions are saved on the fte hws rule.
732 * The rule actions are returned as a parameter, together with their count.
733 * We want to support a rule with 32 destinations, which means we need to
734 * account for 32 destinations plus usually a counter plus one more action
735 * for a multi-destination flow table.
736 * 32 is SW limitation for array size, keep. HWS limitation is 16M STEs per matcher
737 */
738 #define MLX5_FLOW_CONTEXT_ACTION_MAX 34
mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte,struct mlx5hws_rule_action ** ractions)739 static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
740 struct mlx5_flow_table *ft,
741 struct mlx5_flow_group *group,
742 struct fs_fte *fte,
743 struct mlx5hws_rule_action **ractions)
744 {
745 struct mlx5_flow_act *fte_action = &fte->act_dests.action;
746 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
747 struct mlx5hws_action_dest_attr *dest_actions;
748 struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
749 struct mlx5_fs_hws_rule_action *fs_actions;
750 struct mlx5_core_dev *dev = ns->dev;
751 struct mlx5hws_action *dest_action;
752 struct mlx5hws_action *tmp_action;
753 struct mlx5_fs_hws_pr *pr_data;
754 struct mlx5_fs_hws_mh *mh_data;
755 bool delay_encap_set = false;
756 struct mlx5_flow_rule *dst;
757 int num_dest_actions = 0;
758 int num_fs_actions = 0;
759 int num_actions = 0;
760 int err;
761
762 *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
763 GFP_KERNEL);
764 if (!*ractions) {
765 err = -ENOMEM;
766 goto out_err;
767 }
768
769 fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
770 sizeof(*fs_actions), GFP_KERNEL);
771 if (!fs_actions) {
772 err = -ENOMEM;
773 goto free_actions_alloc;
774 }
775
776 dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
777 sizeof(*dest_actions), GFP_KERNEL);
778 if (!dest_actions) {
779 err = -ENOMEM;
780 goto free_fs_actions_alloc;
781 }
782
783 /* The order of the actions are must to be kept, only the following
784 * order is supported by HW steering:
785 * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
786 * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
787 * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
788 */
789 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
790 tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
791 if (!tmp_action) {
792 err = -ENOMEM;
793 goto free_dest_actions_alloc;
794 }
795 (*ractions)[num_actions++].action = tmp_action;
796 }
797
798 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
799 int reformat_type = fte_action->pkt_reformat->reformat_type;
800
801 if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
802 mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
803 err = -EINVAL;
804 goto free_actions;
805 }
806
807 if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
808 pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
809 (*ractions)[num_actions].reformat.offset = pr_data->offset;
810 (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
811 (*ractions)[num_actions].reformat.data = pr_data->data;
812 (*ractions)[num_actions++].action =
813 fte_action->pkt_reformat->fs_hws_action.hws_action;
814 } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
815 (*ractions)[num_actions++].action =
816 fte_action->pkt_reformat->fs_hws_action.hws_action;
817 } else {
818 delay_encap_set = true;
819 }
820 }
821
822 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
823 tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
824 if (!tmp_action) {
825 err = -ENOMEM;
826 goto free_actions;
827 }
828 (*ractions)[num_actions++].action = tmp_action;
829 }
830
831 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
832 tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
833 if (!tmp_action) {
834 err = -ENOMEM;
835 goto free_actions;
836 }
837 (*ractions)[num_actions++].action = tmp_action;
838 }
839
840 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
841 mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
842 (*ractions)[num_actions].modify_header.offset = mh_data->offset;
843 (*ractions)[num_actions].modify_header.data = mh_data->data;
844 (*ractions)[num_actions++].action =
845 fte_action->modify_hdr->fs_hws_action.hws_action;
846 }
847
848 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
849 tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
850 if (!tmp_action) {
851 err = -ENOMEM;
852 goto free_actions;
853 }
854 (*ractions)[num_actions].push_vlan.vlan_hdr =
855 htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
856 (*ractions)[num_actions++].action = tmp_action;
857 }
858
859 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
860 tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
861 if (!tmp_action) {
862 err = -ENOMEM;
863 goto free_actions;
864 }
865 (*ractions)[num_actions].push_vlan.vlan_hdr =
866 htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
867 (*ractions)[num_actions++].action = tmp_action;
868 }
869
870 if (delay_encap_set) {
871 pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
872 (*ractions)[num_actions].reformat.offset = pr_data->offset;
873 (*ractions)[num_actions].reformat.data = pr_data->data;
874 (*ractions)[num_actions++].action =
875 fte_action->pkt_reformat->fs_hws_action.hws_action;
876 }
877
878 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
879 list_for_each_entry(dst, &fte->node.children, node.list) {
880 struct mlx5_fc *counter;
881
882 if (dst->dest_attr.type !=
883 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
884 continue;
885
886 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
887 err = -EOPNOTSUPP;
888 goto free_actions;
889 }
890
891 counter = dst->dest_attr.counter;
892 tmp_action = mlx5_fc_get_hws_action(ctx, counter);
893 if (!tmp_action) {
894 err = -EINVAL;
895 goto free_actions;
896 }
897
898 (*ractions)[num_actions].counter.offset =
899 mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
900 (*ractions)[num_actions++].action = tmp_action;
901 fs_actions[num_fs_actions].action = tmp_action;
902 fs_actions[num_fs_actions++].counter = counter;
903 }
904 }
905
906 if (fte->act_dests.flow_context.flow_tag) {
907 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
908 err = -EOPNOTSUPP;
909 goto free_actions;
910 }
911 tmp_action = mlx5_fs_get_action_tag(fs_ctx);
912 if (!tmp_action) {
913 err = -ENOMEM;
914 goto free_actions;
915 }
916 (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
917 (*ractions)[num_actions++].action = tmp_action;
918 }
919
920 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
921 if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
922 num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
923 err = -EOPNOTSUPP;
924 goto free_actions;
925 }
926
927 tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
928 &fte_action->exe_aso);
929 if (!tmp_action) {
930 err = -ENOMEM;
931 goto free_actions;
932 }
933 (*ractions)[num_actions].aso_meter.offset =
934 fte_action->exe_aso.flow_meter.meter_idx;
935 (*ractions)[num_actions].aso_meter.init_color =
936 fte_action->exe_aso.flow_meter.init_color;
937 (*ractions)[num_actions++].action = tmp_action;
938 fs_actions[num_fs_actions].action = tmp_action;
939 fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
940 }
941
942 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
943 dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
944 if (!dest_action) {
945 err = -ENOMEM;
946 goto free_actions;
947 }
948 dest_actions[num_dest_actions++].dest = dest_action;
949 }
950
951 if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
952 list_for_each_entry(dst, &fte->node.children, node.list) {
953 struct mlx5_flow_destination *attr = &dst->dest_attr;
954 bool type_uplink =
955 attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
956
957 if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
958 num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
959 err = -EOPNOTSUPP;
960 goto free_actions;
961 }
962 if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
963 continue;
964
965 switch (attr->type) {
966 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
967 dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
968 if (dst->dest_attr.ft->flags &
969 MLX5_FLOW_TABLE_UPLINK_VPORT)
970 dest_actions[num_dest_actions].is_wire_ft = true;
971 break;
972 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
973 dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
974 dst);
975 if (dest_action)
976 break;
977 dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
978 dst);
979 fs_actions[num_fs_actions++].action = dest_action;
980 break;
981 case MLX5_FLOW_DESTINATION_TYPE_RANGE:
982 dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
983 fs_actions[num_fs_actions++].action = dest_action;
984 break;
985 case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
986 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
987 dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
988 type_uplink);
989 break;
990 case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
991 dest_action =
992 mlx5_fs_get_dest_action_sampler(fs_ctx,
993 dst);
994 fs_actions[num_fs_actions].action = dest_action;
995 fs_actions[num_fs_actions++].sampler_id =
996 dst->dest_attr.sampler_id;
997 break;
998 default:
999 err = -EOPNOTSUPP;
1000 goto free_actions;
1001 }
1002 if (!dest_action) {
1003 err = -ENOMEM;
1004 goto free_actions;
1005 }
1006 dest_actions[num_dest_actions++].dest = dest_action;
1007 }
1008 }
1009
1010 if (num_dest_actions == 1) {
1011 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1012 err = -EOPNOTSUPP;
1013 goto free_actions;
1014 }
1015 (*ractions)[num_actions++].action = dest_actions->dest;
1016 } else if (num_dest_actions > 1) {
1017 bool ignore_flow_level;
1018
1019 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1020 num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1021 err = -EOPNOTSUPP;
1022 goto free_actions;
1023 }
1024 ignore_flow_level =
1025 !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
1026 tmp_action =
1027 mlx5_fs_create_action_dest_array(ctx, dest_actions,
1028 num_dest_actions,
1029 ignore_flow_level);
1030 if (!tmp_action) {
1031 err = -EOPNOTSUPP;
1032 goto free_actions;
1033 }
1034 fs_actions[num_fs_actions++].action = tmp_action;
1035 (*ractions)[num_actions++].action = tmp_action;
1036 }
1037
1038 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1039 num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1040 err = -EOPNOTSUPP;
1041 goto free_actions;
1042 }
1043
1044 tmp_action = mlx5_fs_create_action_last(ctx);
1045 if (!tmp_action) {
1046 err = -ENOMEM;
1047 goto free_actions;
1048 }
1049 fs_actions[num_fs_actions++].action = tmp_action;
1050 (*ractions)[num_actions++].action = tmp_action;
1051
1052 kfree(dest_actions);
1053
1054 /* Actions created specifically for this rule will be destroyed
1055 * once rule is deleted.
1056 */
1057 fte->fs_hws_rule.num_fs_actions = num_fs_actions;
1058 fte->fs_hws_rule.hws_fs_actions = fs_actions;
1059
1060 return 0;
1061
1062 free_actions:
1063 mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
1064 free_dest_actions_alloc:
1065 kfree(dest_actions);
1066 free_fs_actions_alloc:
1067 kfree(fs_actions);
1068 free_actions_alloc:
1069 kfree(*ractions);
1070 *ractions = NULL;
1071 out_err:
1072 return err;
1073 }
1074
mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte)1075 static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
1076 struct mlx5_flow_table *ft,
1077 struct mlx5_flow_group *group,
1078 struct fs_fte *fte)
1079 {
1080 struct mlx5hws_match_parameters params;
1081 struct mlx5hws_rule_action *ractions;
1082 struct mlx5hws_bwc_rule *rule;
1083 int err = 0;
1084
1085 if (mlx5_fs_cmd_is_fw_term_table(ft))
1086 return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
1087
1088 err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
1089 if (err)
1090 goto out_err;
1091
1092 params.match_sz = sizeof(fte->val);
1093 params.match_buf = fte->val;
1094
1095 rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, ¶ms,
1096 fte->act_dests.flow_context.flow_source,
1097 ractions);
1098 kfree(ractions);
1099 if (!rule) {
1100 err = -EINVAL;
1101 goto free_actions;
1102 }
1103
1104 fte->fs_hws_rule.bwc_rule = rule;
1105 return 0;
1106
1107 free_actions:
1108 mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
1109 &fte->fs_hws_rule.num_fs_actions);
1110 out_err:
1111 mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
1112 return err;
1113 }
1114
mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct fs_fte * fte)1115 static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
1116 struct mlx5_flow_table *ft,
1117 struct fs_fte *fte)
1118 {
1119 struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
1120 int err;
1121
1122 if (mlx5_fs_cmd_is_fw_term_table(ft))
1123 return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
1124
1125 err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
1126 rule->bwc_rule = NULL;
1127
1128 mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
1129 &rule->num_fs_actions);
1130
1131 return err;
1132 }
1133
mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,int modify_mask,struct fs_fte * fte)1134 static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
1135 struct mlx5_flow_table *ft,
1136 struct mlx5_flow_group *group,
1137 int modify_mask,
1138 struct fs_fte *fte)
1139 {
1140 int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
1141 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
1142 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1143 struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
1144 struct mlx5hws_rule_action *ractions;
1145 int saved_num_fs_actions;
1146 int ret;
1147
1148 if (mlx5_fs_cmd_is_fw_term_table(ft))
1149 return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
1150 modify_mask, fte);
1151
1152 if ((modify_mask & ~allowed_mask) != 0)
1153 return -EINVAL;
1154
1155 saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
1156 saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
1157
1158 ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
1159 if (ret)
1160 return ret;
1161
1162 ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
1163 kfree(ractions);
1164 if (ret)
1165 goto restore_actions;
1166
1167 mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
1168 &saved_num_fs_actions);
1169 return ret;
1170
1171 restore_actions:
1172 mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
1173 &fte->fs_hws_rule.num_fs_actions);
1174 fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
1175 fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
1176 return ret;
1177 }
1178
1179 static struct mlx5hws_action *
mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context * ctx)1180 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
1181 {
1182 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
1183 struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
1184
1185 /* MAC anchor not supported in HWS reformat, use VLAN anchor */
1186 remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
1187 remove_hdr_vlan.offset = 0;
1188 remove_hdr_vlan.size = sizeof(struct vlan_hdr);
1189 return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
1190 }
1191
1192 static struct mlx5hws_action *
mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_pkt_reformat_params * params)1193 mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
1194 struct mlx5_pkt_reformat_params *params)
1195 {
1196 if (!params ||
1197 params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
1198 params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
1199 params->size != sizeof(struct vlan_hdr))
1200 return NULL;
1201
1202 return fs_ctx->hws_pool.remove_hdr_vlan_action;
1203 }
1204
1205 static int
mlx5_fs_verify_insert_header_params(struct mlx5_core_dev * mdev,struct mlx5_pkt_reformat_params * params)1206 mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
1207 struct mlx5_pkt_reformat_params *params)
1208 {
1209 if ((!params->data && params->size) || (params->data && !params->size) ||
1210 MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
1211 MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
1212 mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
1213 return -EINVAL;
1214 }
1215 if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
1216 params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
1217 params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
1218 mlx5_core_err(mdev, "Only vlan insert header supported\n");
1219 return -EOPNOTSUPP;
1220 }
1221 return 0;
1222 }
1223
1224 static int
mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev * dev,struct mlx5_pkt_reformat_params * params)1225 mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
1226 struct mlx5_pkt_reformat_params *params)
1227 {
1228 if (params->param_0 || params->param_1) {
1229 mlx5_core_err(dev, "Invalid reformat params\n");
1230 return -EINVAL;
1231 }
1232 return 0;
1233 }
1234
1235 static struct mlx5_fs_pool *
mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev * dev,struct xarray * pr_pools,enum mlx5hws_action_type reformat_type,size_t size)1236 mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
1237 enum mlx5hws_action_type reformat_type, size_t size)
1238 {
1239 struct mlx5_fs_pool *pr_pool;
1240 unsigned long index = size;
1241 int err;
1242
1243 pr_pool = xa_load(pr_pools, index);
1244 if (pr_pool)
1245 return pr_pool;
1246
1247 pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
1248 if (!pr_pool)
1249 return ERR_PTR(-ENOMEM);
1250 err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
1251 if (err)
1252 goto free_pr_pool;
1253 err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
1254 if (err)
1255 goto cleanup_pr_pool;
1256 return pr_pool;
1257
1258 cleanup_pr_pool:
1259 mlx5_fs_hws_pr_pool_cleanup(pr_pool);
1260 free_pr_pool:
1261 kfree(pr_pool);
1262 return ERR_PTR(err);
1263 }
1264
/* Unpublish a packet-reformat pool from @pr_pools and free it. */
static void
mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
			unsigned long index)
{
	/* Remove from the xarray first, then tear the pool down. */
	xa_erase(pr_pools, index);
	mlx5_fs_hws_pr_pool_cleanup(pool);
	kfree(pool);
}
1273
1274 static int
mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat_params * params,enum mlx5_flow_namespace_type namespace,struct mlx5_pkt_reformat * pkt_reformat)1275 mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
1276 struct mlx5_pkt_reformat_params *params,
1277 enum mlx5_flow_namespace_type namespace,
1278 struct mlx5_pkt_reformat *pkt_reformat)
1279 {
1280 struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
1281 struct mlx5_fs_hws_actions_pool *hws_pool;
1282 struct mlx5hws_action *hws_action = NULL;
1283 struct mlx5_fs_hws_pr *pr_data = NULL;
1284 struct mlx5_fs_pool *pr_pool = NULL;
1285 struct mlx5_core_dev *dev = ns->dev;
1286 u8 hdr_idx = 0;
1287 int err;
1288
1289 if (!params)
1290 return -EINVAL;
1291
1292 hws_pool = &fs_ctx->hws_pool;
1293
1294 switch (params->type) {
1295 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1296 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1297 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1298 if (mlx5_fs_verify_encap_decap_params(dev, params))
1299 return -EINVAL;
1300 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1301 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1302 params->size);
1303 if (IS_ERR(pr_pool))
1304 return PTR_ERR(pr_pool);
1305 break;
1306 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1307 if (mlx5_fs_verify_encap_decap_params(dev, params))
1308 return -EINVAL;
1309 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
1310 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
1311 params->size);
1312 if (IS_ERR(pr_pool))
1313 return PTR_ERR(pr_pool);
1314 break;
1315 case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1316 if (mlx5_fs_verify_encap_decap_params(dev, params))
1317 return -EINVAL;
1318 pr_pool = &hws_pool->dl3tnltol2_pool;
1319 hdr_idx = params->size == ETH_HLEN ?
1320 MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
1321 MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
1322 break;
1323 case MLX5_REFORMAT_TYPE_INSERT_HDR:
1324 err = mlx5_fs_verify_insert_header_params(dev, params);
1325 if (err)
1326 return err;
1327 pr_pool = &hws_pool->insert_hdr_pool;
1328 break;
1329 case MLX5_REFORMAT_TYPE_REMOVE_HDR:
1330 hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
1331 if (!hws_action)
1332 mlx5_core_err(dev, "Only vlan remove header supported\n");
1333 break;
1334 default:
1335 mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
1336 params->type);
1337 return -EOPNOTSUPP;
1338 }
1339
1340 if (pr_pool) {
1341 pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
1342 if (IS_ERR_OR_NULL(pr_data))
1343 return !pr_data ? -EINVAL : PTR_ERR(pr_data);
1344 hws_action = pr_data->bulk->hws_action;
1345 if (!hws_action) {
1346 mlx5_core_err(dev,
1347 "Failed allocating packet-reformat action\n");
1348 err = -EINVAL;
1349 goto release_pr;
1350 }
1351 pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
1352 if (!pr_data->data) {
1353 err = -ENOMEM;
1354 goto release_pr;
1355 }
1356 pr_data->hdr_idx = hdr_idx;
1357 pr_data->data_size = params->size;
1358 pkt_reformat->fs_hws_action.pr_data = pr_data;
1359 }
1360
1361 mutex_init(&pkt_reformat->fs_hws_action.lock);
1362 pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
1363 pkt_reformat->fs_hws_action.hws_action = hws_action;
1364 return 0;
1365
1366 release_pr:
1367 if (pr_pool && pr_data)
1368 mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1369 return err;
1370 }
1371
mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat * pkt_reformat)1372 static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
1373 struct mlx5_pkt_reformat *pkt_reformat)
1374 {
1375 struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
1376 struct mlx5_core_dev *dev = ns->dev;
1377 struct mlx5_fs_hws_pr *pr_data;
1378 struct mlx5_fs_pool *pr_pool;
1379
1380 if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
1381 struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
1382
1383 fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
1384 mlx5_fs_cmd_get_fw_cmds()->
1385 packet_reformat_dealloc(ns, &fw_pkt_reformat);
1386 pkt_reformat->fs_hws_action.fw_reformat_id = 0;
1387 }
1388
1389 if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
1390 return;
1391
1392 if (!pkt_reformat->fs_hws_action.pr_data) {
1393 mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1394 return;
1395 }
1396 pr_data = pkt_reformat->fs_hws_action.pr_data;
1397
1398 switch (pkt_reformat->reformat_type) {
1399 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1400 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1401 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1402 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1403 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1404 pr_data->data_size);
1405 break;
1406 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1407 pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1408 MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1409 pr_data->data_size);
1410 break;
1411 case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1412 pr_pool = &hws_pool->dl3tnltol2_pool;
1413 break;
1414 case MLX5_REFORMAT_TYPE_INSERT_HDR:
1415 pr_pool = &hws_pool->insert_hdr_pool;
1416 break;
1417 default:
1418 mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
1419 return;
1420 }
1421 if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
1422 mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1423 return;
1424 }
1425 kfree(pr_data->data);
1426 mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1427 pkt_reformat->fs_hws_action.pr_data = NULL;
1428 }
1429
1430 static struct mlx5_fs_pool *
mlx5_fs_create_mh_pool(struct mlx5_core_dev * dev,struct mlx5hws_action_mh_pattern * pattern,struct xarray * mh_pools,unsigned long index)1431 mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
1432 struct mlx5hws_action_mh_pattern *pattern,
1433 struct xarray *mh_pools, unsigned long index)
1434 {
1435 struct mlx5_fs_pool *pool;
1436 int err;
1437
1438 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1439 if (!pool)
1440 return ERR_PTR(-ENOMEM);
1441 err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
1442 if (err)
1443 goto free_pool;
1444 err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
1445 if (err)
1446 goto cleanup_pool;
1447 return pool;
1448
1449 cleanup_pool:
1450 mlx5_fs_hws_mh_pool_cleanup(pool);
1451 free_pool:
1452 kfree(pool);
1453 return ERR_PTR(err);
1454 }
1455
/* Unpublish a modify-header pool from @mh_pools and free it. */
static void
mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
			unsigned long index)
{
	/* Remove from the xarray first, then tear the pool down. */
	xa_erase(mh_pools, index);
	mlx5_fs_hws_mh_pool_cleanup(pool);
	kfree(pool);
}
1464
/* Allocate an HWS modify-header action.
 *
 * Modify-header bulks are pooled per action pattern: an existing pool
 * whose pattern matches is reused, otherwise a new pool is created and
 * published. The caller's action list is copied so it can be supplied
 * as the rule action's modify_header.data at rule-insertion time.
 */
static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					    u8 namespace, u8 num_actions,
					    void *modify_actions,
					    struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
	struct mlx5hws_action_mh_pattern pattern = {};
	struct mlx5_fs_hws_mh *mh_data = NULL;
	struct mlx5hws_action *hws_action;
	struct mlx5_fs_pool *pool;
	unsigned long i, cnt = 0;
	bool known_pattern;
	int err;

	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	pattern.data = modify_actions;

	/* Scan the pattern pools; on a miss, cnt is the number of pools
	 * iterated and is used as the insertion index for the new pool.
	 */
	known_pattern = false;
	xa_for_each(&hws_pool->mh_pools, i, pool) {
		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
			known_pattern = true;
			break;
		}
		cnt++;
	}

	if (!known_pattern) {
		pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
					      &hws_pool->mh_pools, cnt);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
	}
	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
	if (IS_ERR(mh_data)) {
		err = PTR_ERR(mh_data);
		goto destroy_pool;
	}
	hws_action = mh_data->bulk->hws_action;
	/* Private copy of the modify actions, owned until dealloc. */
	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
	if (!mh_data->data) {
		err = -ENOMEM;
		goto release_mh;
	}
	modify_hdr->fs_hws_action.mh_data = mh_data;
	modify_hdr->fs_hws_action.fs_pool = pool;
	/* NOTE(review): the packet-reformat path marks its resource
	 * MLX5_FLOW_RESOURCE_OWNER_HWS; confirm OWNER_SW is intended here.
	 */
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
	modify_hdr->fs_hws_action.hws_action = hws_action;

	return 0;

release_mh:
	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
destroy_pool:
	/* Only destroy a pool created by this call. */
	if (!known_pattern)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
	return err;
}
1522
mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)1523 static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
1524 struct mlx5_modify_hdr *modify_hdr)
1525 {
1526 struct mlx5_fs_hws_mh *mh_data;
1527 struct mlx5_fs_pool *pool;
1528
1529 if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
1530 mlx5_core_err(ns->dev, "Failed release modify-header\n");
1531 return;
1532 }
1533
1534 mh_data = modify_hdr->fs_hws_action.mh_data;
1535 kfree(mh_data->data);
1536 pool = modify_hdr->fs_hws_action.fs_pool;
1537 mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
1538 modify_hdr->fs_hws_action.mh_data = NULL;
1539 }
1540
1541 int
mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat * pkt_reformat,u32 * reformat_id)1542 mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
1543 u32 *reformat_id)
1544 {
1545 enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
1546 struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
1547 u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
1548 struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
1549 struct mlx5_pkt_reformat_params params = { 0 };
1550 struct mlx5_flow_root_namespace *ns;
1551 struct mlx5_core_dev *dev;
1552 int ret;
1553
1554 mutex_lock(lock);
1555
1556 if (*id != 0) {
1557 *reformat_id = *id;
1558 ret = 0;
1559 goto unlock;
1560 }
1561
1562 dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
1563 if (!dev) {
1564 ret = -EINVAL;
1565 goto unlock;
1566 }
1567
1568 ns = mlx5_get_root_namespace(dev, ns_type);
1569 if (!ns) {
1570 ret = -EINVAL;
1571 goto unlock;
1572 }
1573
1574 params.type = pkt_reformat->reformat_type;
1575 params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
1576 params.data = pkt_reformat->fs_hws_action.pr_data->data;
1577
1578 ret = mlx5_fs_cmd_get_fw_cmds()->
1579 packet_reformat_alloc(ns, ¶ms, ns_type, &fw_pkt_reformat);
1580 if (ret)
1581 goto unlock;
1582
1583 *id = fw_pkt_reformat.id;
1584 *reformat_id = *id;
1585 ret = 0;
1586
1587 unlock:
1588 mutex_unlock(lock);
1589
1590 return ret;
1591 }
1592
/* Match definers are not supported through the HWS command interface. */
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
					     u16 format_id, u32 *match_mask)
{
	return -EOPNOTSUPP;
}
1598
/* Match definers are not supported through the HWS command interface. */
static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					      int definer_id)
{
	return -EOPNOTSUPP;
}
1604
mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace * ns,enum fs_flow_table_type ft_type)1605 static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
1606 enum fs_flow_table_type ft_type)
1607 {
1608 if (ft_type != FS_FT_FDB)
1609 return 0;
1610
1611 return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
1612 MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
1613 MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
1614 }
1615
/* True when the device supports hardware steering (HWS). */
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5hws_is_supported(dev);
}
1620
/* HWS implementation of the flow-steering command interface. */
static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
	.create_flow_table = mlx5_cmd_hws_create_flow_table,
	.destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_hws_modify_flow_table,
	.update_root_ft = mlx5_cmd_hws_update_root_ft,
	.create_flow_group = mlx5_cmd_hws_create_flow_group,
	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
	.create_fte = mlx5_cmd_hws_create_fte,
	.delete_fte = mlx5_cmd_hws_delete_fte,
	.update_fte = mlx5_cmd_hws_update_fte,
	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_hws_create_match_definer,
	.destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
	.create_ns = mlx5_cmd_hws_create_ns,
	.destroy_ns = mlx5_cmd_hws_destroy_ns,
	.set_peer = mlx5_cmd_hws_set_peer,
	.get_capabilities = mlx5_cmd_hws_get_capabilities,
};
1642
/* Accessor for the HWS flow command table. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
{
	return &mlx5_flow_cmds_hws;
}
1647