1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3
4 #include <mlx5_core.h>
5 #include "fs_hws_pools.h"
6
7 #define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
8 #define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
9 #define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
10
11 static struct mlx5hws_action *
mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context * ctx)12 mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
13 {
14 struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
15 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
16 enum mlx5hws_action_type reformat_type;
17 u32 log_bulk_size;
18
19 reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
20 reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
21 reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
22
23 log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
24 return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
25 reformat_hdr, log_bulk_size, flags);
26 }
27
28 static struct mlx5hws_action *
mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context * ctx,size_t data_size)29 mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
30 {
31 struct mlx5hws_action_reformat_header reformat_hdr = {};
32 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
33 enum mlx5hws_action_type reformat_type;
34 u32 log_bulk_size;
35
36 reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
37 reformat_hdr.sz = data_size;
38
39 log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
40 return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
41 &reformat_hdr, log_bulk_size, flags);
42 }
43
44 static struct mlx5hws_action *
mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context * ctx,size_t data_size)45 mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
46 {
47 struct mlx5hws_action_reformat_header reformat_hdr = {};
48 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
49 enum mlx5hws_action_type reformat_type;
50 u32 log_bulk_size;
51
52 reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
53 reformat_hdr.sz = data_size;
54
55 log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
56 return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
57 &reformat_hdr, log_bulk_size, flags);
58 }
59
60 static struct mlx5hws_action *
mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context * ctx)61 mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
62 {
63 struct mlx5hws_action_insert_header insert_hdr = {};
64 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
65 u32 log_bulk_size;
66
67 log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
68 insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
69 insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
70 insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
71
72 return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
73 log_bulk_size, flags);
74 }
75
76 static struct mlx5hws_action *
mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev * dev,struct mlx5_fs_hws_pr_pool_ctx * pr_pool_ctx)77 mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
78 struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
79 {
80 struct mlx5_flow_root_namespace *root_ns;
81 struct mlx5hws_context *ctx;
82 size_t encap_data_size;
83
84 root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
85 if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
86 return NULL;
87
88 ctx = root_ns->fs_hws_context.hws_ctx;
89 if (!ctx)
90 return NULL;
91
92 encap_data_size = pr_pool_ctx->encap_data_size;
93 switch (pr_pool_ctx->reformat_type) {
94 case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
95 return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
96 case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
97 return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
98 case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
99 return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
100 case MLX5HWS_ACTION_TYP_INSERT_HEADER:
101 return mlx5_fs_insert_hdr_bulk_action_create(ctx);
102 default:
103 return NULL;
104 }
105 return NULL;
106 }
107
108 static struct mlx5_fs_bulk *
mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev * dev,void * pool_ctx)109 mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
110 {
111 struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
112 struct mlx5_fs_hws_pr_bulk *pr_bulk;
113 int bulk_len;
114 int i;
115
116 if (!pool_ctx)
117 return NULL;
118 pr_pool_ctx = pool_ctx;
119 bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
120 pr_bulk = kvzalloc_flex(*pr_bulk, prs_data, bulk_len);
121 if (!pr_bulk)
122 return NULL;
123
124 mlx5_fs_bulk_init(&pr_bulk->fs_bulk, bulk_len);
125
126 if (mlx5_fs_bulk_bitmap_alloc(dev, &pr_bulk->fs_bulk))
127 goto free_pr_bulk;
128
129 for (i = 0; i < bulk_len; i++) {
130 pr_bulk->prs_data[i].bulk = pr_bulk;
131 pr_bulk->prs_data[i].offset = i;
132 }
133
134 pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
135 if (!pr_bulk->hws_action)
136 goto cleanup_fs_bulk;
137
138 return &pr_bulk->fs_bulk;
139
140 cleanup_fs_bulk:
141 mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
142 free_pr_bulk:
143 kvfree(pr_bulk);
144 return NULL;
145 }
146
147 static int
mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev * dev,struct mlx5_fs_bulk * fs_bulk)148 mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
149 {
150 struct mlx5_fs_hws_pr_bulk *pr_bulk;
151
152 pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
153 if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
154 mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
155 return -EBUSY;
156 }
157
158 mlx5hws_action_destroy(pr_bulk->hws_action);
159 mlx5_fs_bulk_cleanup(fs_bulk);
160 kvfree(pr_bulk);
161
162 return 0;
163 }
164
mlx5_hws_pool_update_threshold(struct mlx5_fs_pool * hws_pool)165 static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
166 {
167 hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
168 hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
169 }
170
/* Pool callbacks for packet-reformat (encap/decap/insert-header) bulks. */
static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
	.bulk_create = mlx5_fs_hws_pr_bulk_create,
	.bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
	.update_threshold = mlx5_hws_pool_update_threshold,
};
176
/* Initialize a packet-reformat pool for @reformat_type.
 *
 * @encap_data_size is the encap header length used by the L2_TO_TNL_*
 * types. Returns 0 on success, -EOPNOTSUPP for an unsupported reformat
 * type, -ENOMEM on allocation failure.
 */
int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
			     struct mlx5_core_dev *dev, size_t encap_data_size,
			     enum mlx5hws_action_type reformat_type)
{
	struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;

	switch (reformat_type) {
	case MLX5HWS_ACTION_TYP_INSERT_HEADER:
	case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
	case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
	case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	pr_pool_ctx = kzalloc_obj(*pr_pool_ctx);
	if (!pr_pool_ctx)
		return -ENOMEM;

	pr_pool_ctx->reformat_type = reformat_type;
	pr_pool_ctx->encap_data_size = encap_data_size;
	mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
	return 0;
}
197
mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool * pr_pool)198 void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
199 {
200 struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
201
202 mlx5_fs_pool_cleanup(pr_pool);
203 pr_pool_ctx = pr_pool->pool_ctx;
204 if (!pr_pool_ctx)
205 return;
206 kfree(pr_pool_ctx);
207 }
208
209 struct mlx5_fs_hws_pr *
mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool * pr_pool)210 mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
211 {
212 struct mlx5_fs_pool_index pool_index = {};
213 struct mlx5_fs_hws_pr_bulk *pr_bulk;
214 int err;
215
216 err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
217 if (err)
218 return ERR_PTR(err);
219 pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
220 fs_bulk);
221 return &pr_bulk->prs_data[pool_index.index];
222 }
223
mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool * pr_pool,struct mlx5_fs_hws_pr * pr_data)224 void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
225 struct mlx5_fs_hws_pr *pr_data)
226 {
227 struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
228 struct mlx5_fs_pool_index pool_index = {};
229 struct mlx5_core_dev *dev = pr_pool->dev;
230
231 pool_index.fs_bulk = fs_bulk;
232 pool_index.index = pr_data->offset;
233 if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
234 mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
235 }
236
/* Return the HWS action shared by the bulk that owns @pr_data. */
struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
{
	return pr_data->bulk->hws_action;
}
241
242 static struct mlx5hws_action *
mlx5_fs_mh_bulk_action_create(struct mlx5hws_context * ctx,struct mlx5hws_action_mh_pattern * pattern)243 mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
244 struct mlx5hws_action_mh_pattern *pattern)
245 {
246 u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
247 u32 log_bulk_size;
248
249 log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
250 return mlx5hws_action_create_modify_header(ctx, 1, pattern,
251 log_bulk_size, flags);
252 }
253
254 static struct mlx5_fs_bulk *
mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev * dev,void * pool_ctx)255 mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
256 {
257 struct mlx5hws_action_mh_pattern *pattern;
258 struct mlx5_flow_root_namespace *root_ns;
259 struct mlx5_fs_hws_mh_bulk *mh_bulk;
260 struct mlx5hws_context *ctx;
261 int bulk_len;
262
263 if (!pool_ctx)
264 return NULL;
265
266 root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
267 if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
268 return NULL;
269
270 ctx = root_ns->fs_hws_context.hws_ctx;
271 if (!ctx)
272 return NULL;
273
274 pattern = pool_ctx;
275 bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
276 mh_bulk = kvzalloc_flex(*mh_bulk, mhs_data, bulk_len);
277 if (!mh_bulk)
278 return NULL;
279
280 mlx5_fs_bulk_init(&mh_bulk->fs_bulk, bulk_len);
281
282 if (mlx5_fs_bulk_bitmap_alloc(dev, &mh_bulk->fs_bulk))
283 goto free_mh_bulk;
284
285 for (int i = 0; i < bulk_len; i++) {
286 mh_bulk->mhs_data[i].bulk = mh_bulk;
287 mh_bulk->mhs_data[i].offset = i;
288 }
289
290 mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
291 if (!mh_bulk->hws_action)
292 goto cleanup_fs_bulk;
293
294 return &mh_bulk->fs_bulk;
295
296 cleanup_fs_bulk:
297 mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
298 free_mh_bulk:
299 kvfree(mh_bulk);
300 return NULL;
301 }
302
303 static int
mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev * dev,struct mlx5_fs_bulk * fs_bulk)304 mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
305 struct mlx5_fs_bulk *fs_bulk)
306 {
307 struct mlx5_fs_hws_mh_bulk *mh_bulk;
308
309 mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
310 if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
311 mlx5_core_err(dev, "Freeing bulk before all modify header were released\n");
312 return -EBUSY;
313 }
314
315 mlx5hws_action_destroy(mh_bulk->hws_action);
316 mlx5_fs_bulk_cleanup(fs_bulk);
317 kvfree(mh_bulk);
318
319 return 0;
320 }
321
/* Pool callbacks for modify-header bulks. */
static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
	.bulk_create = mlx5_fs_hws_mh_bulk_create,
	.bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
	.update_threshold = mlx5_hws_pool_update_threshold,
};
327
mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool * fs_hws_mh_pool,struct mlx5_core_dev * dev,struct mlx5hws_action_mh_pattern * pattern)328 int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
329 struct mlx5_core_dev *dev,
330 struct mlx5hws_action_mh_pattern *pattern)
331 {
332 struct mlx5hws_action_mh_pattern *pool_pattern;
333
334 pool_pattern = kzalloc_obj(*pool_pattern);
335 if (!pool_pattern)
336 return -ENOMEM;
337 pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
338 if (!pool_pattern->data) {
339 kfree(pool_pattern);
340 return -ENOMEM;
341 }
342 pool_pattern->sz = pattern->sz;
343 mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
344 pool_pattern);
345 return 0;
346 }
347
mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool * fs_hws_mh_pool)348 void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
349 {
350 struct mlx5hws_action_mh_pattern *pool_pattern;
351
352 mlx5_fs_pool_cleanup(fs_hws_mh_pool);
353 pool_pattern = fs_hws_mh_pool->pool_ctx;
354 if (!pool_pattern)
355 return;
356 kfree(pool_pattern->data);
357 kfree(pool_pattern);
358 }
359
360 struct mlx5_fs_hws_mh *
mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool * mh_pool)361 mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
362 {
363 struct mlx5_fs_pool_index pool_index = {};
364 struct mlx5_fs_hws_mh_bulk *mh_bulk;
365 int err;
366
367 err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
368 if (err)
369 return ERR_PTR(err);
370 mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
371 fs_bulk);
372 return &mh_bulk->mhs_data[pool_index.index];
373 }
374
mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool * mh_pool,struct mlx5_fs_hws_mh * mh_data)375 void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
376 struct mlx5_fs_hws_mh *mh_data)
377 {
378 struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
379 struct mlx5_fs_pool_index pool_index = {};
380 struct mlx5_core_dev *dev = mh_pool->dev;
381
382 pool_index.fs_bulk = fs_bulk;
383 pool_index.index = mh_data->offset;
384 if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
385 mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
386 }
387
mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool * mh_pool,struct mlx5hws_action_mh_pattern * pattern)388 bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
389 struct mlx5hws_action_mh_pattern *pattern)
390 {
391 struct mlx5hws_action_mh_pattern *pool_pattern;
392 int num_actions, i;
393
394 pool_pattern = mh_pool->pool_ctx;
395 if (WARN_ON_ONCE(!pool_pattern))
396 return false;
397
398 if (pattern->sz != pool_pattern->sz)
399 return false;
400 num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
401 for (i = 0; i < num_actions; i++) {
402 if ((__force __be32)pattern->data[i] !=
403 (__force __be32)pool_pattern->data[i])
404 return false;
405 }
406 return true;
407 }
408
/* Get the HWS counter action shared by @counter's bulk.
 *
 * A local reference is taken on @counter before the lookup and dropped
 * again on failure, so a successful return leaves the caller holding one
 * counter reference — released via mlx5_fc_put_hws_action().
 * Returns NULL if the action could not be obtained.
 */
struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
					      struct mlx5_fc *counter)
{
	struct mlx5_fs_hws_create_action_ctx create_ctx;
	struct mlx5_fc_bulk *fc_bulk = counter->bulk;
	struct mlx5hws_action *hws_action;

	/* Describe the counter action keyed on the bulk's base counter id. */
	create_ctx.hws_ctx = ctx;
	create_ctx.id = fc_bulk->base_id;
	create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;

	mlx5_fc_local_get(counter);
	hws_action = mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
	if (!hws_action)
		mlx5_fc_local_put(counter);
	return hws_action;
}
426
/* Drop the action reference and the counter reference taken in
 * mlx5_fc_get_hws_action().
 */
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
	mlx5_fs_put_hws_action(&counter->bulk->hws_data);
	mlx5_fc_local_put(counter);
}
432