1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3
4 #include "internal.h"
5
6 static const char *
hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)7 hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)
8 {
9 switch (opt) {
10 case MLX5HWS_POOL_OPTIMIZE_NONE:
11 return "rx-and-tx";
12 case MLX5HWS_POOL_OPTIMIZE_ORIG:
13 return "rx-only";
14 case MLX5HWS_POOL_OPTIMIZE_MIRROR:
15 return "tx-only";
16 default:
17 return "unknown";
18 }
19 }
20
21 static int
hws_action_ste_table_create_pool(struct mlx5hws_context * ctx,struct mlx5hws_action_ste_table * action_tbl,enum mlx5hws_pool_optimize opt,size_t log_sz)22 hws_action_ste_table_create_pool(struct mlx5hws_context *ctx,
23 struct mlx5hws_action_ste_table *action_tbl,
24 enum mlx5hws_pool_optimize opt, size_t log_sz)
25 {
26 struct mlx5hws_pool_attr pool_attr = { 0 };
27
28 pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
29 pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
30 pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY;
31 pool_attr.opt_type = opt;
32 pool_attr.alloc_log_sz = log_sz;
33
34 action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr);
35 if (!action_tbl->pool) {
36 mlx5hws_err(ctx, "Failed to allocate STE pool\n");
37 return -EINVAL;
38 }
39
40 return 0;
41 }
42
/* Create one RTC (RX or TX direction) on top of the table's STE pool.
 *
 * The RX RTC uses the pool's base (original) STE/STC ids, the TX RTC uses
 * the mirror ids. When the table is optimized for the opposite direction
 * only (opt == ORIG for TX, opt == MIRROR for RX), the RTC for this
 * direction is created with log_size 0 so it wastes no entries.
 *
 * On success the new RTC id is stored in action_tbl->rtc_0_id (RX) or
 * action_tbl->rtc_1_id (TX). Returns 0 or a negative error from FW.
 */
static int hws_action_ste_table_create_single_rtc(
	struct mlx5hws_context *ctx,
	struct mlx5hws_action_ste_table *action_tbl,
	enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx)
{
	struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 };
	u32 *rtc_id;

	rtc_attr.log_depth = 0;
	rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
	/* Action STEs use the default always hit definer. */
	rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
	rtc_attr.is_frst_jumbo = false;
	rtc_attr.miss_ft_id = 0;
	rtc_attr.pd = ctx->pd_num;
	rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);

	if (tx) {
		rtc_attr.table_type = FS_FT_FDB_TX;
		rtc_attr.ste_base =
			mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
		rtc_attr.stc_base =
			mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
		/* RX-only tables keep the TX RTC at minimal size. */
		rtc_attr.log_size =
			opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz;
		rtc_id = &action_tbl->rtc_1_id;
	} else {
		rtc_attr.table_type = FS_FT_FDB_RX;
		rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool);
		rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool);
		/* TX-only tables keep the RX RTC at minimal size. */
		rtc_attr.log_size =
			opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz;
		rtc_id = &action_tbl->rtc_0_id;
	}

	return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id);
}
80
81 static int
hws_action_ste_table_create_rtcs(struct mlx5hws_context * ctx,struct mlx5hws_action_ste_table * action_tbl,enum mlx5hws_pool_optimize opt,size_t log_sz)82 hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx,
83 struct mlx5hws_action_ste_table *action_tbl,
84 enum mlx5hws_pool_optimize opt, size_t log_sz)
85 {
86 int err;
87
88 err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
89 log_sz, false);
90 if (err)
91 return err;
92
93 err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
94 log_sz, true);
95 if (err) {
96 mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id);
97 return err;
98 }
99
100 return 0;
101 }
102
103 static void
hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table * action_tbl)104 hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl)
105 {
106 mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
107 action_tbl->rtc_1_id);
108 mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
109 action_tbl->rtc_0_id);
110 }
111
112 static int
hws_action_ste_table_create_stc(struct mlx5hws_context * ctx,struct mlx5hws_action_ste_table * action_tbl)113 hws_action_ste_table_create_stc(struct mlx5hws_context *ctx,
114 struct mlx5hws_action_ste_table *action_tbl)
115 {
116 struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 };
117
118 stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
119 stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
120 stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
121 stc_attr.ste_table.ste_pool = action_tbl->pool;
122 stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
123
124 return mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
125 MLX5HWS_TABLE_TYPE_FDB,
126 &action_tbl->stc);
127 }
128
/* Allocate a new action STE table for the given pool element: STE pool,
 * RX/TX RTCs and a jump STC, then link it onto the element's available
 * list.
 *
 * Sizing policy: the first table of an element uses INIT_LOG_SZ; each
 * subsequent table grows by STEP_LOG_SZ over the element's current
 * log_sz, capped at MAX_LOG_SZ. parent_elem->log_sz is updated to the
 * size actually used.
 *
 * Returns the new table or an ERR_PTR on failure; on error all partially
 * created resources are unwound in reverse order.
 */
static struct mlx5hws_action_ste_table *
hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem)
{
	enum mlx5hws_pool_optimize opt = parent_elem->opt;
	struct mlx5hws_context *ctx = parent_elem->ctx;
	struct mlx5hws_action_ste_table *action_tbl;
	size_t log_sz;
	int err;

	/* log_sz == 0 means no table was allocated for this element yet. */
	log_sz = min(parent_elem->log_sz ?
		     parent_elem->log_sz +
		     MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ :
		     MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ,
		     MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ);

	action_tbl = kzalloc_obj(*action_tbl);
	if (!action_tbl)
		return ERR_PTR(-ENOMEM);

	err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz);
	if (err)
		goto free_tbl;

	err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz);
	if (err)
		goto destroy_pool;

	err = hws_action_ste_table_create_stc(ctx, action_tbl);
	if (err)
		goto destroy_rtcs;

	action_tbl->parent_elem = parent_elem;
	INIT_LIST_HEAD(&action_tbl->list_node);
	/* Freshly allocated tables count as just used for expiry purposes. */
	action_tbl->last_used = jiffies;
	list_add(&action_tbl->list_node, &parent_elem->available);
	parent_elem->log_sz = log_sz;

	mlx5hws_dbg(ctx,
		    "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n",
		    hws_pool_opt_to_str(opt), log_sz,
		    mlx5hws_pool_get_base_id(action_tbl->pool),
		    mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
		    action_tbl->rtc_0_id, action_tbl->rtc_1_id,
		    action_tbl->stc.offset);

	return action_tbl;

destroy_rtcs:
	hws_action_ste_table_destroy_rtcs(action_tbl);
destroy_pool:
	mlx5hws_pool_destroy(action_tbl->pool);
free_tbl:
	kfree(action_tbl);

	return ERR_PTR(err);
}
185
/* Tear down an action STE table: free its STC, RTCs and STE pool (reverse
 * of creation order), unlink it from its element's list and free it.
 * Caller must hold the parent pool's lock or otherwise own the table.
 */
static void
hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl)
{
	struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx;

	mlx5hws_dbg(ctx,
		    "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n",
		    hws_pool_opt_to_str(action_tbl->parent_elem->opt),
		    mlx5hws_pool_get_base_id(action_tbl->pool),
		    mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
		    action_tbl->rtc_0_id, action_tbl->rtc_1_id,
		    action_tbl->stc.offset);

	mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
				       &action_tbl->stc);
	hws_action_ste_table_destroy_rtcs(action_tbl);
	mlx5hws_pool_destroy(action_tbl->pool);

	list_del(&action_tbl->list_node);
	kfree(action_tbl);
}
207
208 static int
hws_action_ste_pool_element_init(struct mlx5hws_context * ctx,struct mlx5hws_action_ste_pool_element * elem,enum mlx5hws_pool_optimize opt)209 hws_action_ste_pool_element_init(struct mlx5hws_context *ctx,
210 struct mlx5hws_action_ste_pool_element *elem,
211 enum mlx5hws_pool_optimize opt)
212 {
213 elem->ctx = ctx;
214 elem->opt = opt;
215 INIT_LIST_HEAD(&elem->available);
216 INIT_LIST_HEAD(&elem->full);
217
218 return 0;
219 }
220
/* Destroy all tables still owned by a pool element, both full and
 * available. Each destroy unlinks the table, so the _safe iterator is
 * required.
 */
static void hws_action_ste_pool_element_destroy(
	struct mlx5hws_action_ste_pool_element *elem)
{
	struct mlx5hws_action_ste_table *action_tbl, *p;

	/* This should be empty, but attempt to free its elements anyway. */
	list_for_each_entry_safe(action_tbl, p, &elem->full, list_node)
		hws_action_ste_table_destroy(action_tbl);

	list_for_each_entry_safe(action_tbl, p, &elem->available, list_node)
		hws_action_ste_table_destroy(action_tbl);
}
233
/* Initialize one per-queue action STE pool: its lock and one element per
 * optimization flavor. On element init failure, already-initialized
 * elements are destroyed in reverse order.
 */
static int hws_action_ste_pool_init(struct mlx5hws_context *ctx,
				    struct mlx5hws_action_ste_pool *pool)
{
	enum mlx5hws_pool_optimize opt;
	int err;

	mutex_init(&pool->lock);

	/* Rules which are added for both RX and TX must use the same action STE
	 * indices for both. If we were to use a single table, then RX-only and
	 * TX-only rules would waste the unused entries. Thus, we use separate
	 * table sets for the three cases.
	 */
	for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
	     opt++) {
		err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt],
						       opt);
		if (err)
			goto destroy_elems;
		pool->elems[opt].parent_pool = pool;
	}

	return 0;

destroy_elems:
	/* Unwind only the elements initialized before the failure. */
	while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE)
		hws_action_ste_pool_element_destroy(&pool->elems[opt]);

	return err;
}
264
hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool * pool)265 static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool)
266 {
267 int opt;
268
269 for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1;
270 opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--)
271 hws_action_ste_pool_element_destroy(&pool->elems[opt]);
272 }
273
/* Move tables that have been idle past the expiry period onto the caller's
 * cleanup list. Only completely unused tables are reclaimed.
 * NOTE(review): assumes mlx5hws_pool_full() means "every chunk is free"
 * (i.e. the pool has its full capacity available) — confirm against the
 * pool implementation. Caller must hold the parent pool's lock.
 */
static void hws_action_ste_pool_element_collect_stale(
	struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup)
{
	struct mlx5hws_action_ste_table *action_tbl, *p;
	unsigned long expire_time, now;

	expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);
	now = jiffies;

	list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) {
		/* Expired when last_used + expire_time is in the past. */
		if (mlx5hws_pool_full(action_tbl->pool) &&
		    time_before(action_tbl->last_used + expire_time, now))
			list_move(&action_tbl->list_node, cleanup);
	}
}
289
hws_action_ste_table_cleanup_list(struct list_head * cleanup)290 static void hws_action_ste_table_cleanup_list(struct list_head *cleanup)
291 {
292 struct mlx5hws_action_ste_table *action_tbl, *p;
293
294 list_for_each_entry_safe(action_tbl, p, cleanup, list_node)
295 hws_action_ste_table_destroy(action_tbl);
296 }
297
/* Periodic delayed work: sweep all per-queue pools for stale tables.
 *
 * Stale tables are collected onto a local list under each pool's lock,
 * but destroyed only after all locks are dropped, keeping the FW
 * teardown commands outside the critical sections. The work then
 * reschedules itself.
 */
static void hws_action_ste_pool_cleanup(struct work_struct *work)
{
	enum mlx5hws_pool_optimize opt;
	struct mlx5hws_context *ctx;
	LIST_HEAD(cleanup);
	int i;

	ctx = container_of(work, struct mlx5hws_context,
			   action_ste_cleanup.work);

	for (i = 0; i < ctx->queues; i++) {
		struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i];

		mutex_lock(&p->lock);
		for (opt = MLX5HWS_POOL_OPTIMIZE_NONE;
		     opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++)
			hws_action_ste_pool_element_collect_stale(
				&p->elems[opt], &cleanup);
		mutex_unlock(&p->lock);
	}

	/* Destroy outside the locks; the tables are already unlinked. */
	hws_action_ste_table_cleanup_list(&cleanup);

	schedule_delayed_work(&ctx->action_ste_cleanup,
			      secs_to_jiffies(
				      MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
}
325
/* Allocate and initialize one action STE pool per queue and kick off the
 * periodic stale-table cleanup work.
 *
 * Returns 0 on success or a negative errno; on failure all pools
 * initialized so far are destroyed and the array freed.
 */
int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx)
{
	struct mlx5hws_action_ste_pool *pool;
	size_t queues = ctx->queues;
	int i, err;

	pool = kzalloc_objs(*pool, queues);
	if (!pool)
		return -ENOMEM;

	for (i = 0; i < queues; i++) {
		err = hws_action_ste_pool_init(ctx, &pool[i]);
		if (err)
			goto free_pool;
	}

	ctx->action_ste_pool = pool;

	INIT_DELAYED_WORK(&ctx->action_ste_cleanup,
			  hws_action_ste_pool_cleanup);
	schedule_delayed_work(
		&ctx->action_ste_cleanup,
		secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));

	return 0;

free_pool:
	/* Unwind only the pools that were initialized before the failure. */
	while (i--)
		hws_action_ste_pool_destroy(&pool[i]);
	kfree(pool);

	return err;
}
359
mlx5hws_action_ste_pool_uninit(struct mlx5hws_context * ctx)360 void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx)
361 {
362 size_t queues = ctx->queues;
363 int i;
364
365 cancel_delayed_work_sync(&ctx->action_ste_cleanup);
366
367 for (i = 0; i < queues; i++)
368 hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]);
369
370 kfree(ctx->action_ste_pool);
371 }
372
373 static struct mlx5hws_action_ste_pool_element *
hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool * pool,bool skip_rx,bool skip_tx)374 hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool,
375 bool skip_rx, bool skip_tx)
376 {
377 if (skip_rx)
378 return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR];
379
380 if (skip_tx)
381 return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG];
382
383 return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE];
384 }
385
386 static int
hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table * action_tbl,struct mlx5hws_action_ste_chunk * chunk)387 hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table *action_tbl,
388 struct mlx5hws_action_ste_chunk *chunk)
389 {
390 int err;
391
392 err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste);
393 if (err)
394 return err;
395
396 chunk->action_tbl = action_tbl;
397 action_tbl->last_used = jiffies;
398
399 return 0;
400 }
401
/* Allocate a chunk of action STEs from the pool, for a rule that skips at
 * most one direction.
 *
 * Under the pool lock: first try every table on the chosen element's
 * available list; if none can satisfy the request, grow the element with
 * a new (larger) table and allocate from that. A table whose pool becomes
 * empty (no free STEs left) is moved to the element's full list so it is
 * skipped by future allocations until a chunk is freed back to it.
 *
 * Returns 0 on success or a negative errno. skip_rx && skip_tx is
 * rejected with -EINVAL.
 */
int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
				   bool skip_rx, bool skip_tx,
				   struct mlx5hws_action_ste_chunk *chunk)
{
	struct mlx5hws_action_ste_pool_element *elem;
	struct mlx5hws_action_ste_table *action_tbl;
	bool found;
	int err;

	if (skip_rx && skip_tx)
		return -EINVAL;

	mutex_lock(&pool->lock);

	elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx);

	mlx5hws_dbg(elem->ctx,
		    "Allocating action STEs skip_rx %d skip_tx %d order %d\n",
		    skip_rx, skip_tx, chunk->ste.order);

	found = false;
	list_for_each_entry(action_tbl, &elem->available, list_node) {
		if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* No existing table had room; grow the element. */
		action_tbl = hws_action_ste_table_alloc(elem);
		if (IS_ERR(action_tbl)) {
			err = PTR_ERR(action_tbl);
			goto out;
		}

		err = hws_action_ste_table_chunk_alloc(action_tbl, chunk);
		if (err)
			goto out;
	}

	/* An exhausted table goes on the full list until a chunk is freed. */
	if (mlx5hws_pool_empty(action_tbl->pool))
		list_move(&action_tbl->list_node, &elem->full);

	err = 0;

out:
	mutex_unlock(&pool->lock);

	return err;
}
452
mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk * chunk)453 void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk)
454 {
455 struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock;
456
457 mlx5hws_dbg(chunk->action_tbl->pool->ctx,
458 "Freeing action STEs offset %d order %d\n",
459 chunk->ste.offset, chunk->ste.order);
460
461 mutex_lock(lock);
462 mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste);
463 chunk->action_tbl->last_used = jiffies;
464 list_move(&chunk->action_tbl->list_node,
465 &chunk->action_tbl->parent_elem->available);
466 mutex_unlock(lock);
467 }
468