// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"
5
hws_bwc_gen_queue_idx(struct mlx5hws_context * ctx)6 static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
7 {
8 /* assign random queue */
9 return get_random_u8() % mlx5hws_bwc_queues(ctx);
10 }
11
12 static u16
hws_bwc_get_burst_th(struct mlx5hws_context * ctx,u16 queue_id)13 hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
14 {
15 return min(ctx->send_queue[queue_id].num_entries / 2,
16 MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
17 }
18
19 static struct mutex *
hws_bwc_get_queue_lock(struct mlx5hws_context * ctx,u16 idx)20 hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
21 {
22 return &ctx->bwc_send_queue_locks[idx];
23 }
24
hws_bwc_lock_all_queues(struct mlx5hws_context * ctx)25 static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
26 {
27 u16 bwc_queues = mlx5hws_bwc_queues(ctx);
28 struct mutex *queue_lock; /* Protect the queue */
29 int i;
30
31 for (i = 0; i < bwc_queues; i++) {
32 queue_lock = hws_bwc_get_queue_lock(ctx, i);
33 mutex_lock(queue_lock);
34 }
35 }
36
/* Release every BWC queue lock, in reverse of the order they were
 * taken by hws_bwc_lock_all_queues().
 */
static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
{
	int i;

	for (i = mlx5hws_bwc_queues(ctx) - 1; i >= 0; i--)
		mutex_unlock(hws_bwc_get_queue_lock(ctx, i));
}
48
/* Fill @attr with the standard BWC matcher attributes: hash-based
 * insert/distribute, resizable, with the given priority and per-side
 * RX/TX size logs. All unmentioned fields are zeroed.
 */
static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
				      u32 priority,
				      u8 size_log_rx, u8 size_log_tx,
				      struct mlx5hws_matcher_attr *attr)
{
	/* Compound-literal assignment zero-fills every field not listed. */
	*attr = (struct mlx5hws_matcher_attr) {
		.priority = priority,
		.optimize_using_rule_idx = 0,
		.mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
		.optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY,
		.insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH,
		.distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH,
		.resizable = true,
		.max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM,
	};
	attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = size_log_rx;
	attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
}
67
/* Move all rules of a simple BWC matcher into its resize-target matcher,
 * one BWC queue at a time.
 *
 * Moves are posted asynchronously; completions are polled in bursts and
 * whatever is still in flight per queue is flushed and drained before
 * moving on. Non-fatal move/poll/drain errors are logged once per kind
 * and the function keeps moving the remaining rules; a poll timeout
 * aborts the whole rehash immediately.
 *
 * Returns 0 on success, otherwise the first error that occurred
 * (move errors take precedence over poll errors over drain errors).
 */
static int
hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
	int drain_error = 0, move_error = 0, poll_error = 0;
	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
	struct mlx5hws_rule_attr rule_attr;
	struct mlx5hws_bwc_rule *bwc_rule;
	struct mlx5hws_send_engine *queue;
	struct list_head *rules_list;
	u32 pending_rules;
	int i, ret = 0;
	bool drain;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);

	for (i = 0; i < bwc_queues; i++) {
		if (list_empty(&bwc_matcher->rules[i]))
			continue;

		pending_rules = 0;
		rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
		rules_list = &bwc_matcher->rules[i];

		list_for_each_entry(bwc_rule, rules_list, list_node) {
			ret = mlx5hws_matcher_resize_rule_move(matcher,
							       bwc_rule->rule,
							       &rule_attr);
			if (unlikely(ret)) {
				if (!move_error) {
					mlx5hws_err(ctx,
						    "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
						    ret);
					move_error = ret;
				}
				/* Rule wasn't queued, no need to poll */
				continue;
			}

			pending_rules++;
			/* Only force-drain once a full burst is in flight */
			drain = pending_rules >=
				hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
			ret = mlx5hws_bwc_queue_poll(ctx,
						     rule_attr.queue_id,
						     &pending_rules,
						     drain);
			if (unlikely(ret)) {
				if (ret == -ETIMEDOUT) {
					mlx5hws_err(ctx,
						    "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
						    ret);
					return ret;
				}
				if (!poll_error) {
					mlx5hws_err(ctx,
						    "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
						    ret);
					poll_error = ret;
				}
			}
		}

		if (pending_rules) {
			/* Flush and drain the leftovers of this queue
			 * before switching to the next one.
			 */
			queue = &ctx->send_queue[rule_attr.queue_id];
			mlx5hws_send_engine_flush_queue(queue);
			ret = mlx5hws_bwc_queue_poll(ctx,
						     rule_attr.queue_id,
						     &pending_rules,
						     true);
			if (unlikely(ret)) {
				if (ret == -ETIMEDOUT) {
					mlx5hws_err(ctx,
						    "Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
						    ret);
					return ret;
				}
				if (!drain_error) {
					mlx5hws_err(ctx,
						    "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
						    ret);
					drain_error = ret;
				}
			}
		}
	}

	/* Return the first error that happened */
	if (unlikely(move_error))
		return move_error;
	if (unlikely(poll_error))
		return poll_error;
	if (unlikely(drain_error))
		return drain_error;

	return ret;
}
165
hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher * bwc_matcher)166 static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
167 {
168 switch (bwc_matcher->matcher_type) {
169 case MLX5HWS_BWC_MATCHER_SIMPLE:
170 return hws_bwc_matcher_move_all_simple(bwc_matcher);
171 case MLX5HWS_BWC_MATCHER_COMPLEX_FIRST:
172 return mlx5hws_bwc_matcher_complex_move_first(bwc_matcher);
173 case MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER:
174 return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
175 default:
176 return -EINVAL;
177 }
178 }
179
hws_bwc_matcher_move(struct mlx5hws_bwc_matcher * bwc_matcher)180 static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
181 {
182 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
183 struct mlx5hws_matcher_attr matcher_attr = {0};
184 struct mlx5hws_matcher *old_matcher;
185 struct mlx5hws_matcher *new_matcher;
186 int ret;
187
188 hws_bwc_matcher_init_attr(bwc_matcher,
189 bwc_matcher->priority,
190 bwc_matcher->rx_size.size_log,
191 bwc_matcher->tx_size.size_log,
192 &matcher_attr);
193
194 old_matcher = bwc_matcher->matcher;
195 new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
196 &bwc_matcher->mt, 1,
197 bwc_matcher->at,
198 bwc_matcher->num_of_at,
199 &matcher_attr);
200 if (!new_matcher) {
201 mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
202 return -ENOMEM;
203 }
204
205 ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
206 if (ret) {
207 mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
208 return ret;
209 }
210
211 ret = hws_bwc_matcher_move_all(bwc_matcher);
212 if (ret)
213 mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
214
215 /* Error during rehash can't be rolled back.
216 * The best option here is to allow the rehash to complete and remove
217 * the old matcher - can't leave the matcher in the 'in_resize' state.
218 */
219
220 bwc_matcher->matcher = new_matcher;
221 mlx5hws_matcher_destroy(old_matcher);
222
223 return ret;
224 }
225
/* Initialize an already-allocated BWC matcher as a simple (single-STE)
 * matcher: allocate per-queue rule lists, create the first action
 * template (a dummy one, unless @action_types is given), the match
 * template and the underlying HWS matcher.
 *
 * Returns 0 on success, -EINVAL on any failure.
 * NOTE(review): allocation failures also return -EINVAL rather than
 * -ENOMEM; callers only check for non-zero, but worth confirming.
 */
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
				      struct mlx5hws_table *table,
				      u32 priority,
				      u8 match_criteria_enable,
				      struct mlx5hws_match_parameters *mask,
				      enum mlx5hws_action_type action_types[])
{
	enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
	struct mlx5hws_context *ctx = table->ctx;
	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
	struct mlx5hws_matcher_attr attr = {0};
	int i;

	/* One rule list per BWC queue */
	bwc_matcher->rules = kzalloc_objs(*bwc_matcher->rules, bwc_queues);
	if (!bwc_matcher->rules)
		goto err;

	for (i = 0; i < bwc_queues; i++)
		INIT_LIST_HEAD(&bwc_matcher->rules[i]);

	hws_bwc_matcher_init_attr(bwc_matcher,
				  priority,
				  bwc_matcher->rx_size.size_log,
				  bwc_matcher->tx_size.size_log,
				  &attr);

	bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_SIMPLE;
	bwc_matcher->priority = priority;

	/* AT array starts with room for the initial attach quota */
	bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
	bwc_matcher->at = kzalloc_objs(*bwc_matcher->at,
				       bwc_matcher->size_of_at_array);
	if (!bwc_matcher->at)
		goto free_bwc_matcher_rules;

	/* create dummy action template */
	bwc_matcher->at[0] =
		mlx5hws_action_template_create(action_types ?
					       action_types : init_action_types);
	if (!bwc_matcher->at[0]) {
		mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
		goto free_bwc_matcher_at_array;
	}

	bwc_matcher->num_of_at = 1;

	bwc_matcher->mt = mlx5hws_match_template_create(ctx,
							mask->match_buf,
							mask->match_sz,
							match_criteria_enable);
	if (!bwc_matcher->mt) {
		mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
		goto free_at;
	}

	bwc_matcher->matcher = mlx5hws_matcher_create(table,
						      &bwc_matcher->mt, 1,
						      &bwc_matcher->at[0],
						      bwc_matcher->num_of_at,
						      &attr);
	if (!bwc_matcher->matcher) {
		mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
		goto free_mt;
	}

	return 0;

/* Unwind in reverse order of creation */
free_mt:
	mlx5hws_match_template_destroy(bwc_matcher->mt);
free_at:
	mlx5hws_action_template_destroy(bwc_matcher->at[0]);
free_bwc_matcher_at_array:
	kfree(bwc_matcher->at);
free_bwc_matcher_rules:
	kfree(bwc_matcher->rules);
err:
	return -EINVAL;
}
304
305 static void
hws_bwc_matcher_init_size_rxtx(struct mlx5hws_bwc_matcher_size * size)306 hws_bwc_matcher_init_size_rxtx(struct mlx5hws_bwc_matcher_size *size)
307 {
308 size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
309 atomic_set(&size->num_of_rules, 0);
310 atomic_set(&size->rehash_required, false);
311 }
312
hws_bwc_matcher_init_size(struct mlx5hws_bwc_matcher * bwc_matcher)313 static void hws_bwc_matcher_init_size(struct mlx5hws_bwc_matcher *bwc_matcher)
314 {
315 hws_bwc_matcher_init_size_rxtx(&bwc_matcher->rx_size);
316 hws_bwc_matcher_init_size_rxtx(&bwc_matcher->tx_size);
317 }
318
319 struct mlx5hws_bwc_matcher *
mlx5hws_bwc_matcher_create(struct mlx5hws_table * table,u32 priority,u8 match_criteria_enable,struct mlx5hws_match_parameters * mask)320 mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
321 u32 priority,
322 u8 match_criteria_enable,
323 struct mlx5hws_match_parameters *mask)
324 {
325 struct mlx5hws_bwc_matcher *bwc_matcher;
326 bool is_complex;
327 int ret;
328
329 if (!mlx5hws_context_bwc_supported(table->ctx)) {
330 mlx5hws_err(table->ctx,
331 "BWC matcher: context created w/o BWC API compatibility\n");
332 return NULL;
333 }
334
335 bwc_matcher = kzalloc_obj(*bwc_matcher);
336 if (!bwc_matcher)
337 return NULL;
338
339 hws_bwc_matcher_init_size(bwc_matcher);
340
341 /* Check if the required match params can be all matched
342 * in single STE, otherwise complex matcher is needed.
343 */
344
345 is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
346 if (is_complex)
347 ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
348 table,
349 priority,
350 match_criteria_enable,
351 mask);
352 else
353 ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
354 table,
355 priority,
356 match_criteria_enable,
357 mask,
358 NULL);
359 if (ret)
360 goto free_bwc_matcher;
361
362 return bwc_matcher;
363
364 free_bwc_matcher:
365 kfree(bwc_matcher);
366
367 return NULL;
368 }
369
mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher * bwc_matcher)370 int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
371 {
372 int i;
373
374 mlx5hws_matcher_destroy(bwc_matcher->matcher);
375 bwc_matcher->matcher = NULL;
376
377 for (i = 0; i < bwc_matcher->num_of_at; i++)
378 mlx5hws_action_template_destroy(bwc_matcher->at[i]);
379 kfree(bwc_matcher->at);
380
381 mlx5hws_match_template_destroy(bwc_matcher->mt);
382 kfree(bwc_matcher->rules);
383
384 return 0;
385 }
386
mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher * bwc_matcher)387 int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
388 {
389 u32 rx_rules = atomic_read(&bwc_matcher->rx_size.num_of_rules);
390 u32 tx_rules = atomic_read(&bwc_matcher->tx_size.num_of_rules);
391
392 if (rx_rules || tx_rules)
393 mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
394 "BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
395 rx_rules, tx_rules);
396
397 if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
398 mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
399 else
400 mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
401
402 kfree(bwc_matcher);
403 return 0;
404 }
405
/* Poll queue @queue_id for completions of previously posted operations.
 *
 * @pending_rules is decremented by the number of completions reaped.
 * When @drain is false, the function returns immediately unless at
 * least a burst worth of completions is expected; when @drain is true
 * it polls until @pending_rules drops to zero. Polling also continues
 * while the send queue is full, regardless of @drain.
 *
 * Returns 0 on success, -EINVAL if polling failed or any completion
 * carried an error status, or -ETIMEDOUT if no completion arrived
 * within MLX5HWS_BWC_POLLING_TIMEOUT seconds.
 */
int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
			   u16 queue_id,
			   u32 *pending_rules,
			   bool drain)
{
	unsigned long timeout = jiffies +
				secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
	struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
	u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
	bool got_comp = *pending_rules >= burst_th;
	bool queue_full;
	int err = 0;
	int ret;
	int i;

	/* Check if there are any completions at all */
	if (!got_comp && !drain)
		return 0;

	queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
	while (queue_full || ((got_comp || drain) && *pending_rules)) {
		ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
		if (unlikely(ret < 0)) {
			mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
				    queue_id, ret);
			return -EINVAL;
		}

		if (ret) {
			(*pending_rules) -= ret;
			/* A failed completion doesn't stop the loop; the
			 * error is remembered and returned at the end.
			 */
			for (i = 0; i < ret; i++) {
				if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
					mlx5hws_err(ctx,
						    "BWC poll error: polling queue %d returned completion with error\n",
						    queue_id);
					err = -EINVAL;
				}
			}
			queue_full = false;
		}

		got_comp = !!ret;

		/* The timeout only triggers on idle iterations - as long
		 * as completions keep arriving, polling continues.
		 */
		if (unlikely(!got_comp && time_after(jiffies, timeout))) {
			mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
			return -ETIMEDOUT;
		}
	}

	return err;
}
457
458 void
mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher * bwc_matcher,u16 bwc_queue_idx,u32 flow_source,struct mlx5hws_rule_attr * rule_attr)459 mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
460 u16 bwc_queue_idx,
461 u32 flow_source,
462 struct mlx5hws_rule_attr *rule_attr)
463 {
464 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
465
466 /* no use of INSERT_BY_INDEX in bwc rule */
467 rule_attr->rule_idx = 0;
468
469 /* notify HW at each rule insertion/deletion */
470 rule_attr->burst = 0;
471
472 /* We don't need user data, but the API requires it to exist */
473 rule_attr->user_data = (void *)0xFACADE;
474
475 rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
476 rule_attr->flow_source = flow_source;
477 }
478
479 struct mlx5hws_bwc_rule *
mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher * bwc_matcher)480 mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
481 {
482 struct mlx5hws_bwc_rule *bwc_rule;
483
484 bwc_rule = kzalloc_obj(*bwc_rule);
485 if (unlikely(!bwc_rule))
486 goto out_err;
487
488 bwc_rule->rule = kzalloc_obj(*bwc_rule->rule);
489 if (unlikely(!bwc_rule->rule))
490 goto free_rule;
491
492 bwc_rule->bwc_matcher = bwc_matcher;
493 return bwc_rule;
494
495 free_rule:
496 kfree(bwc_rule);
497 out_err:
498 return NULL;
499 }
500
mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule * bwc_rule)501 void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
502 {
503 if (likely(bwc_rule->rule))
504 kfree(bwc_rule->rule);
505 kfree(bwc_rule);
506 }
507
/* Record that @bwc_rule belongs to BWC queue @idx and link it into that
 * queue's rule list.
 */
static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
	struct list_head *queue_rules = &bwc_rule->bwc_matcher->rules[idx];

	bwc_rule->bwc_queue_idx = idx;
	list_add(&bwc_rule->list_node, queue_rules);
}
515
/* Unlink @bwc_rule from its queue's rule list. list_del_init() leaves
 * the node self-linked, so a subsequent removal is harmless.
 */
static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
	list_del_init(&bwc_rule->list_node);
}
520
/* Post an asynchronous deletion of the underlying HWS rule; completion
 * must be collected by the caller via mlx5hws_bwc_queue_poll().
 */
static int
hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
			       struct mlx5hws_rule_attr *attr)
{
	return mlx5hws_rule_destroy(bwc_rule->rule, attr);
}
527
528 static int
hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule * bwc_rule,struct mlx5hws_rule_attr * rule_attr)529 hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
530 struct mlx5hws_rule_attr *rule_attr)
531 {
532 struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
533 u32 expected_completions = 1;
534 int ret;
535
536 ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
537 if (unlikely(ret))
538 return ret;
539
540 ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
541 &expected_completions, true);
542 if (unlikely(ret))
543 return ret;
544
545 if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
546 bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
547 mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
548 bwc_rule->rule->status);
549 return -EINVAL;
550 }
551
552 return 0;
553 }
554
hws_bwc_rule_cnt_dec(struct mlx5hws_bwc_rule * bwc_rule)555 static void hws_bwc_rule_cnt_dec(struct mlx5hws_bwc_rule *bwc_rule)
556 {
557 struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
558
559 if (!bwc_rule->skip_rx)
560 atomic_dec(&bwc_matcher->rx_size.num_of_rules);
561 if (!bwc_rule->skip_tx)
562 atomic_dec(&bwc_matcher->tx_size.num_of_rules);
563 }
564
565 static int
hws_bwc_matcher_rehash_shrink(struct mlx5hws_bwc_matcher * bwc_matcher)566 hws_bwc_matcher_rehash_shrink(struct mlx5hws_bwc_matcher *bwc_matcher)
567 {
568 struct mlx5hws_bwc_matcher_size *rx_size = &bwc_matcher->rx_size;
569 struct mlx5hws_bwc_matcher_size *tx_size = &bwc_matcher->tx_size;
570
571 /* It is possible that another thread has added a rule.
572 * Need to check again if we really need rehash/shrink.
573 */
574 if (atomic_read(&rx_size->num_of_rules) ||
575 atomic_read(&tx_size->num_of_rules))
576 return 0;
577
578 /* If the current matcher RX/TX size is already at its initial size. */
579 if (rx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG &&
580 tx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG)
581 return 0;
582
583 /* Now we've done all the checking - do the shrinking:
584 * - reset match RTC size to the initial size
585 * - create new matcher
586 * - move the rules, which will not do anything as the matcher is empty
587 * - destroy the old matcher
588 */
589
590 rx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
591 tx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
592
593 return hws_bwc_matcher_move(bwc_matcher);
594 }
595
/* Decrement the rule counters for @bwc_rule and, if the matcher became
 * completely empty, shrink it back to its initial size to save ICM.
 *
 * Called with the per-queue lock of @bwc_queue_idx held. Shrinking
 * requires exclusive ownership of the matcher, so the queue lock is
 * dropped and all queue locks are taken instead; the original queue
 * lock is re-acquired before returning.
 *
 * Returns 0 on success or the shrink error code.
 */
static int hws_bwc_rule_cnt_dec_with_shrink(struct mlx5hws_bwc_rule *bwc_rule,
					    u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mutex *queue_lock; /* Protect the queue */
	int ret;

	hws_bwc_rule_cnt_dec(bwc_rule);

	if (atomic_read(&bwc_matcher->rx_size.num_of_rules) ||
	    atomic_read(&bwc_matcher->tx_size.num_of_rules))
		return 0;

	/* Matcher has no more rules - shrink it to save ICM. */

	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
	mutex_unlock(queue_lock);

	hws_bwc_lock_all_queues(ctx);
	ret = hws_bwc_matcher_rehash_shrink(bwc_matcher);
	hws_bwc_unlock_all_queues(ctx);

	mutex_lock(queue_lock);

	if (unlikely(ret))
		mlx5hws_err(ctx,
			    "BWC rule deletion: shrinking empty matcher failed (%d)\n",
			    ret);

	return ret;
}
628
/* Destroy a rule of a simple BWC matcher: synchronously delete the HWS
 * rule, unlink it from its queue's rule list and update the counters
 * (possibly shrinking a now-empty matcher). All done under the rule's
 * per-queue lock.
 *
 * Returns the HWS deletion status.
 * NOTE(review): a shrink failure is logged inside
 * hws_bwc_rule_cnt_dec_with_shrink() but not propagated here - confirm
 * this is intended.
 */
int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	u16 idx = bwc_rule->bwc_queue_idx;
	struct mlx5hws_rule_attr attr;
	struct mutex *queue_lock; /* Protect the queue */
	int ret;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);

	queue_lock = hws_bwc_get_queue_lock(ctx, idx);

	mutex_lock(queue_lock);

	ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
	hws_bwc_rule_list_remove(bwc_rule);
	hws_bwc_rule_cnt_dec_with_shrink(bwc_rule, idx);

	mutex_unlock(queue_lock);

	return ret;
}
652
mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule * bwc_rule)653 int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
654 {
655 bool is_complex = bwc_rule->bwc_matcher->matcher_type ==
656 MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
657 int ret = 0;
658
659 if (is_complex)
660 ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
661 else
662 ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
663
664 mlx5hws_bwc_rule_free(bwc_rule);
665 return ret;
666 }
667
/* Post an asynchronous creation of the underlying HWS rule using action
 * template @at_idx; completion must be collected by the caller via
 * mlx5hws_bwc_queue_poll().
 */
static int
hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
			  u32 *match_param,
			  u8 at_idx,
			  struct mlx5hws_rule_action rule_actions[],
			  struct mlx5hws_rule_attr *rule_attr)
{
	return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
				   0, /* only one match template supported */
				   match_param,
				   at_idx,
				   rule_actions,
				   rule_attr,
				   bwc_rule->rule);
}
683
684 static int
hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule * bwc_rule,u32 * match_param,u8 at_idx,struct mlx5hws_rule_action rule_actions[],struct mlx5hws_rule_attr * rule_attr)685 hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
686 u32 *match_param,
687 u8 at_idx,
688 struct mlx5hws_rule_action rule_actions[],
689 struct mlx5hws_rule_attr *rule_attr)
690
691 {
692 struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
693 u32 expected_completions = 1;
694 int ret;
695
696 ret = hws_bwc_rule_create_async(bwc_rule, match_param,
697 at_idx, rule_actions,
698 rule_attr);
699 if (unlikely(ret))
700 return ret;
701
702 return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
703 &expected_completions, true);
704 }
705
706 static int
hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule * bwc_rule,u8 at_idx,struct mlx5hws_rule_action rule_actions[],struct mlx5hws_rule_attr * rule_attr)707 hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
708 u8 at_idx,
709 struct mlx5hws_rule_action rule_actions[],
710 struct mlx5hws_rule_attr *rule_attr)
711 {
712 struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
713 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
714 u32 expected_completions = 1;
715 int ret;
716
717 ret = mlx5hws_rule_action_update(bwc_rule->rule,
718 at_idx,
719 rule_actions,
720 rule_attr);
721 if (unlikely(ret))
722 return ret;
723
724 ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
725 &expected_completions, true);
726 if (unlikely(ret))
727 mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
728
729 return ret;
730 }
731
732 static bool
hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher * bwc_matcher,struct mlx5hws_bwc_matcher_size * size)733 hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher,
734 struct mlx5hws_bwc_matcher_size *size)
735 {
736 struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
737
738 /* check the match RTC size */
739 return (size->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
740 MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
741 (caps->ste_alloc_log_max - 1);
742 }
743
744 static bool
hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher * bwc_matcher,struct mlx5hws_bwc_matcher_size * size,u32 num_of_rules)745 hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
746 struct mlx5hws_bwc_matcher_size *size,
747 u32 num_of_rules)
748 {
749 if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size)))
750 return false;
751
752 if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
753 (1UL << size->size_log)))
754 return true;
755
756 return false;
757 }
758
759 static void
hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],enum mlx5hws_action_type action_types[])760 hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
761 enum mlx5hws_action_type action_types[])
762 {
763 int i = 0;
764
765 for (i = 0;
766 rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
767 i++) {
768 action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
769 }
770
771 action_types[i] = MLX5HWS_ACTION_TYP_LAST;
772 }
773
/* Append a new action template built from @rule_actions to the
 * matcher's AT array, doubling the array first if it is full (up to
 * MLX5HWS_MATCHER_MAX_AT entries).
 *
 * Must be called with all queue locks held.
 * Returns 0 on success, -ENOMEM if the array hit its maximum size, an
 * allocation failed, or the template could not be created.
 */
static int
hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
			  struct mlx5hws_rule_action rule_actions[])
{
	enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
	void *p;

	if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
		if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
			return -ENOMEM;
		bwc_matcher->size_of_at_array *= 2;
		/* krealloc into a temporary so the old array survives
		 * a failed reallocation.
		 */
		p = krealloc(bwc_matcher->at,
			     bwc_matcher->size_of_at_array *
				     sizeof(*bwc_matcher->at),
			     __GFP_ZERO | GFP_KERNEL);
		if (!p) {
			/* Roll back the speculative size doubling */
			bwc_matcher->size_of_at_array /= 2;
			return -ENOMEM;
		}

		bwc_matcher->at = p;
	}

	hws_bwc_rule_actions_to_action_types(rule_actions, action_types);

	bwc_matcher->at[bwc_matcher->num_of_at] =
		mlx5hws_action_template_create(action_types);

	if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
		return -ENOMEM;

	bwc_matcher->num_of_at++;
	return 0;
}
808
/* Grow @size->size_log by one step, clamped so the matcher stays
 * within the device's STE allocation limit. Returns -ENOMEM if the
 * size is already maxed out.
 *
 * NOTE(review): the error message reports caps->rtc_log_depth_max while
 * the actual limit checked (in hws_bwc_matcher_size_maxed_out) is
 * caps->ste_alloc_log_max - confirm which value should be printed.
 */
static int
hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher,
			    struct mlx5hws_bwc_matcher_size *size)
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size))) {
		mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
			    caps->rtc_log_depth_max);
		return -ENOMEM;
	}

	size->size_log = min(size->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
			     caps->ste_alloc_log_max -
			     MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);

	return 0;
}
828
/* Find an existing action template whose action types exactly match
 * @rule_actions. Returns the AT index, or -1 if no template matches.
 */
static int
hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
			struct mlx5hws_rule_action rule_actions[])
{
	enum mlx5hws_action_type *action_type_arr;
	int i, j;

	/* start from index 1 - first action template is a dummy */
	for (i = 1; i < bwc_matcher->num_of_at; i++) {
		j = 0;
		action_type_arr = bwc_matcher->at[i]->action_type_arr;

		/* Walk both lists while the action types keep matching */
		while (rule_actions[j].action &&
		       rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
			if (action_type_arr[j] != rule_actions[j].action->type)
				break;
			j++;
		}

		/* It's a match only if both lists terminate at the same
		 * position: the template ends with TYP_LAST and the rule
		 * actions end with NULL or TYP_LAST.
		 */
		if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
		    (!rule_actions[j].action ||
		     rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
			return i;
	}

	return -1;
}
856
/* Grow the matcher (rehash) if either side's rehash_required flag is
 * set. Must be called with all queue locks held.
 *
 * Sides that are already at their maximum size have their flag cleared
 * and are skipped. If anything is left to grow, both flags are cleared,
 * the relevant size logs are extended and the rules are moved to a new,
 * bigger matcher.
 *
 * Returns 0 if no rehash was needed or it succeeded, negative errno
 * otherwise.
 */
static int
hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	bool need_rx_rehash, need_tx_rehash;
	int ret;

	need_rx_rehash = atomic_read(&bwc_matcher->rx_size.rehash_required);
	need_tx_rehash = atomic_read(&bwc_matcher->tx_size.rehash_required);

	/* It is possible that another rule has already performed rehash.
	 * Need to check again if we really need rehash.
	 */
	if (!need_rx_rehash && !need_tx_rehash)
		return 0;

	/* If the current matcher RX/TX size is already at its max size,
	 * it can't be rehashed.
	 */
	if (need_rx_rehash &&
	    hws_bwc_matcher_size_maxed_out(bwc_matcher,
					   &bwc_matcher->rx_size)) {
		atomic_set(&bwc_matcher->rx_size.rehash_required, false);
		need_rx_rehash = false;
	}
	if (need_tx_rehash &&
	    hws_bwc_matcher_size_maxed_out(bwc_matcher,
					   &bwc_matcher->tx_size)) {
		atomic_set(&bwc_matcher->tx_size.rehash_required, false);
		need_tx_rehash = false;
	}

	/* If both RX and TX rehash flags are now off, it means that whatever
	 * we wanted to rehash is now at its max size - no rehash can be done.
	 * Return and try adding the rule again - perhaps there was some change.
	 */
	if (!need_rx_rehash && !need_tx_rehash)
		return 0;

	/* Now we're done all the checking - do the rehash:
	 * - extend match RTC size
	 * - create new matcher
	 * - move all the rules to the new matcher
	 * - destroy the old matcher
	 */
	atomic_set(&bwc_matcher->rx_size.rehash_required, false);
	atomic_set(&bwc_matcher->tx_size.rehash_required, false);

	if (need_rx_rehash) {
		ret = hws_bwc_matcher_extend_size(bwc_matcher,
						  &bwc_matcher->rx_size);
		if (ret)
			return ret;
	}

	if (need_tx_rehash) {
		ret = hws_bwc_matcher_extend_size(bwc_matcher,
						  &bwc_matcher->tx_size);
		if (ret)
			return ret;
	}

	return hws_bwc_matcher_move(bwc_matcher);
}
920
/* Get the index of an action template that fits @rule_actions,
 * extending the matcher's AT array with a new template if none exists.
 *
 * Called with the per-queue lock of @bwc_queue_idx held. Extending the
 * AT array requires exclusive ownership of the matcher, so the queue
 * lock is dropped and all queue locks are taken instead; the original
 * queue lock is re-acquired before returning.
 *
 * Returns a non-negative AT index on success, -EINVAL on failure.
 */
static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
				   struct mlx5hws_rule_action rule_actions[],
				   u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mutex *queue_lock; /* Protect the queue */
	int at_idx, ret;

	/* check if rehash needed due to missing action template */
	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
	if (likely(at_idx >= 0))
		return at_idx;

	/* we need to extend BWC matcher action templates array */
	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
	mutex_unlock(queue_lock);
	hws_bwc_lock_all_queues(ctx);

	/* check again - perhaps other thread already did extend_at */
	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
	if (at_idx >= 0)
		goto out;

	ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
	if (unlikely(ret)) {
		mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
		at_idx = -EINVAL;
		goto out;
	}

	/* action templates array was extended, we need the last idx */
	at_idx = bwc_matcher->num_of_at - 1;
	ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
					bwc_matcher->at[at_idx]);
	if (unlikely(ret)) {
		mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
		at_idx = -EINVAL;
		goto out;
	}

out:
	hws_bwc_unlock_all_queues(ctx);
	mutex_lock(queue_lock);
	return at_idx;
}
967
hws_bwc_rule_cnt_inc_rxtx(struct mlx5hws_bwc_rule * bwc_rule,struct mlx5hws_bwc_matcher_size * size)968 static void hws_bwc_rule_cnt_inc_rxtx(struct mlx5hws_bwc_rule *bwc_rule,
969 struct mlx5hws_bwc_matcher_size *size)
970 {
971 u32 num_of_rules = atomic_inc_return(&size->num_of_rules);
972
973 if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_rule->bwc_matcher,
974 size, num_of_rules)))
975 atomic_set(&size->rehash_required, true);
976 }
977
hws_bwc_rule_cnt_inc(struct mlx5hws_bwc_rule * bwc_rule)978 static void hws_bwc_rule_cnt_inc(struct mlx5hws_bwc_rule *bwc_rule)
979 {
980 struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
981
982 if (!bwc_rule->skip_rx)
983 hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->rx_size);
984 if (!bwc_rule->skip_tx)
985 hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->tx_size);
986 }
987
/* Increment the matcher's rule counters and, if either direction was
 * flagged as needing a rehash, perform the size rehash.
 *
 * Called with the per-queue lock for @bwc_queue_idx held. Rehashing
 * needs exclusive access, so the caller's queue lock is dropped while
 * all queue locks are taken, then re-acquired - the caller's locking
 * state is unchanged on return.
 *
 * Return: 0 on success. On rehash failure the counters incremented here
 * are rolled back and the rehash error is returned.
 */
static int hws_bwc_rule_cnt_inc_with_rehash(struct mlx5hws_bwc_rule *bwc_rule,
					    u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mutex *queue_lock; /* Protect the queue */
	int ret;

	hws_bwc_rule_cnt_inc(bwc_rule);

	/* fast path: no rehash pending in either direction */
	if (!atomic_read(&bwc_matcher->rx_size.rehash_required) &&
	    !atomic_read(&bwc_matcher->tx_size.rehash_required))
		return 0;

	/* rehash requires all queue locks - drop ours first to keep
	 * the global lock order (all-queues lock taken without any
	 * single queue lock held)
	 */
	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
	mutex_unlock(queue_lock);

	hws_bwc_lock_all_queues(ctx);
	ret = hws_bwc_matcher_rehash_size(bwc_matcher);
	hws_bwc_unlock_all_queues(ctx);

	mutex_lock(queue_lock);

	if (likely(!ret))
		return 0;

	/* Failed to rehash. Print a diagnostic and rollback the counters. */
	mlx5hws_err(ctx,
		    "BWC rule insertion: rehash to sizes [%d, %d] failed (%d)\n",
		    bwc_matcher->rx_size.size_log,
		    bwc_matcher->tx_size.size_log, ret);
	hws_bwc_rule_cnt_dec(bwc_rule);

	return ret;
}
1023
/* Insert a rule through the backward-compatible (BWC) API.
 *
 * Flow: resolve (possibly creating) an action template, account the new
 * rule (rehashing the matcher if it grew past threshold), then insert
 * synchronously. If insertion fails for a reason other than queue
 * full / timeout / matcher-in-resize, force a rehash by size and retry
 * the insertion once as a last chance.
 *
 * Fix vs. previous revision: the AT-failure log printed @ret, which is
 * still its initializer (0) at that point - log @at_idx instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
				   u32 *match_param,
				   struct mlx5hws_rule_action rule_actions[],
				   u32 flow_source,
				   u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_rule_attr rule_attr;
	struct mutex *queue_lock; /* Protect the queue */
	int ret = 0;
	int at_idx;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);

	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);

	mutex_lock(queue_lock);

	at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
	if (unlikely(at_idx < 0)) {
		mutex_unlock(queue_lock);
		/* log the actual error code, not the uninitialized ret */
		mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
			    at_idx);
		return -EINVAL;
	}

	ret = hws_bwc_rule_cnt_inc_with_rehash(bwc_rule, bwc_queue_idx);
	if (unlikely(ret)) {
		mutex_unlock(queue_lock);
		return ret;
	}

	ret = hws_bwc_rule_create_sync(bwc_rule,
				       match_param,
				       at_idx,
				       rule_actions,
				       &rule_attr);
	if (likely(!ret)) {
		hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
		mutex_unlock(queue_lock);
		return 0; /* rule inserted successfully */
	}

	/* Rule insertion could fail due to queue being full, timeout, or
	 * matcher in resize. In such cases, no point in trying to rehash.
	 */
	if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
		mutex_unlock(queue_lock);
		mlx5hws_err(ctx,
			    "BWC rule insertion failed - %s (%d)\n",
			    ret == -EBUSY ? "queue is full" :
			    ret == -ETIMEDOUT ? "timeout" :
			    ret == -EAGAIN ? "matcher in resize" : "N/A",
			    ret);
		hws_bwc_rule_cnt_dec(bwc_rule);
		return ret;
	}

	/* At this point the rule wasn't added.
	 * It could be because there was collision, or some other problem.
	 * Try rehash by size and insert rule again - last chance.
	 */
	if (!bwc_rule->skip_rx)
		atomic_set(&bwc_matcher->rx_size.rehash_required, true);
	if (!bwc_rule->skip_tx)
		atomic_set(&bwc_matcher->tx_size.rehash_required, true);

	mutex_unlock(queue_lock);

	hws_bwc_lock_all_queues(ctx);
	ret = hws_bwc_matcher_rehash_size(bwc_matcher);
	hws_bwc_unlock_all_queues(ctx);

	if (ret) {
		mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
		hws_bwc_rule_cnt_dec(bwc_rule);
		return ret;
	}

	/* Rehash done, but we still have that pesky rule to add */
	mutex_lock(queue_lock);

	ret = hws_bwc_rule_create_sync(bwc_rule,
				       match_param,
				       at_idx,
				       rule_actions,
				       &rule_attr);

	if (unlikely(ret)) {
		mutex_unlock(queue_lock);
		mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
		hws_bwc_rule_cnt_dec(bwc_rule);
		return ret;
	}

	hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
	mutex_unlock(queue_lock);

	return 0;
}
1125
1126 struct mlx5hws_bwc_rule *
mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher * bwc_matcher,struct mlx5hws_match_parameters * params,u32 flow_source,struct mlx5hws_rule_action rule_actions[])1127 mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
1128 struct mlx5hws_match_parameters *params,
1129 u32 flow_source,
1130 struct mlx5hws_rule_action rule_actions[])
1131 {
1132 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
1133 struct mlx5hws_bwc_rule *bwc_rule;
1134 u16 bwc_queue_idx;
1135 int ret;
1136
1137 if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
1138 mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
1139 return NULL;
1140 }
1141
1142 bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
1143 if (unlikely(!bwc_rule))
1144 return NULL;
1145
1146 bwc_rule->flow_source = flow_source;
1147 mlx5hws_rule_skip(bwc_matcher->matcher, flow_source,
1148 &bwc_rule->skip_rx, &bwc_rule->skip_tx);
1149
1150 bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
1151
1152 if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
1153 ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
1154 params,
1155 flow_source,
1156 rule_actions,
1157 bwc_queue_idx);
1158 else
1159 ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
1160 params->match_buf,
1161 rule_actions,
1162 flow_source,
1163 bwc_queue_idx);
1164 if (unlikely(ret)) {
1165 mlx5hws_bwc_rule_free(bwc_rule);
1166 return NULL;
1167 }
1168
1169 return bwc_rule;
1170 }
1171
1172 static int
hws_bwc_rule_action_update(struct mlx5hws_bwc_rule * bwc_rule,struct mlx5hws_rule_action rule_actions[])1173 hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
1174 struct mlx5hws_rule_action rule_actions[])
1175 {
1176 struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
1177 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
1178 struct mlx5hws_rule_attr rule_attr;
1179 struct mutex *queue_lock; /* Protect the queue */
1180 int at_idx, ret;
1181 u16 idx;
1182
1183 idx = bwc_rule->bwc_queue_idx;
1184
1185 mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, bwc_rule->flow_source,
1186 &rule_attr);
1187 queue_lock = hws_bwc_get_queue_lock(ctx, idx);
1188
1189 mutex_lock(queue_lock);
1190
1191 at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
1192 if (unlikely(at_idx < 0)) {
1193 mutex_unlock(queue_lock);
1194 mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
1195 return -EINVAL;
1196 }
1197
1198 ret = hws_bwc_rule_update_sync(bwc_rule,
1199 at_idx,
1200 rule_actions,
1201 &rule_attr);
1202 mutex_unlock(queue_lock);
1203
1204 if (unlikely(ret))
1205 mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
1206
1207 return ret;
1208 }
1209
mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule * bwc_rule,struct mlx5hws_rule_action rule_actions[])1210 int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
1211 struct mlx5hws_rule_action rule_actions[])
1212 {
1213 struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
1214 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
1215
1216 if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
1217 mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
1218 return -EINVAL;
1219 }
1220
1221 /* For complex rules, the update should happen on the last subrule. */
1222 while (bwc_rule->next_subrule)
1223 bwc_rule = bwc_rule->next_subrule;
1224
1225 return hws_bwc_rule_action_update(bwc_rule, rule_actions);
1226 }
1227