// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"

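/*
 * mlx5 HW Steering backward-compatible (BWC) API: a synchronous,
 * mutex-protected layer on top of the asynchronous HWS rule API.
 * Matchers created here are resizable and are transparently rehashed
 * to a bigger matcher as rules are added, or shrunk back to the
 * initial size once they become empty.
 */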
static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
{
	/* assign random queue */
	return get_random_u8() % mlx5hws_bwc_queues(ctx);
}

static u16
hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
{
	return min(ctx->send_queue[queue_id].num_entries / 2,
		   MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
}

static struct mutex *
hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
{
	return &ctx->bwc_send_queue_locks[idx];
}

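/*
 * Take all BWC queue locks in ascending index order. Paired with
 * hws_bwc_unlock_all_queues() below, which releases them in reverse
 * order, so that lock/unlock of the whole set always nests consistently.
 */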
static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
{
	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
	struct mutex *queue_lock; /* Protect the queue */
	int i;

	for (i = 0; i < bwc_queues; i++) {
		queue_lock = hws_bwc_get_queue_lock(ctx, i);
		mutex_lock(queue_lock);
	}
}

static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
{
	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
	struct mutex *queue_lock; /* Protect the queue */
	int i = bwc_queues;

	while (i--) {
		queue_lock = hws_bwc_get_queue_lock(ctx, i);
		mutex_unlock(queue_lock);
	}
}

static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
				      u32 priority,
				      u8 size_log_rx, u8 size_log_tx,
				      struct mlx5hws_matcher_attr *attr)
{
	struct mlx5hws_bwc_matcher *first_matcher =
		bwc_matcher->complex_first_bwc_matcher;

	memset(attr, 0, sizeof(*attr));

	attr->priority = priority;
	attr->optimize_using_rule_idx = 0;
	attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
	attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
	attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
	attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
	attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = size_log_rx;
	attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
	attr->resizable = true;
	attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;

	attr->isolated_matcher_end_ft_id =
		first_matcher ? first_matcher->matcher->end_ft_id : 0;
}

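/*
 * Move all the rules of a simple (non-complex) matcher to its resized
 * replacement. Rules are moved per BWC queue: moves are posted in bursts
 * of up to hws_bwc_get_burst_th() work entries with completions polled
 * in between, and the queue is flushed and fully drained once the list
 * for that queue is exhausted. The first error of each kind (move, poll,
 * drain) is recorded and reported after the remaining rules have been
 * attempted; only a poll timeout aborts the rehash immediately.
 */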
static int
hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
	int drain_error = 0, move_error = 0, poll_error = 0;
	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
	struct mlx5hws_rule_attr rule_attr;
	struct mlx5hws_bwc_rule *bwc_rule;
	struct mlx5hws_send_engine *queue;
	struct list_head *rules_list;
	u32 pending_rules;
	int i, ret = 0;
	bool drain;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);

	for (i = 0; i < bwc_queues; i++) {
		if (list_empty(&bwc_matcher->rules[i]))
			continue;

		pending_rules = 0;
		rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
		rules_list = &bwc_matcher->rules[i];

		list_for_each_entry(bwc_rule, rules_list, list_node) {
			ret = mlx5hws_matcher_resize_rule_move(matcher,
							       bwc_rule->rule,
							       &rule_attr);
			if (unlikely(ret)) {
				if (!move_error) {
					mlx5hws_err(ctx,
						    "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
						    ret);
					move_error = ret;
				}
				/* Rule wasn't queued, no need to poll */
				continue;
			}

			pending_rules++;
			drain = pending_rules >=
				hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
			ret = mlx5hws_bwc_queue_poll(ctx,
						     rule_attr.queue_id,
						     &pending_rules,
						     drain);
			if (unlikely(ret)) {
				if (ret == -ETIMEDOUT) {
					mlx5hws_err(ctx,
						    "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
						    ret);
					return ret;
				}
				if (!poll_error) {
					mlx5hws_err(ctx,
						    "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
						    ret);
					poll_error = ret;
				}
			}
		}

		if (pending_rules) {
			queue = &ctx->send_queue[rule_attr.queue_id];
			mlx5hws_send_engine_flush_queue(queue);
			ret = mlx5hws_bwc_queue_poll(ctx,
						     rule_attr.queue_id,
						     &pending_rules,
						     true);
			if (unlikely(ret)) {
				if (ret == -ETIMEDOUT) {
					mlx5hws_err(ctx,
147 "Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
148 ret);
149 return ret;
150 }
151 if (!drain_error) {
152 mlx5hws_err(ctx,
153 "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
						    ret);
					drain_error = ret;
				}
			}
		}
	}

	/* Return the first error that happened */
	if (unlikely(move_error))
		return move_error;
	if (unlikely(poll_error))
		return poll_error;
	if (unlikely(drain_error))
		return drain_error;

	return ret;
}

static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	if (!bwc_matcher->complex)
		return hws_bwc_matcher_move_all_simple(bwc_matcher);

	return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
}

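/*
 * Replace the underlying HWS matcher with one built from the current
 * rx_size/tx_size logs: create the new matcher, set it as the resize
 * target of the old one, move all the rules across, then destroy the
 * old matcher. An error while moving can't be rolled back, so the swap
 * is completed regardless and the error is returned to the caller.
 */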
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_matcher_attr matcher_attr = {0};
	struct mlx5hws_matcher *old_matcher;
	struct mlx5hws_matcher *new_matcher;
	int ret;

	hws_bwc_matcher_init_attr(bwc_matcher,
				  bwc_matcher->priority,
				  bwc_matcher->rx_size.size_log,
				  bwc_matcher->tx_size.size_log,
				  &matcher_attr);

	old_matcher = bwc_matcher->matcher;
	new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
					     &bwc_matcher->mt, 1,
					     bwc_matcher->at,
					     bwc_matcher->num_of_at,
					     &matcher_attr);
	if (!new_matcher) {
		mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
		return -ENOMEM;
	}

	ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
	if (ret) {
		mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
		return ret;
	}

	ret = hws_bwc_matcher_move_all(bwc_matcher);
	if (ret)
		mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");

	/* Error during rehash can't be rolled back.
	 * The best option here is to allow the rehash to complete and remove
	 * the old matcher - can't leave the matcher in the 'in_resize' state.
	 */

	bwc_matcher->matcher = new_matcher;
	mlx5hws_matcher_destroy(old_matcher);

	return ret;
}

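/*
 * Initialize a simple (single-STE) BWC matcher: allocate the per-queue
 * rule lists, an action template array seeded with one dummy template,
 * the match template built from @mask, and the underlying HWS matcher
 * sized according to the current RX/TX size logs.
 */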
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
				      struct mlx5hws_table *table,
				      u32 priority,
				      u8 match_criteria_enable,
				      struct mlx5hws_match_parameters *mask,
				      enum mlx5hws_action_type action_types[])
{
	enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
	struct mlx5hws_context *ctx = table->ctx;
	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
	struct mlx5hws_matcher_attr attr = {0};
	int i;

	bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules),
				     GFP_KERNEL);
	if (!bwc_matcher->rules)
		goto err;

	for (i = 0; i < bwc_queues; i++)
		INIT_LIST_HEAD(&bwc_matcher->rules[i]);

	hws_bwc_matcher_init_attr(bwc_matcher,
				  priority,
				  bwc_matcher->rx_size.size_log,
				  bwc_matcher->tx_size.size_log,
				  &attr);

	bwc_matcher->priority = priority;

	bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
	bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
				  sizeof(*bwc_matcher->at), GFP_KERNEL);
	if (!bwc_matcher->at)
		goto free_bwc_matcher_rules;

	/* create dummy action template */
	bwc_matcher->at[0] =
		mlx5hws_action_template_create(action_types ?
					       action_types : init_action_types);
	if (!bwc_matcher->at[0]) {
		mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
		goto free_bwc_matcher_at_array;
	}

	bwc_matcher->num_of_at = 1;

	bwc_matcher->mt = mlx5hws_match_template_create(ctx,
							mask->match_buf,
							mask->match_sz,
							match_criteria_enable);
	if (!bwc_matcher->mt) {
		mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
		goto free_at;
	}

	bwc_matcher->matcher = mlx5hws_matcher_create(table,
						      &bwc_matcher->mt, 1,
						      &bwc_matcher->at[0],
						      bwc_matcher->num_of_at,
						      &attr);
	if (!bwc_matcher->matcher) {
		mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
		goto free_mt;
	}

	return 0;

free_mt:
	mlx5hws_match_template_destroy(bwc_matcher->mt);
free_at:
	mlx5hws_action_template_destroy(bwc_matcher->at[0]);
free_bwc_matcher_at_array:
	kfree(bwc_matcher->at);
free_bwc_matcher_rules:
	kfree(bwc_matcher->rules);
err:
	return -EINVAL;
}

static void
hws_bwc_matcher_init_size_rxtx(struct mlx5hws_bwc_matcher_size *size)
{
	size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
	atomic_set(&size->num_of_rules, 0);
	atomic_set(&size->rehash_required, false);
}

static void hws_bwc_matcher_init_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	hws_bwc_matcher_init_size_rxtx(&bwc_matcher->rx_size);
	hws_bwc_matcher_init_size_rxtx(&bwc_matcher->tx_size);
}

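/*
 * Example (a minimal sketch, not part of this file; all identifiers
 * other than the BWC API calls are hypothetical caller variables):
 * a user typically creates a matcher once and inserts rules through it:
 *
 *	struct mlx5hws_bwc_matcher *m;
 *	struct mlx5hws_bwc_rule *rule;
 *
 *	m = mlx5hws_bwc_matcher_create(table, priority,
 *				       match_criteria_enable, &mask);
 *	if (!m)
 *		return -EINVAL;
 *	rule = mlx5hws_bwc_rule_create(m, &params, flow_source, actions);
 */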
struct mlx5hws_bwc_matcher *
mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
			   u32 priority,
			   u8 match_criteria_enable,
			   struct mlx5hws_match_parameters *mask)
{
	struct mlx5hws_bwc_matcher *bwc_matcher;
	bool is_complex;
	int ret;

	if (!mlx5hws_context_bwc_supported(table->ctx)) {
		mlx5hws_err(table->ctx,
			    "BWC matcher: context created w/o BWC API compatibility\n");
		return NULL;
	}

	bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
	if (!bwc_matcher)
		return NULL;

	hws_bwc_matcher_init_size(bwc_matcher);

	/* Check if the required match params can all be matched
	 * in a single STE; otherwise a complex matcher is needed.
	 */
	is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx,
							 match_criteria_enable,
							 mask);
	if (is_complex)
		ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
							 table,
							 priority,
							 match_criteria_enable,
							 mask);
	else
		ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
							table,
							priority,
							match_criteria_enable,
							mask,
							NULL);
	if (ret)
		goto free_bwc_matcher;

	return bwc_matcher;

free_bwc_matcher:
	kfree(bwc_matcher);

	return NULL;
}

int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	int i;

	mlx5hws_matcher_destroy(bwc_matcher->matcher);
	bwc_matcher->matcher = NULL;

	for (i = 0; i < bwc_matcher->num_of_at; i++)
		mlx5hws_action_template_destroy(bwc_matcher->at[i]);
	kfree(bwc_matcher->at);

	mlx5hws_match_template_destroy(bwc_matcher->mt);
	kfree(bwc_matcher->rules);

	return 0;
}

int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	u32 rx_rules = atomic_read(&bwc_matcher->rx_size.num_of_rules);
	u32 tx_rules = atomic_read(&bwc_matcher->tx_size.num_of_rules);

	if (rx_rules || tx_rules)
		mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
			    "BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
			    rx_rules, tx_rules);

	if (bwc_matcher->complex)
		mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
	else
		mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);

	kfree(bwc_matcher);
	return 0;
}

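/*
 * Poll the send queue for completions. When @drain is false, polling
 * starts only once at least a burst worth of operations is pending;
 * when @drain is true, the queue is polled until *pending_rules drops
 * to zero. Returns 0 on success, -EINVAL on a poll failure or on a
 * completion that carries an error status, and -ETIMEDOUT if no
 * completion arrived within MLX5HWS_BWC_POLLING_TIMEOUT seconds.
 */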
int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
			   u16 queue_id,
			   u32 *pending_rules,
			   bool drain)
{
	unsigned long timeout = jiffies +
				secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
	struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
	u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
	bool got_comp = *pending_rules >= burst_th;
	bool queue_full;
	int err = 0;
	int ret;
	int i;

	/* Check if there are any completions at all */
	if (!got_comp && !drain)
		return 0;

	queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
	while (queue_full || ((got_comp || drain) && *pending_rules)) {
		ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
		if (unlikely(ret < 0)) {
			mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
				    queue_id, ret);
			return -EINVAL;
		}

		if (ret) {
			(*pending_rules) -= ret;
			for (i = 0; i < ret; i++) {
				if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
					mlx5hws_err(ctx,
						    "BWC poll error: polling queue %d returned completion with error\n",
						    queue_id);
					err = -EINVAL;
				}
			}
			queue_full = false;
		}

		got_comp = !!ret;

		if (unlikely(!got_comp && time_after(jiffies, timeout))) {
			mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
			return -ETIMEDOUT;
		}
	}

	return err;
}

void
mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
			   u16 bwc_queue_idx,
			   u32 flow_source,
			   struct mlx5hws_rule_attr *rule_attr)
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;

	/* no use of INSERT_BY_INDEX in bwc rule */
	rule_attr->rule_idx = 0;

	/* notify HW at each rule insertion/deletion */
	rule_attr->burst = 0;

	/* We don't need user data, but the API requires it to exist */
	rule_attr->user_data = (void *)0xFACADE;

	rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
	rule_attr->flow_source = flow_source;
}

struct mlx5hws_bwc_rule *
mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	struct mlx5hws_bwc_rule *bwc_rule;

	bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
	if (unlikely(!bwc_rule))
		goto out_err;

	bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
	if (unlikely(!bwc_rule->rule))
		goto free_rule;

	bwc_rule->bwc_matcher = bwc_matcher;
	return bwc_rule;

free_rule:
	kfree(bwc_rule);
out_err:
	return NULL;
}

void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
{
	/* kfree() is a no-op on NULL, no need to check bwc_rule->rule */
	kfree(bwc_rule->rule);
	kfree(bwc_rule);
}

static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;

	bwc_rule->bwc_queue_idx = idx;
	list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}

static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
	list_del_init(&bwc_rule->list_node);
}

static int
hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
			       struct mlx5hws_rule_attr *attr)
{
	return mlx5hws_rule_destroy(bwc_rule->rule, attr);
}

static int
hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
			      struct mlx5hws_rule_attr *rule_attr)
{
	struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
	u32 expected_completions = 1;
	int ret;

	ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
	if (unlikely(ret))
		return ret;

	ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
				     &expected_completions, true);
	if (unlikely(ret))
		return ret;

	if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
		     bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
		mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
			    bwc_rule->rule->status);
		return -EINVAL;
	}

	return 0;
}

static void hws_bwc_rule_cnt_dec(struct mlx5hws_bwc_rule *bwc_rule)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;

	if (!bwc_rule->skip_rx)
		atomic_dec(&bwc_matcher->rx_size.num_of_rules);
	if (!bwc_rule->skip_tx)
		atomic_dec(&bwc_matcher->tx_size.num_of_rules);
}

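/*
 * Shrink an empty matcher back to its initial size. Called with all
 * queue locks held; re-checks the rule counters under the locks, since
 * another thread may have added a rule after the caller's check, and
 * does nothing if the matcher is already at its initial RX/TX size.
 */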
static int
hws_bwc_matcher_rehash_shrink(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	struct mlx5hws_bwc_matcher_size *rx_size = &bwc_matcher->rx_size;
	struct mlx5hws_bwc_matcher_size *tx_size = &bwc_matcher->tx_size;

	/* It is possible that another thread has added a rule.
	 * Need to check again if we really need rehash/shrink.
	 */
	if (atomic_read(&rx_size->num_of_rules) ||
	    atomic_read(&tx_size->num_of_rules))
		return 0;

	/* If the current matcher RX/TX size is already at its initial size. */
	if (rx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG &&
	    tx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG)
		return 0;

	/* Now we've done all the checking - do the shrinking:
	 *  - reset match RTC size to the initial size
	 *  - create new matcher
	 *  - move the rules, which will not do anything as the matcher is empty
	 *  - destroy the old matcher
	 */

	rx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
	tx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;

	return hws_bwc_matcher_move(bwc_matcher);
}

static int hws_bwc_rule_cnt_dec_with_shrink(struct mlx5hws_bwc_rule *bwc_rule,
					    u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mutex *queue_lock; /* Protect the queue */
	int ret;

	hws_bwc_rule_cnt_dec(bwc_rule);

	if (atomic_read(&bwc_matcher->rx_size.num_of_rules) ||
	    atomic_read(&bwc_matcher->tx_size.num_of_rules))
		return 0;

	/* Matcher has no more rules - shrink it to save ICM. */

	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
	mutex_unlock(queue_lock);

	hws_bwc_lock_all_queues(ctx);
	ret = hws_bwc_matcher_rehash_shrink(bwc_matcher);
	hws_bwc_unlock_all_queues(ctx);

	mutex_lock(queue_lock);

	if (unlikely(ret))
		mlx5hws_err(ctx,
			    "BWC rule deletion: shrinking empty matcher failed (%d)\n",
			    ret);

	return ret;
}

int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	u16 idx = bwc_rule->bwc_queue_idx;
	struct mlx5hws_rule_attr attr;
	struct mutex *queue_lock; /* Protect the queue */
	int ret;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);

	queue_lock = hws_bwc_get_queue_lock(ctx, idx);

	mutex_lock(queue_lock);

	ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
	hws_bwc_rule_list_remove(bwc_rule);
	hws_bwc_rule_cnt_dec_with_shrink(bwc_rule, idx);

	mutex_unlock(queue_lock);

	return ret;
}

int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
	bool is_complex = !!bwc_rule->bwc_matcher->complex;
	int ret = 0;

	if (is_complex)
		ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
	else
		ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);

	mlx5hws_bwc_rule_free(bwc_rule);
	return ret;
}

static int
hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
			  u32 *match_param,
			  u8 at_idx,
			  struct mlx5hws_rule_action rule_actions[],
			  struct mlx5hws_rule_attr *rule_attr)
{
	return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
				   0, /* only one match template supported */
				   match_param,
				   at_idx,
				   rule_actions,
				   rule_attr,
				   bwc_rule->rule);
}

static int
hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
			 u32 *match_param,
			 u8 at_idx,
			 struct mlx5hws_rule_action rule_actions[],
			 struct mlx5hws_rule_attr *rule_attr)
{
	struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
	u32 expected_completions = 1;
	int ret;

	ret = hws_bwc_rule_create_async(bwc_rule, match_param,
					at_idx, rule_actions,
					rule_attr);
	if (unlikely(ret))
		return ret;

	return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
				      &expected_completions, true);
}

static int
hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
			 u8 at_idx,
			 struct mlx5hws_rule_action rule_actions[],
			 struct mlx5hws_rule_attr *rule_attr)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	u32 expected_completions = 1;
	int ret;

	ret = mlx5hws_rule_action_update(bwc_rule->rule,
					 at_idx,
					 rule_actions,
					 rule_attr);
	if (unlikely(ret))
		return ret;

	ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
				     &expected_completions, true);
	if (unlikely(ret))
		mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);

	return ret;
}

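/*
 * Growth threshold check. For illustration (values assumed, see bwc.h
 * for the actual defines): if MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH
 * were 70 and size_log were 10, rehash would be requested once
 * num_of_rules * 100 / 70 reaches 1 << 10, i.e. at about 717 rules -
 * roughly 70% occupancy of a 1024-entry matcher. A matcher whose next
 * growth step would exceed the STE allocation cap is considered maxed
 * out and is never rehashed upward.
 */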
static bool
hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher,
			       struct mlx5hws_bwc_matcher_size *size)
{
	struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;

	/* check the match RTC size */
	return (size->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
		MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
	       (caps->ste_alloc_log_max - 1);
}

static bool
hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
				   struct mlx5hws_bwc_matcher_size *size,
				   u32 num_of_rules)
{
	if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size)))
		return false;

	if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
		     (1UL << size->size_log)))
		return true;

	return false;
}

static void
hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
				     enum mlx5hws_action_type action_types[])
{
	int i = 0;

	for (i = 0;
	     rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
	     i++) {
		action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
	}

	action_types[i] = MLX5HWS_ACTION_TYP_LAST;
}

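/*
 * Grow the action template array by doubling its size (capped at
 * MLX5HWS_MATCHER_MAX_AT) and append a new action template derived
 * from @rule_actions. Called with all queue locks held.
 */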
static int
hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
			  struct mlx5hws_rule_action rule_actions[])
{
	enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
	void *p;

	if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
		if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
			return -ENOMEM;
		bwc_matcher->size_of_at_array *= 2;
		p = krealloc(bwc_matcher->at,
			     bwc_matcher->size_of_at_array *
			     sizeof(*bwc_matcher->at),
			     __GFP_ZERO | GFP_KERNEL);
		if (!p) {
			bwc_matcher->size_of_at_array /= 2;
			return -ENOMEM;
		}

		bwc_matcher->at = p;
	}

	hws_bwc_rule_actions_to_action_types(rule_actions, action_types);

	bwc_matcher->at[bwc_matcher->num_of_at] =
		mlx5hws_action_template_create(action_types);

	if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
		return -ENOMEM;

	bwc_matcher->num_of_at++;
	return 0;
}

static int
hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher,
			    struct mlx5hws_bwc_matcher_size *size)
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size))) {
		mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
			    caps->rtc_log_depth_max);
		return -ENOMEM;
	}

	size->size_log = min(size->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
			     caps->ste_alloc_log_max -
			     MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);

	return 0;
}

static int
hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
			struct mlx5hws_rule_action rule_actions[])
{
	enum mlx5hws_action_type *action_type_arr;
	int i, j;

	/* start from index 1 - first action template is a dummy */
	for (i = 1; i < bwc_matcher->num_of_at; i++) {
		j = 0;
		action_type_arr = bwc_matcher->at[i]->action_type_arr;

		while (rule_actions[j].action &&
		       rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
			if (action_type_arr[j] != rule_actions[j].action->type)
				break;
			j++;
		}

		if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
		    (!rule_actions[j].action ||
		     rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
			return i;
	}

	return -1;
}

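/*
 * Grow the matcher because one (or both) of its RX/TX sides crossed
 * the occupancy threshold. Called with all queue locks held; re-reads
 * the rehash_required flags since another thread may have already
 * rehashed, skips any side that is maxed out, then extends the size
 * log(s) and moves all rules to a freshly created matcher.
 */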
static int
hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	bool need_rx_rehash, need_tx_rehash;
	int ret;

	need_rx_rehash = atomic_read(&bwc_matcher->rx_size.rehash_required);
	need_tx_rehash = atomic_read(&bwc_matcher->tx_size.rehash_required);

	/* It is possible that another rule has already performed rehash.
	 * Need to check again if we really need rehash.
	 */
	if (!need_rx_rehash && !need_tx_rehash)
		return 0;

	/* If the current matcher RX/TX size is already at its max size,
	 * it can't be rehashed.
	 */
	if (need_rx_rehash &&
	    hws_bwc_matcher_size_maxed_out(bwc_matcher,
					   &bwc_matcher->rx_size)) {
		atomic_set(&bwc_matcher->rx_size.rehash_required, false);
		need_rx_rehash = false;
	}
	if (need_tx_rehash &&
	    hws_bwc_matcher_size_maxed_out(bwc_matcher,
					   &bwc_matcher->tx_size)) {
		atomic_set(&bwc_matcher->tx_size.rehash_required, false);
		need_tx_rehash = false;
	}

	/* If both RX and TX rehash flags are now off, it means that whatever
	 * we wanted to rehash is now at its max size - no rehash can be done.
	 * Return and try adding the rule again - perhaps there was some change.
	 */
	if (!need_rx_rehash && !need_tx_rehash)
		return 0;

	/* Now we've done all the checking - do the rehash:
	 *  - extend match RTC size
	 *  - create new matcher
	 *  - move all the rules to the new matcher
	 *  - destroy the old matcher
	 */
	atomic_set(&bwc_matcher->rx_size.rehash_required, false);
	atomic_set(&bwc_matcher->tx_size.rehash_required, false);

	if (need_rx_rehash) {
		ret = hws_bwc_matcher_extend_size(bwc_matcher,
						  &bwc_matcher->rx_size);
		if (ret)
			return ret;
	}

	if (need_tx_rehash) {
		ret = hws_bwc_matcher_extend_size(bwc_matcher,
						  &bwc_matcher->tx_size);
		if (ret)
			return ret;
	}

	return hws_bwc_matcher_move(bwc_matcher);
}

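/*
 * Find the action template index matching @rule_actions, extending the
 * matcher's AT array if no existing template fits. Extending requires
 * holding all queue locks, so the caller's per-queue lock is dropped
 * and re-taken around that path, and the templates are re-checked after
 * re-locking in case another thread extended the array meanwhile.
 * Returns the AT index on success or a negative value on failure.
 */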
static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
				   struct mlx5hws_rule_action rule_actions[],
				   u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mutex *queue_lock; /* Protect the queue */
	int at_idx, ret;

	/* check if rehash needed due to missing action template */
	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
	if (likely(at_idx >= 0))
		return at_idx;

	/* we need to extend BWC matcher action templates array */
	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
	mutex_unlock(queue_lock);
	hws_bwc_lock_all_queues(ctx);

	/* check again - perhaps other thread already did extend_at */
	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
	if (at_idx >= 0)
		goto out;

	ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
	if (unlikely(ret)) {
		mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)\n", ret);
		at_idx = -EINVAL;
		goto out;
	}

	/* action templates array was extended, we need the last idx */
	at_idx = bwc_matcher->num_of_at - 1;
	ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
					bwc_matcher->at[at_idx]);
	if (unlikely(ret)) {
		mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)\n", ret);
		at_idx = -EINVAL;
		goto out;
	}

out:
	hws_bwc_unlock_all_queues(ctx);
	mutex_lock(queue_lock);
	return at_idx;
}

static void hws_bwc_rule_cnt_inc_rxtx(struct mlx5hws_bwc_rule *bwc_rule,
				      struct mlx5hws_bwc_matcher_size *size)
{
	u32 num_of_rules = atomic_inc_return(&size->num_of_rules);

	if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_rule->bwc_matcher,
							size, num_of_rules)))
		atomic_set(&size->rehash_required, true);
}

static void hws_bwc_rule_cnt_inc(struct mlx5hws_bwc_rule *bwc_rule)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;

	if (!bwc_rule->skip_rx)
		hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->rx_size);
	if (!bwc_rule->skip_tx)
		hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->tx_size);
}

static int hws_bwc_rule_cnt_inc_with_rehash(struct mlx5hws_bwc_rule *bwc_rule,
					    u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mutex *queue_lock; /* Protect the queue */
	int ret;

	hws_bwc_rule_cnt_inc(bwc_rule);

	if (!atomic_read(&bwc_matcher->rx_size.rehash_required) &&
	    !atomic_read(&bwc_matcher->tx_size.rehash_required))
		return 0;

	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
	mutex_unlock(queue_lock);

	hws_bwc_lock_all_queues(ctx);
	ret = hws_bwc_matcher_rehash_size(bwc_matcher);
	hws_bwc_unlock_all_queues(ctx);

	mutex_lock(queue_lock);

	if (likely(!ret))
		return 0;

	/* Failed to rehash. Print a diagnostic and rollback the counters. */
	mlx5hws_err(ctx,
		    "BWC rule insertion: rehash to sizes [%d, %d] failed (%d)\n",
		    bwc_matcher->rx_size.size_log,
		    bwc_matcher->tx_size.size_log, ret);
	hws_bwc_rule_cnt_dec(bwc_rule);

	return ret;
}

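/*
 * Insert a rule on a simple matcher. Under the per-queue lock: resolve
 * the action template, bump the rule counters (rehashing by size first
 * if a threshold was crossed), then insert synchronously. If insertion
 * fails for a reason other than a full queue, a timeout or an
 * in-progress resize, the matcher is rehashed once more and the
 * insertion retried - a hash collision may be resolved by the
 * bigger table.
 */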
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
				   u32 *match_param,
				   struct mlx5hws_rule_action rule_actions[],
				   u32 flow_source,
				   u16 bwc_queue_idx)
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_rule_attr rule_attr;
	struct mutex *queue_lock; /* Protect the queue */
	int ret = 0;
	int at_idx;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source,
				   &rule_attr);

	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);

	mutex_lock(queue_lock);

	at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
	if (unlikely(at_idx < 0)) {
		mutex_unlock(queue_lock);
		mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)\n",
			    at_idx);
		return -EINVAL;
	}

	ret = hws_bwc_rule_cnt_inc_with_rehash(bwc_rule, bwc_queue_idx);
	if (unlikely(ret)) {
		mutex_unlock(queue_lock);
		return ret;
	}

	ret = hws_bwc_rule_create_sync(bwc_rule,
				       match_param,
				       at_idx,
				       rule_actions,
				       &rule_attr);
	if (likely(!ret)) {
		hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
		mutex_unlock(queue_lock);
		return 0; /* rule inserted successfully */
	}

	/* Rule insertion could fail due to queue being full, timeout, or
	 * matcher in resize. In such cases, no point in trying to rehash.
	 */
	if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
		mutex_unlock(queue_lock);
		mlx5hws_err(ctx,
			    "BWC rule insertion failed - %s (%d)\n",
			    ret == -EBUSY ? "queue is full" :
			    ret == -ETIMEDOUT ? "timeout" :
			    ret == -EAGAIN ? "matcher in resize" : "N/A",
			    ret);
		hws_bwc_rule_cnt_dec(bwc_rule);
		return ret;
	}

	/* At this point the rule wasn't added.
	 * It could be because there was collision, or some other problem.
	 * Try rehash by size and insert rule again - last chance.
	 */
	if (!bwc_rule->skip_rx)
		atomic_set(&bwc_matcher->rx_size.rehash_required, true);
	if (!bwc_rule->skip_tx)
		atomic_set(&bwc_matcher->tx_size.rehash_required, true);

	mutex_unlock(queue_lock);

	hws_bwc_lock_all_queues(ctx);
	ret = hws_bwc_matcher_rehash_size(bwc_matcher);
	hws_bwc_unlock_all_queues(ctx);

	if (ret) {
		mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
		hws_bwc_rule_cnt_dec(bwc_rule);
		return ret;
	}

	/* Rehash done, but we still have that pesky rule to add */
	mutex_lock(queue_lock);

	ret = hws_bwc_rule_create_sync(bwc_rule,
				       match_param,
				       at_idx,
				       rule_actions,
				       &rule_attr);

	if (unlikely(ret)) {
		mutex_unlock(queue_lock);
		mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
		hws_bwc_rule_cnt_dec(bwc_rule);
		return ret;
	}

	hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
	mutex_unlock(queue_lock);

	return 0;
}

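/*
 * Public entry point for BWC rule creation. Picks a random BWC queue
 * for the rule, computes its RX/TX skip flags from @flow_source, and
 * dispatches to the simple or complex creation path depending on the
 * matcher type. Returns the new rule, or NULL on failure.
 */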
struct mlx5hws_bwc_rule *
mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
			struct mlx5hws_match_parameters *params,
			u32 flow_source,
			struct mlx5hws_rule_action rule_actions[])
{
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_bwc_rule *bwc_rule;
	u16 bwc_queue_idx;
	int ret;

	if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
		mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
		return NULL;
	}

	bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
	if (unlikely(!bwc_rule))
		return NULL;

	bwc_rule->flow_source = flow_source;
	mlx5hws_rule_skip(bwc_matcher->matcher, flow_source,
			  &bwc_rule->skip_rx, &bwc_rule->skip_tx);

	bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);

	if (bwc_matcher->complex)
		ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
						      params,
						      flow_source,
						      rule_actions,
						      bwc_queue_idx);
	else
		ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
						     params->match_buf,
						     rule_actions,
						     flow_source,
						     bwc_queue_idx);
	if (unlikely(ret)) {
		mlx5hws_bwc_rule_free(bwc_rule);
		return NULL;
	}

	return bwc_rule;
}

static int
hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
			   struct mlx5hws_rule_action rule_actions[])
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5hws_rule_attr rule_attr;
	struct mutex *queue_lock; /* Protect the queue */
	int at_idx, ret;
	u16 idx;

	idx = bwc_rule->bwc_queue_idx;

	mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, bwc_rule->flow_source,
				   &rule_attr);
	queue_lock = hws_bwc_get_queue_lock(ctx, idx);

	mutex_lock(queue_lock);

	at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
	if (unlikely(at_idx < 0)) {
		mutex_unlock(queue_lock);
		mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
		return -EINVAL;
	}

	ret = hws_bwc_rule_update_sync(bwc_rule,
				       at_idx,
				       rule_actions,
				       &rule_attr);
	mutex_unlock(queue_lock);

	if (unlikely(ret))
		mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);

	return ret;
}

int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
				   struct mlx5hws_rule_action rule_actions[])
{
	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;

	if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
		mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
		return -EINVAL;
	}

	/* For complex rule, the update should happen on the second matcher */
	if (bwc_rule->isolated_bwc_rule)
		return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
						  rule_actions);
	else
		return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}