// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"

void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
		       bool *skip_rx, bool *skip_tx)
{
	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
		*skip_rx = true;
		return;
	}

	if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
		*skip_tx = true;
		return;
	}

	/* If no flow source was set for the current rule,
	 * check for a flow source in the matcher attributes.
	 */
	*skip_tx = matcher->attr.optimize_flow_src ==
		   MLX5HWS_MATCHER_FLOW_SRC_WIRE;
	*skip_rx = matcher->attr.optimize_flow_src ==
		   MLX5HWS_MATCHER_FLOW_SRC_VPORT;
}

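/* Illustrative example (not part of the flow): an FDB rule inserted with
 * flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT gets skip_rx set,
 * so only its TX-side STE is written, while a matcher created with
 * optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE makes all of its rules
 * skip TX instead.
 */
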
static void
hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
			 struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
			 bool is_jumbo)
{
	struct mlx5hws_rule_match_tag *tag;

	if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
		tag = &rule->tag;
	} else {
		struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
			(struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
		tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
	}

	if (is_jumbo)
		memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
	else
		memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
}

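/* For resizable matchers the match tag is not kept in rule->tag; it lives in
 * the data segment saved by hws_rule_save_resize_info(), which is why the
 * update flow above reads it back from resize_info.
 */
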
static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
				  struct mlx5hws_rule *rule,
				  struct mlx5hws_match_template *mt,
				  struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = attr->user_data;
	dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
				attr->rule_idx : 0;

	if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
		mlx5hws_rule_skip(matcher, attr->flow_source,
				  &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0_id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1_id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}
	} else {
		pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
	}
}

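/* A zero RTC id in the dep_wqe is how "skip this direction" propagates
 * downstream: hws_rule_create_hws() only targets action and match STEs at
 * directions whose RTC id is non-zero.
 */
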
static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
				  struct mlx5hws_send_ste_attr *ste_attr)
{
	struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;

	if (rule->resize_info->rtc_0) {
		ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
		ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
	}
	if (rule->resize_info->rtc_1) {
		ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
		ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
	}
}

static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
			      struct mlx5hws_rule *rule,
			      bool err,
			      void *user_data,
			      enum mlx5hws_rule_status rule_status_on_succ)
{
	enum mlx5hws_flow_op_status comp_status;

	if (!err) {
		comp_status = MLX5HWS_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = MLX5HWS_FLOW_OP_ERROR;
		rule->status = MLX5HWS_RULE_STATUS_FAILED;
	}

	mlx5hws_send_engine_inc_rule(queue);
	mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
}

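/* Completions generated here are consumed by polling the send queue. A
 * minimal sketch, assuming the mlx5hws_send_queue_poll() API and a
 * caller-defined handle() helper:
 *
 *	struct mlx5hws_flow_op_result res[32];
 *	int i, n;
 *
 *	n = mlx5hws_send_queue_poll(ctx, queue_id, res, ARRAY_SIZE(res));
 *	for (i = 0; i < n; i++)
 *		handle(res[i].user_data, res[i].status);
 */
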
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	if (!mlx5hws_matcher_is_resizable(rule->matcher))
		return;

	/* resize_info might already exist (if we're in update flow) */
	if (likely(!rule->resize_info)) {
		rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
		if (unlikely(!rule->resize_info)) {
			pr_warn("HWS: resize info isn't allocated for rule\n");
			return;
		}
	}

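	/* Snapshot the match WQE's control and data segments so the rule can
	 * later be re-written as-is into the destination matcher on resize.
	 */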
	memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
	       sizeof(rule->resize_info->ctrl_seg));
	memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
	       sizeof(rule->resize_info->data_seg));
}

void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
{
	if (mlx5hws_matcher_is_resizable(rule->matcher) &&
	    rule->resize_info) {
		kfree(rule->resize_info);
		rule->resize_info = NULL;
	}
}

static void
hws_rule_save_delete_info(struct mlx5hws_rule *rule,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	struct mlx5hws_match_template *mt = rule->matcher->mt;
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);

	if (mlx5hws_matcher_is_resizable(rule->matcher))
		return;

	if (is_jumbo)
		memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
	else
		memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
}

static void
hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
{
	/* nothing to do here */
}

static void
hws_rule_load_delete_info(struct mlx5hws_rule *rule,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
		ste_attr->wqe_tag = &rule->tag;
	} else {
		struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
			(struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
		struct mlx5hws_rule_match_tag *tag =
			(struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
		ste_attr->wqe_tag = tag;
	}
}

static int mlx5hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
					 u16 queue_id, bool skip_rx,
					 bool skip_tx)
{
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_context *ctx = matcher->tbl->ctx;

	rule->action_ste.ste.order =
		ilog2(roundup_pow_of_two(matcher->num_of_action_stes));
	return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
					      skip_rx, skip_tx,
					      &rule->action_ste);
}

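/* Worked example: a matcher whose rules need at most 5 action STEs allocates
 * chunks of order ilog2(roundup_pow_of_two(5)) = ilog2(8) = 3, i.e. 8 STEs
 * per rule.
 */
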
void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste)
{
	if (!action_ste->action_tbl)
		return;

	/* This release is safe only when the rule match STE was deleted
	 * (when the rule is being deleted) or replaced with the new STE that
	 * isn't pointing to old action STEs (when the rule is being updated).
	 */
	mlx5hws_action_ste_chunk_free(action_ste);
}

static void hws_rule_create_init(struct mlx5hws_rule *rule,
				 struct mlx5hws_send_ste_attr *ste_attr,
				 struct mlx5hws_actions_apply_data *apply,
				 bool is_update)
{
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_table *tbl = matcher->tbl;
	struct mlx5hws_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	if (!is_update) {
		/* In the update flow these RTCs are kept and reused */
		rule->rtc_0 = 0;
		rule->rtc_1 = 0;

		rule->status = MLX5HWS_RULE_STATUS_CREATING;
	} else {
		rule->status = MLX5HWS_RULE_STATUS_UPDATING;
		/* Save the old action STE info so we can free it after writing
		 * new action STEs and a corresponding match STE.
		 */
		rule->old_action_ste = rule->action_ste;
	}

	rule->pending_wqes = 0;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res;
	apply->require_dep = 0;
}

static void hws_rule_move_init(struct mlx5hws_rule *rule,
			       struct mlx5hws_rule_attr *attr)
{
	/* Save the old RTC IDs to be later used in match STE delete */
	rule->resize_info->rtc_0 = rule->rtc_0;
	rule->resize_info->rtc_1 = rule->rtc_1;
	rule->resize_info->rule_idx = attr->rule_idx;

	rule->rtc_0 = 0;
	rule->rtc_1 = 0;

	rule->pending_wqes = 0;
	rule->status = MLX5HWS_RULE_STATUS_CREATING;
	rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}

bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
{
	return mlx5hws_matcher_is_in_resize(rule->matcher) &&
	       rule->resize_info &&
	       rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
}

static int hws_rule_create_hws(struct mlx5hws_rule *rule,
			       struct mlx5hws_rule_attr *attr,
			       u8 mt_idx,
			       u32 *match_param,
			       u8 at_idx,
			       struct mlx5hws_rule_action rule_actions[])
{
	struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_context *ctx = matcher->tbl->ctx;
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_ring_dep_wqe *dep_wqe;
	struct mlx5hws_actions_wqe_setter *setter;
	struct mlx5hws_actions_apply_data apply;
	struct mlx5hws_send_engine *queue;
	u8 total_stes, action_stes;
	bool is_update;
	int i, ret;

	is_update = !match_param;

	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5hws_send_engine_err(queue)))
		return -EIO;

	hws_rule_create_init(rule, &ste_attr, &apply, is_update);

	/* Allocate dependent match WQE since rule might have dependent writes.
	 * The queued dependent WQE can be later aborted or kept as a dependency.
	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
	hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

	if (action_stes) {
		/* Allocate action STEs for rules that need more than the match STE */
		ret = mlx5hws_rule_alloc_action_ste(rule, attr->queue_id,
						    !!ste_attr.rtc_0,
						    !!ste_attr.rtc_1);
		if (ret) {
			mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
			mlx5hws_send_abort_new_dep_wqe(queue);
			return ret;
		}
		apply.jump_to_action_stc =
			rule->action_ste.action_tbl->stc.offset;
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ?
				 rule->action_ste.action_tbl->rtc_0_id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ?
				 rule->action_ste.action_tbl->rtc_1_id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index =
			rule->action_ste.ste.offset + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

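	/* Write the STEs back to front: action STE setters run first, and the
	 * match STE (i == 0) goes last, either riding the queued dep_wqe or
	 * sent directly when the rule has no dependent writes.
	 */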
	for (i = total_stes; i-- > 0;) {
		mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE.
			 * For hash split / linear lookup RTCs, packets reaching any STE
			 * will always match and perform the specified actions, which
			 * makes the tag irrelevant.
			 */
			if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
				mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
							   (u8 *)dep_wqe->wqe_data.action);
			else if (is_update)
				hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5hws_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
			ste_attr.direct_index = dep_wqe->direct_index;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5hws_send_ste(queue, &ste_attr);
	}

	/* Back up the TAG on the rule for deletion, and the resize info for
	 * moving rules to a new matcher, only after insertion.
	 */
	if (!is_update)
		hws_rule_save_delete_info(rule, &ste_attr);

	hws_rule_save_resize_info(rule, &ste_attr);
	mlx5hws_send_engine_inc_rule(queue);

	if (!attr->burst)
		mlx5hws_send_all_dep_wqe(queue);

	return 0;
}

static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
					struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5hws_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	hws_rule_gen_comp(queue, rule, false,
			  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);

	/* Rule failed now we can safely release action STEs */
	mlx5hws_rule_free_action_ste(&rule->action_ste);

	/* Perhaps the rule failed updating - release old action STEs as well */
	mlx5hws_rule_free_action_ste(&rule->old_action_ste);

	/* Clear complex tag */
	hws_rule_clear_delete_info(rule);

	/* Clear info that was saved for resizing */
	mlx5hws_rule_clear_resize_info(rule);

	/* A burst rule never rings the HW on its own, so nothing else is
	 * needed for it here. A non-burst rule (one that needs to trigger HW)
	 * failed before anything was written to the WQ, so flush the pending
	 * dependent WQEs and ring the HW with that work instead.
	 */
	if (attr->burst)
		return;

	mlx5hws_send_all_dep_wqe(queue);
	mlx5hws_send_engine_flush_queue(queue);
}

static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
				struct mlx5hws_rule_attr *attr)
{
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5hws_send_engine_err(queue))) {
		hws_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	/* Rule is not completed yet */
	if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
	    rule->status == MLX5HWS_RULE_STATUS_UPDATING)
		return -EBUSY;

	/* Rule failed and doesn't require cleanup */
	if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
		hws_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	if (rule->skip_delete) {
		/* Rule shouldn't be deleted in HW.
		 * Generate completion as if write succeeded, and we can
		 * safely release action STEs and clear resize info.
		 */
		hws_rule_gen_comp(queue, rule, false,
				  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);

		mlx5hws_rule_free_action_ste(&rule->action_ste);
		mlx5hws_rule_clear_resize_info(rule);
		return 0;
	}

	mlx5hws_send_engine_inc_rule(queue);

	/* Send dependent WQE */
	if (!attr->burst)
		mlx5hws_send_all_dep_wqe(queue);

	rule->status = MLX5HWS_RULE_STATUS_DELETING;

	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
	if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = attr->rule_idx;

	hws_rule_load_delete_info(rule, &ste_attr);
	mlx5hws_send_ste(queue, &ste_attr);
	hws_rule_clear_delete_info(rule);

	return 0;
}

static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
				     struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;

	if (unlikely(!attr->user_data))
		return -EINVAL;

	/* Check if there is room in queue */
	if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
		return -EBUSY;

	return 0;
}

static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
					  struct mlx5hws_rule_attr *attr)
{
	if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
		return -EINVAL;

	return hws_rule_enqueue_precheck(rule, attr);
}

static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
					    struct mlx5hws_rule_attr *attr)
{
	/* Matcher in resize - new rules are not allowed */
	if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
		return -EAGAIN;

	return hws_rule_enqueue_precheck(rule, attr);
}

static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
					    struct mlx5hws_rule_attr *attr)
{
	struct mlx5hws_matcher *matcher = rule->matcher;

	if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
		     !matcher->attr.optimize_using_rule_idx &&
		     !mlx5hws_matcher_is_insert_by_idx(matcher)))
		return -EOPNOTSUPP;

	if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
		return -EBUSY;

	return hws_rule_enqueue_precheck_create(rule, attr);
}

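/* Moving a rule between matchers during resize is a two-step flow: the
 * remove step below deactivates the match STE in the old RTCs saved in
 * resize_info, while mlx5hws_rule_move_hws_add() re-writes the rule's saved
 * WQE into the destination matcher's RTCs.
 */
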
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
				 void *queue_ptr,
				 void *user_data)
{
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_send_engine *queue = queue_ptr;
	struct mlx5hws_send_ste_attr ste_attr = {0};

	mlx5hws_send_all_dep_wqe(queue);

	rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;

	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.send_attr.user_data = user_data;
	ste_attr.rtc_0 = rule->resize_info->rtc_0;
	ste_attr.rtc_1 = rule->resize_info->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
	ste_attr.wqe_ctrl = &empty_wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;

	if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = rule->resize_info->rule_idx;

	hws_rule_load_delete_info(rule, &ste_attr);
	mlx5hws_send_ste(queue, &ste_attr);

	return 0;
}

int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
			      struct mlx5hws_rule_attr *attr)
{
	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5hws_matcher *matcher = rule->matcher;
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_engine *queue;
	int ret;

	ret = hws_rule_enqueue_precheck_move(rule, attr);
	if (unlikely(ret))
		return ret;

	queue = &ctx->send_queue[attr->queue_id];

	ret = mlx5hws_send_engine_err(queue);
	if (ret)
		return ret;

	hws_rule_move_init(rule, attr);
	hws_rule_move_get_rtc(rule, &ste_attr);

	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
	ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
	ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
				attr->rule_idx : 0;

	mlx5hws_send_ste(queue, &ste_attr);
	mlx5hws_send_engine_inc_rule(queue);

	if (!attr->burst)
		mlx5hws_send_all_dep_wqe(queue);

	return 0;
}

static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return MLX5HWS_MATCHER_IPV_4;
	case ETH_P_IPV6:
		return MLX5HWS_MATCHER_IPV_6;
	default:
		return MLX5HWS_MATCHER_IPV_UNSET;
	}
}

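/* E.g. an ethertype of ETH_P_IP (0x0800) maps to MLX5HWS_MATCHER_IPV_4, the
 * same value hws_rule_ip_version_to_matcher_ipv() returns for
 * ip_version == 4; the consistency checks below rely on this.
 */
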
static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version)
{
	switch (ip_version) {
	case 4:
		return MLX5HWS_MATCHER_IPV_4;
	case 6:
		return MLX5HWS_MATCHER_IPV_6;
	default:
		return MLX5HWS_MATCHER_IPV_UNSET;
	}
}

static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher,
					   u32 *match_param)
{
	struct mlx5hws_context *ctx = matcher->tbl->ctx;
	u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
	u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
	u8 outer_ipv, ver;

	if (matcher->matches_outer_ethertype) {
		ver = MLX5_GET(fte_match_param, match_param,
			       outer_headers.ethertype);
		outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
	}
	if (matcher->matches_outer_ip_version) {
		ver = MLX5_GET(fte_match_param, match_param,
			       outer_headers.ip_version);
		outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
	}

	if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
	    outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
	    outer_ipv_ether != outer_ipv_ip) {
		mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n");
		return -EINVAL;
	}

	outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
		    outer_ipv_ether : outer_ipv_ip;
	if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
	    matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
	    outer_ipv != matcher->outer_ip_version) {
		mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n");
		return -EINVAL;
	}
	matcher->outer_ip_version = outer_ipv;

	return 0;
}

static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher,
					   u32 *match_param)
{
	struct mlx5hws_context *ctx = matcher->tbl->ctx;
	u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
	u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
	u8 inner_ipv, ver;

	if (matcher->matches_inner_ethertype) {
		ver = MLX5_GET(fte_match_param, match_param,
			       inner_headers.ethertype);
		inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
	}
	if (matcher->matches_inner_ip_version) {
		ver = MLX5_GET(fte_match_param, match_param,
			       inner_headers.ip_version);
		inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
	}

	if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
	    inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
	    inner_ipv_ether != inner_ipv_ip) {
		mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n");
		return -EINVAL;
	}

	inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
		    inner_ipv_ether : inner_ipv_ip;
	if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
	    matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
	    inner_ipv != matcher->inner_ip_version) {
		mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n");
		return -EINVAL;
	}
	matcher->inner_ip_version = inner_ipv;

	return 0;
}

static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher,
				     u32 *match_param)
{
	int ret;

	ret = hws_rule_check_outer_ip_version(matcher, match_param);
	if (unlikely(ret))
		return ret;

	ret = hws_rule_check_inner_ip_version(matcher, match_param);
	if (unlikely(ret))
		return ret;

	return 0;
}

int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
			u8 mt_idx,
			u32 *match_param,
			u8 at_idx,
			struct mlx5hws_rule_action rule_actions[],
			struct mlx5hws_rule_attr *attr,
			struct mlx5hws_rule *rule_handle)
{
	int ret;

	ret = hws_rule_check_ip_version(matcher, match_param);
	if (unlikely(ret))
		return ret;

	rule_handle->matcher = matcher;

	ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
	if (unlikely(ret))
		return ret;

	/* Reject out-of-range template indices and a missing match param */
	if (unlikely(mt_idx >= matcher->num_of_mt ||
		     at_idx >= matcher->num_of_at ||
		     !match_param)) {
		pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
		return -EINVAL;
	}

	ret = hws_rule_create_hws(rule_handle,
				  attr,
				  mt_idx,
				  match_param,
				  at_idx,
				  rule_actions);

	return ret;
}

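/* Usage sketch (illustrative; everything except the mlx5hws calls and types
 * is hypothetical):
 *
 *	struct mlx5hws_rule_attr attr = {
 *		.queue_id = queue_id,
 *		.user_data = my_cookie,
 *		.burst = false,
 *	};
 *	int ret;
 *
 *	ret = mlx5hws_rule_create(matcher, 0, match_param, 0, rule_actions,
 *				  &attr, &my_rule);
 *
 * The call is asynchronous: a zero return only means the WQEs were queued;
 * the final status is reported through the queue's completion with
 * user_data == my_cookie.
 */
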
int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
			 struct mlx5hws_rule_attr *attr)
{
	int ret;

	ret = hws_rule_enqueue_precheck(rule, attr);
	if (unlikely(ret))
		return ret;

	ret = hws_rule_destroy_hws(rule, attr);

	return ret;
}

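/* Action update re-runs the rule-create flow with a NULL match_param, which
 * hws_rule_create_hws() treats as "update": new action STEs and a new match
 * STE are written, and the old action STEs (saved in old_action_ste by
 * hws_rule_create_init()) can be released once the update completes.
 */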
int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
			       u8 at_idx,
			       struct mlx5hws_rule_action rule_actions[],
			       struct mlx5hws_rule_attr *attr)
{
	int ret;

	ret = hws_rule_enqueue_precheck_update(rule, attr);
	if (unlikely(ret))
		return ret;

	ret = hws_rule_create_hws(rule,
				  attr,
				  0,
				  NULL,
				  at_idx,
				  rule_actions);

	return ret;
}