xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
3 
4 #include "mlx5hws_internal.h"
5 
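/* Decide whether an FDB rule should skip its RX and/or TX side, based on
 * the per-rule flow source when one is given, otherwise on the matcher's
 * optimize_flow_src attribute.
 */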
6 static void hws_rule_skip(struct mlx5hws_matcher *matcher,
7 			  struct mlx5hws_match_template *mt,
8 			  u32 flow_source,
9 			  bool *skip_rx, bool *skip_tx)
10 {
11 	/* By default FDB rules are added to both RX and TX */
12 	*skip_rx = false;
13 	*skip_tx = false;
14 
15 	if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
16 		*skip_rx = true;
17 	} else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
18 		*skip_tx = true;
19 	} else {
20 		/* If no flow source was set for current rule,
21 		 * check for flow source in matcher attributes.
22 		 */
23 		if (matcher->attr.optimize_flow_src) {
24 			*skip_tx =
25 				matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
26 			*skip_rx =
27 				matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
28 			return;
29 		}
30 	}
31 }
32 
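/* On rule update, copy the previously saved match tag into the new WQE data
 * segment; for resizable matchers the tag is recovered from the data segment
 * kept in resize_info.
 */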
33 static void
34 hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
35 			 struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
36 			 bool is_jumbo)
37 {
38 	struct mlx5hws_rule_match_tag *tag;
39 
40 	if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
41 		tag = &rule->tag;
42 	} else {
43 		struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
44 			(struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
45 		tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
46 	}
47 
48 	if (is_jumbo)
49 		memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
50 	else
51 		memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
52 }
53 
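/* Initialize the dependent match WQE with the rule's target RTC IDs and the
 * collision matcher retry RTCs, honoring the RX/TX skip decision. Only FDB
 * tables are expected here; any other table type just logs a warning.
 */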
54 static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
55 				  struct mlx5hws_rule *rule,
56 				  struct mlx5hws_match_template *mt,
57 				  struct mlx5hws_rule_attr *attr)
58 {
59 	struct mlx5hws_matcher *matcher = rule->matcher;
60 	struct mlx5hws_table *tbl = matcher->tbl;
61 	bool skip_rx, skip_tx;
62 
63 	dep_wqe->rule = rule;
64 	dep_wqe->user_data = attr->user_data;
65 	dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
66 				attr->rule_idx : 0;
67 
68 	if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
69 		hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
70 
71 		if (!skip_rx) {
72 			dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
73 			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
74 					       matcher->col_matcher->match_ste.rtc_0_id : 0;
75 		} else {
76 			dep_wqe->rtc_0 = 0;
77 			dep_wqe->retry_rtc_0 = 0;
78 		}
79 
80 		if (!skip_tx) {
81 			dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
82 			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
83 					       matcher->col_matcher->match_ste.rtc_1_id : 0;
84 		} else {
85 			dep_wqe->rtc_1 = 0;
86 			dep_wqe->retry_rtc_1 = 0;
87 		}
88 	} else {
89 		pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
90 	}
91 }
92 
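/* Pick the RTC IDs (and collision matcher retry RTCs) of the resize
 * destination matcher for whichever RX/TX sides the rule was originally
 * written to.
 */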
93 static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
94 				  struct mlx5hws_send_ste_attr *ste_attr)
95 {
96 	struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
97 
98 	if (rule->resize_info->rtc_0) {
99 		ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
100 		ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
101 					dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
102 	}
103 	if (rule->resize_info->rtc_1) {
104 		ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
105 		ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
106 					dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
107 	}
108 }
109 
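/* Set the final rule status and generate a completion on the send engine so
 * the caller polling the queue can observe the result of the operation.
 */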
110 static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
111 			      struct mlx5hws_rule *rule,
112 			      bool err,
113 			      void *user_data,
114 			      enum mlx5hws_rule_status rule_status_on_succ)
115 {
116 	enum mlx5hws_flow_op_status comp_status;
117 
118 	if (!err) {
119 		comp_status = MLX5HWS_FLOW_OP_SUCCESS;
120 		rule->status = rule_status_on_succ;
121 	} else {
122 		comp_status = MLX5HWS_FLOW_OP_ERROR;
123 		rule->status = MLX5HWS_RULE_STATUS_FAILED;
124 	}
125 
126 	mlx5hws_send_engine_inc_rule(queue);
127 	mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
128 }
129 
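/* For resizable matchers, keep copies of the WQE control/data segments and
 * the original action STE pools, so the rule can later be moved to a resized
 * matcher and its old resources can still be released.
 */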
130 static void
131 hws_rule_save_resize_info(struct mlx5hws_rule *rule,
132 			  struct mlx5hws_send_ste_attr *ste_attr,
133 			  bool is_update)
134 {
135 	if (!mlx5hws_matcher_is_resizable(rule->matcher))
136 		return;
137 
138 	if (likely(!is_update)) {
139 		rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
140 		if (unlikely(!rule->resize_info)) {
141 			pr_warn("HWS: resize info isn't allocated for rule\n");
142 			return;
143 		}
144 
145 		rule->resize_info->max_stes =
146 			rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
147 		rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
148 							rule->matcher->action_ste[0].pool :
149 							NULL;
150 		rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
151 							rule->matcher->action_ste[1].pool :
152 							NULL;
153 	}
154 
155 	memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
156 	       sizeof(rule->resize_info->ctrl_seg));
157 	memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
158 	       sizeof(rule->resize_info->data_seg));
159 }
160 
161 void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
162 {
163 	if (mlx5hws_matcher_is_resizable(rule->matcher) &&
164 	    rule->resize_info) {
165 		kfree(rule->resize_info);
166 		rule->resize_info = NULL;
167 	}
168 }
169 
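/* Save the match tag needed for deleting the rule later. Resizable matchers
 * already keep the full data segment in resize_info, so nothing extra is
 * saved for them.
 */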
170 static void
171 hws_rule_save_delete_info(struct mlx5hws_rule *rule,
172 			  struct mlx5hws_send_ste_attr *ste_attr)
173 {
174 	struct mlx5hws_match_template *mt = rule->matcher->mt;
175 	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
176 
177 	if (mlx5hws_matcher_is_resizable(rule->matcher))
178 		return;
179 
180 	if (is_jumbo)
181 		memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
182 	else
183 		memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
184 }
185 
186 static void
187 hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
188 {
189 	/* nothing to do here */
190 }
191 
192 static void
193 hws_rule_load_delete_info(struct mlx5hws_rule *rule,
194 			  struct mlx5hws_send_ste_attr *ste_attr)
195 {
196 	if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
197 		ste_attr->wqe_tag = &rule->tag;
198 	} else {
199 		struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
200 			(struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
201 		struct mlx5hws_rule_match_tag *tag =
202 			(struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
203 		ste_attr->wqe_tag = tag;
204 	}
205 }
206 
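/* Allocate a chunk of action STEs for the rule from the pool selected by
 * action_ste_selector and remember its offset in rule->action_ste_idx.
 */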
207 static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
208 					 u8 action_ste_selector)
209 {
210 	struct mlx5hws_matcher *matcher = rule->matcher;
211 	struct mlx5hws_matcher_action_ste *action_ste;
212 	struct mlx5hws_pool_chunk ste = {0};
213 	int ret;
214 
215 	action_ste = &matcher->action_ste[action_ste_selector];
216 	ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
217 	ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
218 	if (unlikely(ret)) {
219 		mlx5hws_err(matcher->tbl->ctx,
220 			    "Failed to allocate STE for rule actions");
221 		return ret;
222 	}
223 	rule->action_ste_idx = ste.offset;
224 
225 	return 0;
226 }
227 
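/* Return the rule's action STE chunk to the pool it came from. For
 * resizable matchers the pool and chunk size are taken from resize_info,
 * since the matcher itself may already have been resized.
 */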
228 static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
229 					 u8 action_ste_selector)
230 {
231 	struct mlx5hws_matcher *matcher = rule->matcher;
232 	struct mlx5hws_pool_chunk ste = {0};
233 	struct mlx5hws_pool *pool;
234 	u8 max_stes;
235 
236 	if (mlx5hws_matcher_is_resizable(matcher)) {
237 		/* Free the original action pool if rule was resized */
238 		max_stes = rule->resize_info->max_stes;
239 		pool = rule->resize_info->action_ste_pool[action_ste_selector];
240 	} else {
241 		max_stes = matcher->action_ste[action_ste_selector].max_stes;
242 		pool = matcher->action_ste[action_ste_selector].pool;
243 	}
244 
245 	/* This release is safe only when the rule match part was deleted */
246 	ste.order = ilog2(roundup_pow_of_two(max_stes));
247 	ste.offset = rule->action_ste_idx;
248 
249 	mlx5hws_pool_chunk_free(pool, &ste);
250 }
251 
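/* Allocate action STEs from both selector pools (0 and 1). The two pools
 * are expected to return the same offset, since the rule keeps only a
 * single action_ste_idx.
 */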
252 static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
253 				     struct mlx5hws_rule_attr *attr)
254 {
255 	int action_ste_idx;
256 	int ret;
257 
258 	ret = hws_rule_alloc_action_ste_idx(rule, 0);
259 	if (unlikely(ret))
260 		return ret;
261 
262 	action_ste_idx = rule->action_ste_idx;
263 
264 	ret = hws_rule_alloc_action_ste_idx(rule, 1);
265 	if (unlikely(ret)) {
266 		hws_rule_free_action_ste_idx(rule, 0);
267 		return ret;
268 	}
269 
270 	/* Both pools have to return the same index */
271 	if (unlikely(rule->action_ste_idx != action_ste_idx)) {
272 		pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
273 		return -EINVAL;
274 	}
275 
276 	return 0;
277 }
278 
279 void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
280 {
281 	if (rule->action_ste_idx > -1) {
282 		hws_rule_free_action_ste_idx(rule, 1);
283 		hws_rule_free_action_ste_idx(rule, 0);
284 	}
285 }
286 
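/* Reset the rule state and fill in the default send STE attributes and
 * action apply data. On update the action STE selector is flipped so the
 * updated actions are written to the alternate action STE set.
 */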
287 static void hws_rule_create_init(struct mlx5hws_rule *rule,
288 				 struct mlx5hws_send_ste_attr *ste_attr,
289 				 struct mlx5hws_actions_apply_data *apply,
290 				 bool is_update)
291 {
292 	struct mlx5hws_matcher *matcher = rule->matcher;
293 	struct mlx5hws_table *tbl = matcher->tbl;
294 	struct mlx5hws_context *ctx = tbl->ctx;
295 
296 	/* Init rule before reuse */
297 	if (!is_update) {
298 		/* On update the existing RTC IDs are reused, so only reset them on create */
299 		rule->rtc_0 = 0;
300 		rule->rtc_1 = 0;
301 		rule->action_ste_selector = 0;
302 	} else {
303 		rule->action_ste_selector = !rule->action_ste_selector;
304 	}
305 
306 	rule->pending_wqes = 0;
307 	rule->action_ste_idx = -1;
308 	rule->status = MLX5HWS_RULE_STATUS_CREATING;
309 
310 	/* Init default send STE attributes */
311 	ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
312 	ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
313 	ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
314 	ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
315 
316 	/* Init default action apply */
317 	apply->tbl_type = tbl->type;
318 	apply->common_res = &ctx->common_res[tbl->type];
319 	apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
320 	apply->require_dep = 0;
321 }
322 
323 static void hws_rule_move_init(struct mlx5hws_rule *rule,
324 			       struct mlx5hws_rule_attr *attr)
325 {
326 	/* Save the old RTC IDs to be later used in match STE delete */
327 	rule->resize_info->rtc_0 = rule->rtc_0;
328 	rule->resize_info->rtc_1 = rule->rtc_1;
329 	rule->resize_info->rule_idx = attr->rule_idx;
330 
331 	rule->rtc_0 = 0;
332 	rule->rtc_1 = 0;
333 
334 	rule->pending_wqes = 0;
335 	rule->action_ste_idx = -1;
336 	rule->action_ste_selector = 0;
337 	rule->status = MLX5HWS_RULE_STATUS_CREATING;
338 	rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
339 }
340 
341 bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
342 {
343 	return mlx5hws_matcher_is_in_resize(rule->matcher) &&
344 	       rule->resize_info &&
345 	       rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
346 }
347 
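/* Write the rule to HW: allocate and write action STEs when the rule needs
 * more than the single match STE, then write the match STE itself. A NULL
 * match_param marks this as an update of an existing rule.
 */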
348 static int hws_rule_create_hws(struct mlx5hws_rule *rule,
349 			       struct mlx5hws_rule_attr *attr,
350 			       u8 mt_idx,
351 			       u32 *match_param,
352 			       u8 at_idx,
353 			       struct mlx5hws_rule_action rule_actions[])
354 {
355 	struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
356 	struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
357 	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
358 	struct mlx5hws_matcher *matcher = rule->matcher;
359 	struct mlx5hws_context *ctx = matcher->tbl->ctx;
360 	struct mlx5hws_send_ste_attr ste_attr = {0};
361 	struct mlx5hws_send_ring_dep_wqe *dep_wqe;
362 	struct mlx5hws_actions_wqe_setter *setter;
363 	struct mlx5hws_actions_apply_data apply;
364 	struct mlx5hws_send_engine *queue;
365 	u8 total_stes, action_stes;
366 	bool is_update;
367 	int i, ret;
368 
369 	is_update = !match_param;
370 
371 	setter = &at->setters[at->num_of_action_stes];
372 	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
373 	action_stes = total_stes - 1;
374 
375 	queue = &ctx->send_queue[attr->queue_id];
376 	if (unlikely(mlx5hws_send_engine_err(queue)))
377 		return -EIO;
378 
379 	hws_rule_create_init(rule, &ste_attr, &apply, is_update);
380 
381 	/* Allocate dependent match WQE since rule might have dependent writes.
382 	 * The queued dependent WQE can be later aborted or kept as a dependency.
383 	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
384 	 */
385 	dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
386 	hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
387 
388 	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
389 	ste_attr.wqe_data = &dep_wqe->wqe_data;
390 	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
391 	apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
392 	apply.rule_action = rule_actions;
393 	apply.queue = queue;
394 
395 	if (action_stes) {
396 		/* Allocate action STEs for rules that need more than the match STE */
397 		if (!is_update) {
398 			ret = hws_rule_alloc_action_ste(rule, attr);
399 			if (ret) {
400 				mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
401 				mlx5hws_send_abort_new_dep_wqe(queue);
402 				return ret;
403 			}
404 		}
405 		/* Skip RX/TX based on the dep_wqe init */
406 		ste_attr.rtc_0 = dep_wqe->rtc_0 ?
407 				 matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
408 		ste_attr.rtc_1 = dep_wqe->rtc_1 ?
409 				 matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
410 		/* Action STEs are written to a specific index last to first */
411 		ste_attr.direct_index = rule->action_ste_idx + action_stes;
412 		apply.next_direct_idx = ste_attr.direct_index;
413 	} else {
414 		apply.next_direct_idx = 0;
415 	}
416 
417 	for (i = total_stes; i-- > 0;) {
418 		mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
419 
420 		if (i == 0) {
421 			/* Handle last match STE.
422 			 * For hash split / linear lookup RTCs, packets reaching any STE
423 			 * will always match and perform the specified actions, which
424 			 * makes the tag irrelevant.
425 			 */
426 			if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
427 				mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
428 							   (u8 *)dep_wqe->wqe_data.action);
429 			else if (is_update)
430 				hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
431 
432 			/* Rule has dependent WQEs, match dep_wqe is queued */
433 			if (action_stes || apply.require_dep)
434 				break;
435 
436 			/* Rule has no dependencies, abort dep_wqe and send WQE now */
437 			mlx5hws_send_abort_new_dep_wqe(queue);
438 			ste_attr.wqe_tag_is_jumbo = is_jumbo;
439 			ste_attr.send_attr.notify_hw = !attr->burst;
440 			ste_attr.send_attr.user_data = dep_wqe->user_data;
441 			ste_attr.send_attr.rule = dep_wqe->rule;
442 			ste_attr.rtc_0 = dep_wqe->rtc_0;
443 			ste_attr.rtc_1 = dep_wqe->rtc_1;
444 			ste_attr.used_id_rtc_0 = &rule->rtc_0;
445 			ste_attr.used_id_rtc_1 = &rule->rtc_1;
446 			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
447 			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
448 			ste_attr.direct_index = dep_wqe->direct_index;
449 		} else {
450 			apply.next_direct_idx = --ste_attr.direct_index;
451 		}
452 
453 		mlx5hws_send_ste(queue, &ste_attr);
454 	}
455 
456 	/* Back up the TAG on the rule for later deletion, and the resize info
457 	 * for moving the rule to a new matcher, only after insertion.
458 	 */
459 	if (!is_update)
460 		hws_rule_save_delete_info(rule, &ste_attr);
461 
462 	hws_rule_save_resize_info(rule, &ste_attr, is_update);
463 	mlx5hws_send_engine_inc_rule(queue);
464 
465 	if (!attr->burst)
466 		mlx5hws_send_all_dep_wqe(queue);
467 
468 	return 0;
469 }
470 
471 static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
472 					struct mlx5hws_rule_attr *attr)
473 {
474 	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
475 	struct mlx5hws_send_engine *queue;
476 
477 	queue = &ctx->send_queue[attr->queue_id];
478 
479 	hws_rule_gen_comp(queue, rule, false,
480 			  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
481 
482 	/* Rule failed now we can safely release action STEs */
483 	mlx5hws_rule_free_action_ste(rule);
484 
485 	/* Clear complex tag */
486 	hws_rule_clear_delete_info(rule);
487 
488 	/* Clear info that was saved for resizing */
489 	mlx5hws_rule_clear_resize_info(rule);
490 
491 	/* A rule that was not queued as a burst expects the HW to be rung, but
492 	 * its failed insertion wrote nothing to the WQ. In that case push the
493 	 * pending dependent WQEs and ring the HW with that last work.
494 	 */
495 	if (attr->burst)
496 		return;
497 
498 	mlx5hws_send_all_dep_wqe(queue);
499 	mlx5hws_send_engine_flush_queue(queue);
500 }
501 
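/* Enqueue deletion of the rule's match STE with a DEACTIVATE WQE. Rules
 * that previously failed, are marked skip_delete, or whose send engine is
 * in error are completed immediately without touching HW; a rule that is
 * still being created returns -EBUSY.
 */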
502 static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
503 				struct mlx5hws_rule_attr *attr)
504 {
505 	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
506 	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
507 	struct mlx5hws_matcher *matcher = rule->matcher;
508 	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
509 	struct mlx5hws_send_ste_attr ste_attr = {0};
510 	struct mlx5hws_send_engine *queue;
511 
512 	queue = &ctx->send_queue[attr->queue_id];
513 
514 	if (unlikely(mlx5hws_send_engine_err(queue))) {
515 		hws_rule_destroy_failed_hws(rule, attr);
516 		return 0;
517 	}
518 
519 	/* Rule is not completed yet */
520 	if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
521 		return -EBUSY;
522 
523 	/* Rule failed and doesn't require cleanup */
524 	if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
525 		hws_rule_destroy_failed_hws(rule, attr);
526 		return 0;
527 	}
528 
529 	if (rule->skip_delete) {
530 		/* Rule shouldn't be deleted in HW.
531 		 * Generate completion as if write succeeded, and we can
532 		 * safely release action STEs and clear resize info.
533 		 */
534 		hws_rule_gen_comp(queue, rule, false,
535 				  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
536 
537 		mlx5hws_rule_free_action_ste(rule);
538 		mlx5hws_rule_clear_resize_info(rule);
539 		return 0;
540 	}
541 
542 	mlx5hws_send_engine_inc_rule(queue);
543 
544 	/* Send dependent WQE */
545 	if (!attr->burst)
546 		mlx5hws_send_all_dep_wqe(queue);
547 
548 	rule->status = MLX5HWS_RULE_STATUS_DELETING;
549 
550 	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
551 	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
552 	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
553 
554 	ste_attr.send_attr.rule = rule;
555 	ste_attr.send_attr.notify_hw = !attr->burst;
556 	ste_attr.send_attr.user_data = attr->user_data;
557 
558 	ste_attr.rtc_0 = rule->rtc_0;
559 	ste_attr.rtc_1 = rule->rtc_1;
560 	ste_attr.used_id_rtc_0 = &rule->rtc_0;
561 	ste_attr.used_id_rtc_1 = &rule->rtc_1;
562 	ste_attr.wqe_ctrl = &wqe_ctrl;
563 	ste_attr.wqe_tag_is_jumbo = is_jumbo;
564 	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
565 	if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
566 		ste_attr.direct_index = attr->rule_idx;
567 
568 	hws_rule_load_delete_info(rule, &ste_attr);
569 	mlx5hws_send_ste(queue, &ste_attr);
570 	hws_rule_clear_delete_info(rule);
571 
572 	return 0;
573 }
574 
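/* Common checks before enqueueing any rule operation: user_data must be
 * provided for the completion, and the target send queue must have room.
 */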
575 static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
576 				     struct mlx5hws_rule_attr *attr)
577 {
578 	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
579 
580 	if (unlikely(!attr->user_data))
581 		return -EINVAL;
582 
583 	/* Check if there is room in queue */
584 	if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
585 		return -EBUSY;
586 
587 	return 0;
588 }
589 
590 static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
591 					  struct mlx5hws_rule_attr *attr)
592 {
593 	if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
594 		return -EINVAL;
595 
596 	return hws_rule_enqueue_precheck(rule, attr);
597 }
598 
599 static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
600 					    struct mlx5hws_rule_attr *attr)
601 {
602 	if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
603 		/* Matcher in resize - new rules are not allowed */
604 		return -EAGAIN;
605 
606 	return hws_rule_enqueue_precheck(rule, attr);
607 }
608 
609 static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
610 					    struct mlx5hws_rule_attr *attr)
611 {
612 	struct mlx5hws_matcher *matcher = rule->matcher;
613 
614 	if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
615 		     !matcher->attr.optimize_using_rule_idx &&
616 		     !mlx5hws_matcher_is_insert_by_idx(matcher))) {
617 		return -EOPNOTSUPP;
618 	}
619 
620 	if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
621 		return -EBUSY;
622 
623 	return hws_rule_enqueue_precheck_create(rule, attr);
624 }
625 
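/* First half of moving a rule to its matcher's resize destination:
 * deactivate the match STE in the old matcher using the RTC IDs that were
 * saved in resize_info by hws_rule_move_init().
 */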
626 int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
627 				 void *queue_ptr,
628 				 void *user_data)
629 {
630 	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
631 	struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
632 	struct mlx5hws_matcher *matcher = rule->matcher;
633 	struct mlx5hws_send_engine *queue = queue_ptr;
634 	struct mlx5hws_send_ste_attr ste_attr = {0};
635 
636 	mlx5hws_send_all_dep_wqe(queue);
637 
638 	rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
639 
640 	ste_attr.send_attr.fence = 0;
641 	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
642 	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
643 	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
644 	ste_attr.send_attr.rule = rule;
645 	ste_attr.send_attr.notify_hw = 1;
646 	ste_attr.send_attr.user_data = user_data;
647 	ste_attr.rtc_0 = rule->resize_info->rtc_0;
648 	ste_attr.rtc_1 = rule->resize_info->rtc_1;
649 	ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
650 	ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
651 	ste_attr.wqe_ctrl = &empty_wqe_ctrl;
652 	ste_attr.wqe_tag_is_jumbo = is_jumbo;
653 	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
654 
655 	if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
656 		ste_attr.direct_index = rule->resize_info->rule_idx;
657 
658 	hws_rule_load_delete_info(rule, &ste_attr);
659 	mlx5hws_send_ste(queue, &ste_attr);
660 
661 	return 0;
662 }
663 
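/* Second half of the move: replay the control and data segments saved in
 * resize_info into the destination matcher's RTCs.
 */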
664 int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
665 			      struct mlx5hws_rule_attr *attr)
666 {
667 	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
668 	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
669 	struct mlx5hws_matcher *matcher = rule->matcher;
670 	struct mlx5hws_send_ste_attr ste_attr = {0};
671 	struct mlx5hws_send_engine *queue;
672 	int ret;
673 
674 	ret = hws_rule_enqueue_precheck_move(rule, attr);
675 	if (unlikely(ret))
676 		return ret;
677 
678 	queue = &ctx->send_queue[attr->queue_id];
679 
680 	ret = mlx5hws_send_engine_err(queue);
681 	if (ret)
682 		return ret;
683 
684 	hws_rule_move_init(rule, attr);
685 	hws_rule_move_get_rtc(rule, &ste_attr);
686 
687 	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
688 	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
689 	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
690 	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
691 	ste_attr.wqe_tag_is_jumbo = is_jumbo;
692 
693 	ste_attr.send_attr.rule = rule;
694 	ste_attr.send_attr.fence = 0;
695 	ste_attr.send_attr.notify_hw = !attr->burst;
696 	ste_attr.send_attr.user_data = attr->user_data;
697 
698 	ste_attr.used_id_rtc_0 = &rule->rtc_0;
699 	ste_attr.used_id_rtc_1 = &rule->rtc_1;
700 	ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
701 	ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
702 	ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
703 				attr->rule_idx : 0;
704 
705 	mlx5hws_send_ste(queue, &ste_attr);
706 	mlx5hws_send_engine_inc_rule(queue);
707 
708 	if (!attr->burst)
709 		mlx5hws_send_all_dep_wqe(queue);
710 
711 	return 0;
712 }
713 
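/* Enqueue creation of a new rule on the queue given in attr->queue_id. The
 * call is asynchronous: the result is reported later through the queue
 * completion mechanism together with attr->user_data.
 *
 * Illustrative sketch only; the matcher, match_param, rule_actions and rule
 * objects are assumed to have been set up elsewhere:
 *
 *	struct mlx5hws_rule_attr attr = {
 *		.queue_id = 0,
 *		.user_data = my_cookie,
 *		.burst = 0,
 *	};
 *
 *	ret = mlx5hws_rule_create(matcher, 0, match_param, 0,
 *				  rule_actions, &attr, rule);
 */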
714 int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
715 			u8 mt_idx,
716 			u32 *match_param,
717 			u8 at_idx,
718 			struct mlx5hws_rule_action rule_actions[],
719 			struct mlx5hws_rule_attr *attr,
720 			struct mlx5hws_rule *rule_handle)
721 {
722 	int ret;
723 
724 	rule_handle->matcher = matcher;
725 
726 	ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
727 	if (unlikely(ret))
728 		return ret;
729 
730 	if (unlikely(!(matcher->num_of_mt >= mt_idx) ||
731 		     !(matcher->num_of_at >= at_idx) ||
732 		     !match_param)) {
733 		pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
734 		return -EINVAL;
735 	}
736 
737 	ret = hws_rule_create_hws(rule_handle,
738 				  attr,
739 				  mt_idx,
740 				  match_param,
741 				  at_idx,
742 				  rule_actions);
743 
744 	return ret;
745 }
746 
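/* Enqueue deletion of a rule on the queue given in attr->queue_id. As with
 * creation, the result is reported asynchronously through the queue
 * completion together with attr->user_data.
 */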
747 int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
748 			 struct mlx5hws_rule_attr *attr)
749 {
750 	int ret;
751 
752 	ret = hws_rule_enqueue_precheck(rule, attr);
753 	if (unlikely(ret))
754 		return ret;
755 
756 	ret = hws_rule_destroy_hws(rule, attr);
757 
758 	return ret;
759 }
760 
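/* Rewrite an existing rule's action STEs using a new action template while
 * keeping the original match. This reuses the create path with a NULL
 * match_param to mark the operation as an update.
 */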
761 int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
762 			       u8 at_idx,
763 			       struct mlx5hws_rule_action rule_actions[],
764 			       struct mlx5hws_rule_attr *attr)
765 {
766 	int ret;
767 
768 	ret = hws_rule_enqueue_precheck_update(rule, attr);
769 	if (unlikely(ret))
770 		return ret;
771 
772 	ret = hws_rule_create_hws(rule,
773 				  attr,
774 				  0,
775 				  NULL,
776 				  at_idx,
777 				  rule_actions);
778 
779 	return ret;
780 }
781