linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c (revision 9410645520e9b820069761f3450ef6661418e279)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "mlx5hws_internal.h"

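/* Argument-size helpers: map a data size in bytes (or a number of
 * modify-header actions) to a log chunk size, effectively the rounded-up
 * log2 of the number of MLX5HWS_ARG_DATA_SIZE blocks needed to hold the
 * data. Anything above 8 * MLX5HWS_ARG_DATA_SIZE maps to
 * MLX5HWS_ARG_CHUNK_SIZE_MAX, which callers treat as unsupported.
 */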
enum mlx5hws_arg_chunk_size
mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
{
	/* Return the roundup of log2(data_size) */
	if (data_size <= MLX5HWS_ARG_DATA_SIZE)
		return MLX5HWS_ARG_CHUNK_SIZE_1;
	if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
		return MLX5HWS_ARG_CHUNK_SIZE_2;
	if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
		return MLX5HWS_ARG_CHUNK_SIZE_3;
	if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
		return MLX5HWS_ARG_CHUNK_SIZE_4;

	return MLX5HWS_ARG_CHUNK_SIZE_MAX;
}

u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
{
	return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
}

enum mlx5hws_arg_chunk_size
mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
{
	return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
						    MLX5HWS_MODIFY_ACTION_SIZE);
}

u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
{
	return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
}

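/* Check whether a list of modify-header actions requires the hardware to
 * reparse the packet: insert/remove/unknown actions always do, and so do
 * set/add/copy actions whose destination field (ethertype or IPv6 next
 * header) changes how the rest of the packet is parsed.
 */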
bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
{
	u16 i, field;
	u8 action_id;

	for (i = 0; i < num_of_actions; i++) {
		action_id = MLX5_GET(set_action_in, &actions[i], action_type);

		switch (action_id) {
		case MLX5_MODIFICATION_TYPE_NOP:
			field = MLX5_MODI_OUT_NONE;
			break;

		case MLX5_MODIFICATION_TYPE_SET:
		case MLX5_MODIFICATION_TYPE_ADD:
			field = MLX5_GET(set_action_in, &actions[i], field);
			break;

		case MLX5_MODIFICATION_TYPE_COPY:
		case MLX5_MODIFICATION_TYPE_ADD_FIELD:
			field = MLX5_GET(copy_action_in, &actions[i], dst_field);
			break;

		default:
			/* Insert/Remove/Unknown actions require a reparse */
			return true;
		}

		/* The fields below can change the packet structure and
		 * therefore require a reparse.
		 */
		if (field == MLX5_MODI_OUT_ETHERTYPE ||
		    field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
			return true;
	}

	return false;
}

/* Cache and cache element handling */
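/* The pattern cache is a mutex-protected, refcounted LRU list: patterns
 * that are looked up are moved to the front of the list, and a pattern's
 * FW object is destroyed only when its last reference is put.
 */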
int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
{
	struct mlx5hws_pattern_cache *new_cache;

	new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
	if (!new_cache)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_cache->ptrn_list);
	mutex_init(&new_cache->lock);

	*cache = new_cache;

	return 0;
}

void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
{
	mutex_destroy(&cache->lock);
	kfree(cache);
}

static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
					__be64 cur_actions[],
					int num_of_actions,
					__be64 actions[])
{
	int i;

	if (cur_num_of_actions != num_of_actions)
		return false;

	for (i = 0; i < num_of_actions; i++) {
		u8 action_id =
			MLX5_GET(set_action_in, &actions[i], action_type);

		if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
		    action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
			if (actions[i] != cur_actions[i])
				return false;
		} else {
			/* Compare just the control, not the values */
			if ((__force __be32)actions[i] !=
			    (__force __be32)cur_actions[i])
				return false;
		}
	}

	return true;
}

static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
				u16 num_of_actions,
				__be64 *actions)
{
	struct mlx5hws_pattern_cache_item *cached_pat = NULL;

	list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
		if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
						(__be64 *)cached_pat->mh_data.data,
						num_of_actions,
						actions))
			return cached_pat;
	}

	return NULL;
}

static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
					u16 num_of_actions,
					__be64 *actions)
{
	struct mlx5hws_pattern_cache_item *cached_pattern;

	cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
	if (cached_pattern) {
		/* LRU: move it to be first in the list */
		list_del_init(&cached_pattern->ptrn_list_node);
		list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
		cached_pattern->refcount++;
	}

	return cached_pattern;
}

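/* Add a new pattern to the cache with an initial refcount of 1.
 * The caller is expected to hold the cache lock and to have already
 * created the FW pattern object identified by pattern_id.
 */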
static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
				 u32 pattern_id,
				 u16 num_of_actions,
				 __be64 *actions)
{
	struct mlx5hws_pattern_cache_item *cached_pattern;

	cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
	if (!cached_pattern)
		return NULL;

	cached_pattern->mh_data.num_of_actions = num_of_actions;
	cached_pattern->mh_data.pattern_id = pattern_id;
	cached_pattern->mh_data.data =
		kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
	if (!cached_pattern->mh_data.data)
		goto free_cached_obj;

	list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
	cached_pattern->refcount = 1;

	return cached_pattern;

free_cached_obj:
	kfree(cached_pattern);
	return NULL;
}

static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
				      u32 ptrn_id)
{
	struct mlx5hws_pattern_cache_item *cached_pattern = NULL;

	list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
		if (cached_pattern->mh_data.pattern_id == ptrn_id)
			return cached_pattern;
	}

	return NULL;
}

static void
mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
{
	list_del_init(&cached_pattern->ptrn_list_node);

	kfree(cached_pattern->mh_data.data);
	kfree(cached_pattern);
}

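/* Drop a reference to a cached pattern. When the last reference is put,
 * the pattern is removed from the cache and its FW object is destroyed.
 */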
void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
{
	struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
	struct mlx5hws_pattern_cache_item *cached_pattern;

	mutex_lock(&cache->lock);
	cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
	if (!cached_pattern) {
		mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
		pr_warn("HWS: pattern ID %u is not found\n", ptrn_id);
		goto out;
	}

	if (--cached_pattern->refcount)
		goto out;

	mlx5hws_pat_remove_pattern(cached_pattern);
	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);

out:
	mutex_unlock(&cache->lock);
}

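/* Get the pattern ID for an action list: reuse an equivalent cached
 * pattern if one exists (taking a reference on it), otherwise create a
 * new FW pattern object and cache it.
 */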
int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
			    __be64 *pattern, size_t pattern_sz,
			    u32 *pattern_id)
{
	u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
	struct mlx5hws_pattern_cache_item *cached_pattern;
	u32 ptrn_id = 0;
	int ret = 0;

	mutex_lock(&ctx->pattern_cache->lock);

	cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
								 num_of_actions,
								 pattern);
	if (cached_pattern) {
		*pattern_id = cached_pattern->mh_data.pattern_id;
		goto out_unlock;
	}

	ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
						       pattern_sz,
						       (u8 *)pattern,
						       &ptrn_id);
	if (ret) {
		mlx5hws_err(ctx, "Failed to create pattern FW object\n");
		goto out_unlock;
	}

	cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
							  ptrn_id,
							  num_of_actions,
							  pattern);
	if (!cached_pattern) {
		mlx5hws_err(ctx, "Failed to add pattern to cache\n");
		ret = -EINVAL;
		goto clean_pattern;
	}

	mutex_unlock(&ctx->pattern_cache->lock);
	*pattern_id = ptrn_id;

	return ret;

clean_pattern:
	/* Destroy the FW pattern just created; ptrn_id holds its ID,
	 * while *pattern_id has not been set on this error path.
	 */
	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
out_unlock:
	mutex_unlock(&ctx->pattern_cache->lock);
	return ret;
}

static void
mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
			 void *comp_data,
			 u32 arg_idx)
{
	send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
	send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	send_attr->id = arg_idx;
	send_attr->user_data = comp_data;
}

void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
			       u32 arg_idx,
			       u8 *arg_data,
			       u16 num_of_actions)
{
	struct mlx5hws_send_engine_post_attr send_attr = {0};
	struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
	struct mlx5hws_send_engine_post_ctrl ctrl;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);

	ctrl = mlx5hws_send_engine_post_start(queue);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	memset(wqe_ctrl, 0, wqe_len);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
	mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
					     num_of_actions);
	mlx5hws_send_engine_post_end(&ctrl, &send_attr);
}

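/* Write argument data to the argument object at arg_idx, splitting it
 * into MLX5HWS_ARG_DATA_SIZE chunks and posting one WQE per chunk;
 * arg_idx advances by one for each full chunk written.
 */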
void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
		       void *comp_data,
		       u32 arg_idx,
		       u8 *arg_data,
		       size_t data_size)
{
	struct mlx5hws_send_engine_post_attr send_attr = {0};
	struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
	struct mlx5hws_send_engine_post_ctrl ctrl;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
	int i, full_iter, leftover;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);

	/* Each WQE can hold 64B of data; larger writes require multiple
	 * iterations.
	 */
	full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
	leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);

	for (i = 0; i < full_iter; i++) {
		ctrl = mlx5hws_send_engine_post_start(queue);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, wqe_len);
		send_attr.id = arg_idx++;
		mlx5hws_send_engine_post_end(&ctrl, &send_attr);

		/* Move to next argument data */
		arg_data += MLX5HWS_ARG_DATA_SIZE;
	}

	if (leftover) {
		ctrl = mlx5hws_send_engine_post_start(queue);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, leftover);
		send_attr.id = arg_idx;
		mlx5hws_send_engine_post_end(&ctrl, &send_attr);
	}
}

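/* Synchronous variant of the argument write: posts the WQEs on the
 * control queue, flushes it, and drains it so the write has completed
 * by the time this returns.
 */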
int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
				      u32 arg_idx,
				      u8 *arg_data,
				      size_t data_size)
{
	struct mlx5hws_send_engine *queue;
	int ret;

	mutex_lock(&ctx->ctrl_lock);

	/* Get the control queue */
	queue = &ctx->send_queue[ctx->queues - 1];

	mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);

	mlx5hws_send_engine_flush_queue(queue);

	/* Poll for completion */
	ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
					MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
	if (ret)
		mlx5hws_err(ctx, "Failed to drain arg queue\n");

	mutex_unlock(&ctx->ctrl_lock);

	return ret;
}

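/* Despite its name, arg_size here is a log2 size: it is validated
 * against the FW's log granularity and log max-allocation capabilities.
 */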
bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
					   u32 arg_size)
{
	if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
	    arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
		return false;
	}
	return true;
}

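/* Allocate an argument object large enough for a bulk of 2^log_bulk_sz
 * arguments of data_sz bytes each, optionally writing the initial data.
 * On success the new argument ID is returned through arg_id.
 */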
int mlx5hws_arg_create(struct mlx5hws_context *ctx,
		       u8 *data,
		       size_t data_sz,
		       u32 log_bulk_sz,
		       bool write_data,
		       u32 *arg_id)
{
	u16 single_arg_log_sz;
	u16 multi_arg_log_sz;
	int ret;
	u32 id;

	single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
	multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;

	if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
		mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
		return -EOPNOTSUPP;
	}

	if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
		mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
		return -EOPNOTSUPP;
	}

	/* Alloc bulk of args */
	ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
	if (ret) {
		mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
		return ret;
	}

	if (write_data) {
		ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
							data, data_sz);
		if (ret) {
			mlx5hws_err(ctx, "Failed writing arg data\n");
			mlx5hws_cmd_arg_destroy(ctx->mdev, id);
			return ret;
		}
	}

	*arg_id = id;
	return ret;
}

void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
{
	mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
}

int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
					 __be64 *data,
					 u8 num_of_actions,
					 u32 log_bulk_sz,
					 bool write_data,
					 u32 *arg_id)
{
	size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
	int ret;

	ret = mlx5hws_arg_create(ctx,
				 (u8 *)data,
				 data_sz,
				 log_bulk_sz,
				 write_data,
				 arg_id);
	if (ret)
		mlx5hws_err(ctx, "Failed creating modify header arg\n");

	return ret;
}

static int
hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
{
	/* Need to check field limitations here, but for now - return OK */
	return 0;
}

#define INVALID_FIELD 0xffff

static void
hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
				    u16 *src_field, u16 *dst_field)
{
	switch (action_type) {
	case MLX5_ACTION_TYPE_SET:
	case MLX5_ACTION_TYPE_ADD:
		*src_field = MLX5_GET(set_action_in, pattern, field);
		*dst_field = INVALID_FIELD;
		break;
	case MLX5_ACTION_TYPE_COPY:
		*src_field = MLX5_GET(copy_action_in, pattern, src_field);
		*dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
		break;
	default:
		pr_warn("HWS: invalid modify header action type %d\n", action_type);
	}
}

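/* Verify that every action in the pattern has a known action type.
 * Per-field limitations are not checked yet; see
 * hws_action_modify_check_field_limitation() above.
 */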
bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
{
	size_t i;

	for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
		u8 action_type =
			MLX5_GET(set_action_in, &pattern[i], action_type);

		if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
			mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
			return false;
		}
		if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
			mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
			return false;
		}
	}

	return true;
}

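/* Rewrite the pattern into new_pat, inserting NOP actions where needed.
 * The hardware appears to execute modify-header actions in pairs; when
 * the second action of a pair touches the same field as the first (for
 * example two copies involving the same source or destination field), a
 * NOP is inserted to split the pair. On return, new_size holds the
 * resulting number of actions and nope_location marks, by original
 * action index, where NOPs were inserted. If new_pat cannot hold the
 * result (max_actions), new_size is reset to num_actions and
 * nope_location to 0, i.e. the original pattern should be used as-is.
 */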
void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
			   size_t max_actions, size_t *new_size,
			   u32 *nope_location, __be64 *new_pat)
{
	u16 prev_src_field = 0, prev_dst_field = 0;
	u16 src_field, dst_field;
	u8 action_type;
	size_t i, j;

	*new_size = num_actions;
	*nope_location = 0;

	if (num_actions == 1)
		return;

	for (i = 0, j = 0; i < num_actions; i++, j++) {
		action_type = MLX5_GET(set_action_in, &pattern[i], action_type);

		hws_action_modify_get_target_fields(action_type, &pattern[i],
						    &src_field, &dst_field);
		if (i % 2) {
			if (action_type == MLX5_ACTION_TYPE_COPY &&
			    (prev_src_field == src_field ||
			     prev_dst_field == dst_field)) {
				/* Need a NOP between the two copies */
				*new_size += 1;
				*nope_location |= BIT(i);
				if (j >= max_actions)
					goto no_space;
				memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
				MLX5_SET(set_action_in, &new_pat[j],
					 action_type,
					 MLX5_MODIFICATION_TYPE_NOP);
				j++;
			} else if (prev_src_field == src_field) {
				/* Need a NOP between the two actions */
				*new_size += 1;
				*nope_location |= BIT(i);
				if (j >= max_actions)
					goto no_space;
				memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
				MLX5_SET(set_action_in, &new_pat[j],
					 action_type,
					 MLX5_MODIFICATION_TYPE_NOP);
				j++;
			}
		}

		/* Bail out if there is no room left for the next action */
		if (j >= max_actions)
			goto no_space;
		memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);

		prev_src_field = src_field;
		prev_dst_field = dst_field;
	}

	return;

no_space:
	/* Not enough room in new_pat; fall back to the original pattern */
	*new_size = num_actions;
	*nope_location = 0;
}