xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
3 
4 #include "mlx5hws_internal.h"
5 
6 /* Pattern tunnel Layer bits. */
7 #define MLX5_FLOW_LAYER_VXLAN      BIT(12)
8 #define MLX5_FLOW_LAYER_VXLAN_GPE  BIT(13)
9 #define MLX5_FLOW_LAYER_GRE        BIT(14)
10 #define MLX5_FLOW_LAYER_MPLS       BIT(15)
11 
12 /* Pattern tunnel Layer bits (continued). */
13 #define MLX5_FLOW_LAYER_IPIP       BIT(23)
14 #define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
15 #define MLX5_FLOW_LAYER_NVGRE      BIT(25)
16 #define MLX5_FLOW_LAYER_GENEVE     BIT(26)
17 
18 #define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
19 
20 /* Tunnel Masks. */
21 #define MLX5_FLOW_LAYER_TUNNEL \
22 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
23 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
24 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
25 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
26 	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
27 
28 #define GTP_PDU_SC	0x85
29 #define BAD_PORT	0xBAD
30 #define ETH_TYPE_IPV4_VXLAN	0x0800
31 #define ETH_TYPE_IPV6_VXLAN	0x86DD
32 #define UDP_GTPU_PORT	2152
33 #define UDP_PORT_MPLS	6635
34 #define UDP_GENEVE_PORT 6081
35 #define UDP_ROCEV2_PORT	4791
36 #define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
37 
38 #define STE_NO_VLAN	0x0
39 #define STE_SVLAN	0x1
40 #define STE_CVLAN	0x2
41 #define STE_NO_L3	0x0
42 #define STE_IPV4	0x1
43 #define STE_IPV6	0x2
44 #define STE_NO_L4	0x0
45 #define STE_TCP		0x1
46 #define STE_UDP		0x2
47 #define STE_ICMP	0x3
48 #define STE_ESP		0x3
49 
50 #define IPV4 0x4
51 #define IPV6 0x6
52 
53 /* Setter function based on bit offset and mask, for 32bit DW */
54 #define _HWS_SET32(p, v, byte_off, bit_off, mask) \
55 	do { \
56 		u32 _v = v; \
57 		*((__be32 *)(p) + ((byte_off) / 4)) = \
58 		cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
59 			     ((byte_off) / 4))) & \
60 			     (~((mask) << (bit_off)))) | \
61 			    (((_v) & (mask)) << \
62 			      (bit_off))); \
63 	} while (0)
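/*
 * Illustrative example (not part of the driver): _HWS_SET32(p, 0x5, 8, 3, 0x1f)
 * read-modify-writes the third big-endian DW of p:
 *   dw = (dw & ~(0x1f << 3)) | ((0x5 & 0x1f) << 3)
 * i.e. bits 3..7 of that DW now hold the value 5 and all other bits are kept.
 */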
64 
65 /* Setter function based on bit offset and mask, for unaligned 32bit DW */
66 #define HWS_SET32(p, v, byte_off, bit_off, mask) \
67 	do { \
68 		if (unlikely((bit_off) < 0)) { \
69 			u32 _bit_off = -1 * (bit_off); \
70 			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
71 			_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
72 			_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
73 				    (bit_off) % BITS_IN_DW, second_dw_mask); \
74 		} else { \
75 			_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
76 		} \
77 	} while (0)
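/*
 * A negative bit_off marks a field that straddles a DW boundary: the top
 * (mask >> -bit_off) bits of the value are written at bit 0 of the current
 * DW, and the remaining -bit_off low bits are intended for the high end of
 * the following DW (illustrative: bit_off == -2 splits a 6-bit field 4/2
 * across the two DWs).
 */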
78 
79 /* Getter for up to 32 bits from an aligned DW */
80 #define HWS_GET32(p, byte_off, bit_off, mask) \
81 	((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
82 
83 #define HWS_CALC_FNAME(field, inner) \
84 	((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
85 		   MLX5HWS_DEFINER_FNAME_##field##_O)
86 
87 #define HWS_GET_MATCH_PARAM(match_param, hdr) \
88 	MLX5_GET(fte_match_param, match_param, hdr)
89 
90 #define HWS_IS_FLD_SET(match_param, hdr) \
91 	(!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
92 
93 #define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
94 		BUILD_BUG_ON((sz_in_bits) % 32); \
95 		u32 sz = sz_in_bits; \
96 		u32 res = 0; \
97 		u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
98 		while (!res && sz >= 32) { \
99 			res = *((match_param) + (dw_off++)); \
100 			sz -= 32; \
101 		} \
102 		res; \
103 	})
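/*
 * For fields wider than one DW this walks the match param DW by DW and
 * stops at the first non-zero word, so the result means "any bit set in
 * the field". The BUILD_BUG_ON() guarantees the field size is a whole
 * number of DWs.
 */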
104 
105 #define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
106 	(((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
107 			       !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
108 
109 #define HWS_GET64_MATCH_PARAM(match_param, hdr) \
110 	MLX5_GET64(fte_match_param, match_param, hdr)
111 
112 #define HWS_IS_FLD64_SET(match_param, hdr) \
113 	(!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
114 
115 #define HWS_CALC_HDR_SRC(fc, s_hdr) \
116 	do { \
117 		(fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
118 		(fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
119 		(fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
120 	} while (0)
121 
122 #define HWS_CALC_HDR_DST(fc, d_hdr) \
123 	do { \
124 		(fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
125 		(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
126 		(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
127 	} while (0)
128 
129 #define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
130 	do { \
131 		HWS_CALC_HDR_SRC(fc, s_hdr); \
132 		HWS_CALC_HDR_DST(fc, d_hdr); \
133 		(fc)->tag_set = &hws_definer_generic_set; \
134 	} while (0)
135 
136 #define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
137 	do { \
138 		if (HWS_IS_FLD_SET(match_param, s_hdr)) \
139 			HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
140 	} while (0)
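/*
 * Typical use (taken from the converters below):
 *   HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
 *               outer_headers.ethertype, eth_l2_outer.l3_ethertype);
 * If outer_headers.ethertype is set in the match param, this fills the
 * ETH_TYPE_O field-copy entry with the source offsets in fte_match_param,
 * the destination offsets in the definer header layout, and the generic
 * copy routine hws_definer_generic_set().
 */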
141 
142 struct mlx5hws_definer_sel_ctrl {
143 	u8 allowed_full_dw; /* Full DW selectors cover all offsets */
144 	u8 allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */
145 	u8 allowed_bytes;   /* Bytes selectors, up to offset 255 */
146 	u8 used_full_dw;
147 	u8 used_lim_dw;
148 	u8 used_bytes;
149 	u8 full_dw_selector[DW_SELECTORS];
150 	u8 lim_dw_selector[DW_SELECTORS_LIMITED];
151 	u8 byte_selector[BYTE_SELECTORS];
152 };
153 
154 struct mlx5hws_definer_conv_data {
155 	struct mlx5hws_context *ctx;
156 	struct mlx5hws_definer_fc *fc;
157 	/* enum mlx5hws_definer_match_flag */
158 	u32 match_flags;
159 };
160 
161 static void
162 hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
163 		     void *match_param,
164 		     u8 *tag)
165 {
166 	HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
167 }
168 
169 static void
170 hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
171 			void *match_param,
172 			u8 *tag)
173 {
174 	/* Can be optimized */
175 	u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
176 
177 	HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
178 }
179 
180 static void
181 hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
182 				void *match_param,
183 				u8 *tag)
184 {
185 	if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
186 		HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
187 	else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
188 		HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
189 	else
190 		HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
191 }
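/*
 * first_vlan_qualifier holds an encoded STE value (STE_CVLAN / STE_SVLAN /
 * STE_NO_VLAN) rather than a bit copied from the match param, so these
 * helpers replace hws_definer_generic_set() as the tag_set callback.
 */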
192 
193 static void
194 hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
195 				void *match_param,
196 				u8 *tag)
197 {
198 	if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
199 		HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
200 	else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
201 		HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
202 	else
203 		HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
204 }
205 
206 static void
207 hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
208 				 void *match_param,
209 				 u8 *tag,
210 				 bool inner)
211 {
212 	u32 second_cvlan_tag = inner ?
213 		HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
214 		HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
215 	u32 second_svlan_tag = inner ?
216 		HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
217 		HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
218 
219 	if (second_cvlan_tag)
220 		HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
221 	else if (second_svlan_tag)
222 		HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
223 	else
224 		HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
225 }
226 
227 static void
228 hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
229 				       void *match_param,
230 				       u8 *tag)
231 {
232 	hws_definer_second_vlan_type_set(fc, match_param, tag, true);
233 }
234 
235 static void
236 hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
237 				       void *match_param,
238 				       u8 *tag)
239 {
240 	hws_definer_second_vlan_type_set(fc, match_param, tag, false);
241 }
242 
243 static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
244 				     void *match_param,
245 				     u8 *tag)
246 {
247 	u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
248 	u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
249 	u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
250 		 (code << __mlx5_dw_bit_off(header_icmp, code));
251 
252 	HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
253 }
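/*
 * Illustrative example: matching ICMP echo request (type 8, code 0) yields
 *   dw = (8 << __mlx5_dw_bit_off(header_icmp, type)) |
 *        (0 << __mlx5_dw_bit_off(header_icmp, code));
 * i.e. type and code are packed into the single icmp_dw1 definer word.
 */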
254 
255 static void
256 hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
257 			   void *match_param,
258 			   u8 *tag)
259 {
260 	u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
261 	u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
262 	u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
263 		 (code << __mlx5_dw_bit_off(header_icmp, code));
264 
265 	HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
266 }
267 
268 static void
269 hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
270 			void *match_param,
271 			u8 *tag)
272 {
273 	u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
274 
275 	if (val == IPV4)
276 		HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
277 	else if (val == IPV6)
278 		HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
279 	else
280 		HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
281 }
282 
283 static void
284 hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
285 				 void *match_param,
286 				 u8 *tag,
287 				 struct mlx5hws_context *peer_ctx)
288 {
289 	u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
290 	u16 vport_gvmi = 0;
291 	int ret;
292 
293 	ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
294 	if (ret) {
295 		HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
296 		mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
297 		return;
298 	}
299 
300 	if (vport_gvmi)
301 		HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
302 }
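/*
 * The match param carries a vport number, but the hardware matches on that
 * vport's GVMI, so it is resolved here via mlx5hws_vport_get_gvmi(). On
 * failure the BAD_PORT sentinel is written instead, presumably so the rule
 * can never match a real port.
 */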
303 
304 static void
305 hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
306 				    void *match_param,
307 				    u8 *tag)
308 __must_hold(&fc->ctx->ctrl_lock)
309 {
310 	int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
311 	struct mlx5hws_context *peer_ctx;
312 
313 	if (id == fc->ctx->caps->vhca_id)
314 		peer_ctx = fc->ctx;
315 	else
316 		peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
317 
318 	if (!peer_ctx) {
319 		HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
320 		mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
321 		return;
322 	}
323 
324 	hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
325 }
326 
327 static void
328 hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
329 			    void *match_param,
330 			    u8 *tag)
331 {
332 	hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
333 }
334 
335 static struct mlx5hws_definer_fc *
336 hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
337 						 u8 parser_id)
338 {
339 	struct mlx5hws_definer_fc *fc;
340 
341 	switch (parser_id) {
342 	case 0:
343 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
344 		HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
345 		fc->tag_set = &hws_definer_generic_set;
346 		break;
347 	case 1:
348 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
349 		HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
350 		fc->tag_set = &hws_definer_generic_set;
351 		break;
352 	case 2:
353 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
354 		HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
355 		fc->tag_set = &hws_definer_generic_set;
356 		break;
357 	case 3:
358 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
359 		HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
360 		fc->tag_set = &hws_definer_generic_set;
361 		break;
362 	case 4:
363 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
364 		HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
365 		fc->tag_set = &hws_definer_generic_set;
366 		break;
367 	case 5:
368 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
369 		HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
370 		fc->tag_set = &hws_definer_generic_set;
371 		break;
372 	case 6:
373 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
374 		HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
375 		fc->tag_set = &hws_definer_generic_set;
376 		break;
377 	case 7:
378 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
379 		HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
380 		fc->tag_set = &hws_definer_generic_set;
381 		break;
382 	default:
383 		mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
384 		return NULL;
385 	}
386 
387 	return fc;
388 }
389 
390 static struct mlx5hws_definer_fc *
391 hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
392 				u8 parser_id)
393 {
394 	struct mlx5hws_definer_fc *fc;
395 
396 	switch (parser_id) {
397 	case 0:
398 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
399 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
400 		fc->tag_set = &hws_definer_generic_set;
401 		break;
402 	case 1:
403 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
404 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
405 		fc->tag_set = &hws_definer_generic_set;
406 		break;
407 	case 2:
408 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
409 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
410 		fc->tag_set = &hws_definer_generic_set;
411 		break;
412 	case 3:
413 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
414 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
415 		fc->tag_set = &hws_definer_generic_set;
416 		break;
417 	case 4:
418 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
419 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
420 		fc->tag_set = &hws_definer_generic_set;
421 		break;
422 	case 5:
423 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
424 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
425 		fc->tag_set = &hws_definer_generic_set;
426 		break;
427 	case 6:
428 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
429 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
430 		fc->tag_set = &hws_definer_generic_set;
431 		break;
432 	case 7:
433 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
434 		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
435 		fc->tag_set = &hws_definer_generic_set;
436 		break;
437 	default:
438 		mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
439 		return NULL;
440 	}
441 
442 	return fc;
443 }
444 
445 static struct mlx5hws_definer_fc *
446 hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
447 				 bool *parser_is_used,
448 				 u32 id,
449 				 u32 value)
450 {
451 	if (id || value) {
452 		if (id >= HWS_NUM_OF_FLEX_PARSERS) {
453 			mlx5hws_err(cd->ctx, "Unsupported parser id\n");
454 			return NULL;
455 		}
456 
457 		if (parser_is_used[id]) {
458 			mlx5hws_err(cd->ctx, "Parser id has already been used\n"); \
459 			return NULL;
460 		}
461 	}
462 
463 	parser_is_used[id] = true;
464 
465 	return hws_definer_flex_parser_handler(cd, id);
466 }
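/*
 * Each misc4 prog_sample_field_{id,value} pair selects one flex parser
 * register; parser_is_used[] rejects two pairs naming the same parser id,
 * since one definer field cannot match two different values.
 */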
467 
468 static int
469 hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
470 {
471 	u32 flags;
472 
473 	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
474 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
475 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
476 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
477 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
478 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
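	/* flags & (flags - 1) clears the lowest set bit, so it is non-zero
	 * iff more than one of these mutually exclusive tunnel matches was
	 * requested.
	 */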
479 	if (flags & (flags - 1))
480 		goto err_conflict;
481 
482 	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
483 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
484 
485 	if (flags & (flags - 1))
486 		goto err_conflict;
487 
488 	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
489 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
490 	if (flags & (flags - 1))
491 		goto err_conflict;
492 
493 	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
494 				   MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
495 				   MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
496 				   MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
497 	if (flags & (flags - 1))
498 		goto err_conflict;
499 
500 	return 0;
501 
502 err_conflict:
503 	mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
504 	return -EINVAL;
505 }
506 
507 static int
508 hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
509 		       u32 *match_param)
510 {
511 	bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
512 	struct mlx5hws_definer_fc *fc = cd->fc;
513 	struct mlx5hws_definer_fc *curr_fc;
514 	u32 *s_ipv6, *d_ipv6;
515 
516 	if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
517 	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
518 	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
519 		mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
520 		return -EINVAL;
521 	}
522 
523 	/* L2 Check ethertype */
524 	HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
525 		    outer_headers.ethertype,
526 		    eth_l2_outer.l3_ethertype);
527 	/* L2 Check SMAC 47_16 */
528 	HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
529 		    outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
530 	/* L2 Check SMAC 15_0 */
531 	HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
532 		    outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
533 	/* L2 Check DMAC 47_16 */
534 	HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
535 		    outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
536 	/* L2 Check DMAC 15_0 */
537 	HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
538 		    outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
539 
540 	/* L2 VLAN */
541 	HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
542 		    outer_headers.first_prio, eth_l2_outer.first_priority);
543 	HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
544 		    outer_headers.first_cfi, eth_l2_outer.first_cfi);
545 	HWS_SET_HDR(fc, match_param, VLAN_ID_O,
546 		    outer_headers.first_vid, eth_l2_outer.first_vlan_id);
547 
548 	/* L2 CVLAN and SVLAN */
549 	if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
550 	    HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
551 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
552 		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
553 		curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
554 		curr_fc->tag_mask_set = &hws_definer_ones_set;
555 	}
556 
557 	/* L3 Check IP header */
558 	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
559 		    outer_headers.ip_protocol,
560 		    eth_l3_outer.protocol_next_header);
561 	HWS_SET_HDR(fc, match_param, IP_TTL_O,
562 		    outer_headers.ttl_hoplimit,
563 		    eth_l3_outer.time_to_live_hop_limit);
564 
565 	/* L3 Check IPv4/IPv6 addresses */
566 	s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
567 			      outer_headers.src_ipv4_src_ipv6.ipv6_layout);
568 	d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
569 			      outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
570 
571 	/* Assume IPv6 is used if ipv6 bits are set */
572 	is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
573 	is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
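	/* An IPv4 address occupies only the lowest word (ipv6_31_0) of this
	 * layout, so a non-zero upper word can only mean an IPv6 match.
	 */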
574 
575 	if (is_s_ipv6) {
576 		/* Handle IPv6 source address */
577 		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
578 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
579 			    ipv6_src_outer.ipv6_address_127_96);
580 		HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
581 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
582 			    ipv6_src_outer.ipv6_address_95_64);
583 		HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
584 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
585 			    ipv6_src_outer.ipv6_address_63_32);
586 		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
587 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
588 			    ipv6_src_outer.ipv6_address_31_0);
589 	} else {
590 		/* Handle IPv4 source address */
591 		HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
592 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
593 			    ipv4_src_dest_outer.source_address);
594 	}
595 	if (is_d_ipv6) {
596 		/* Handle IPv6 destination address */
597 		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
598 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
599 			    ipv6_dst_outer.ipv6_address_127_96);
600 		HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
601 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
602 			    ipv6_dst_outer.ipv6_address_95_64);
603 		HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
604 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
605 			    ipv6_dst_outer.ipv6_address_63_32);
606 		HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
607 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
608 			    ipv6_dst_outer.ipv6_address_31_0);
609 	} else {
610 		/* Handle IPv4 destination address */
611 		HWS_SET_HDR(fc, match_param, IPV4_DST_O,
612 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
613 			    ipv4_src_dest_outer.destination_address);
614 	}
615 
616 	/* L4 Handle TCP/UDP */
617 	HWS_SET_HDR(fc, match_param, L4_SPORT_O,
618 		    outer_headers.tcp_sport, eth_l4_outer.source_port);
619 	HWS_SET_HDR(fc, match_param, L4_DPORT_O,
620 		    outer_headers.tcp_dport, eth_l4_outer.destination_port);
621 	HWS_SET_HDR(fc, match_param, L4_SPORT_O,
622 		    outer_headers.udp_sport, eth_l4_outer.source_port);
623 	HWS_SET_HDR(fc, match_param, L4_DPORT_O,
624 		    outer_headers.udp_dport, eth_l4_outer.destination_port);
625 	HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
626 		    outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
627 
628 	/* L3 Handle DSCP, ECN and IHL  */
629 	HWS_SET_HDR(fc, match_param, IP_DSCP_O,
630 		    outer_headers.ip_dscp, eth_l3_outer.dscp);
631 	HWS_SET_HDR(fc, match_param, IP_ECN_O,
632 		    outer_headers.ip_ecn, eth_l3_outer.ecn);
633 	HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
634 		    outer_headers.ipv4_ihl, eth_l3_outer.ihl);
635 
636 	/* Set IP fragmented bit */
637 	if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
638 		smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
639 				HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
640 		dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
641 				HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
642 		if (smac_set == dmac_set) {
643 			HWS_SET_HDR(fc, match_param, IP_FRAG_O,
644 				    outer_headers.frag, eth_l4_outer.ip_fragmented);
645 		} else {
646 			HWS_SET_HDR(fc, match_param, IP_FRAG_O,
647 				    outer_headers.frag, eth_l2_src_outer.ip_fragmented);
648 		}
649 	}
650 
651 	/* L3_type set */
652 	if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
653 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
654 		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
655 		curr_fc->tag_set = &hws_definer_l3_type_set;
656 		curr_fc->tag_mask_set = &hws_definer_ones_set;
657 		HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
658 	}
659 
660 	return 0;
661 }
662 
663 static int
664 hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
665 		       u32 *match_param)
666 {
667 	bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
668 	struct mlx5hws_definer_fc *fc = cd->fc;
669 	struct mlx5hws_definer_fc *curr_fc;
670 	u32 *s_ipv6, *d_ipv6;
671 
672 	if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
673 	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
674 	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
675 		mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
676 		return -EINVAL;
677 	}
678 
679 	/* L2 Check ethertype */
680 	HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
681 		    inner_headers.ethertype,
682 		    eth_l2_inner.l3_ethertype);
683 	/* L2 Check SMAC 47_16 */
684 	HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
685 		    inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
686 	/* L2 Check SMAC 15_0 */
687 	HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
688 		    inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
689 	/* L2 Check DMAC 47_16 */
690 	HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
691 		    inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
692 	/* L2 Check DMAC 15_0 */
693 	HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
694 		    inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
695 
696 	/* L2 VLAN */
697 	HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
698 		    inner_headers.first_prio, eth_l2_inner.first_priority);
699 	HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
700 		    inner_headers.first_cfi, eth_l2_inner.first_cfi);
701 	HWS_SET_HDR(fc, match_param, VLAN_ID_I,
702 		    inner_headers.first_vid, eth_l2_inner.first_vlan_id);
703 
704 	/* L2 CVLAN and SVLAN */
705 	if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
706 	    HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
707 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
708 		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
709 		curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
710 		curr_fc->tag_mask_set = &hws_definer_ones_set;
711 	}
712 	/* L3 Check IP header */
713 	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
714 		    inner_headers.ip_protocol,
715 		    eth_l3_inner.protocol_next_header);
716 	HWS_SET_HDR(fc, match_param, IP_VERSION_I,
717 		    inner_headers.ip_version,
718 		    eth_l3_inner.ip_version);
719 	HWS_SET_HDR(fc, match_param, IP_TTL_I,
720 		    inner_headers.ttl_hoplimit,
721 		    eth_l3_inner.time_to_live_hop_limit);
722 
723 	/* L3 Check IPv4/IPv6 addresses */
724 	s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
725 			      inner_headers.src_ipv4_src_ipv6.ipv6_layout);
726 	d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
727 			      inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
728 
729 	/* Assume IPv6 is used if ipv6 bits are set */
730 	is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
731 	is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
732 
733 	if (is_s_ipv6) {
734 		/* Handle IPv6 source address */
735 		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
736 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
737 			    ipv6_src_inner.ipv6_address_127_96);
738 		HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
739 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
740 			    ipv6_src_inner.ipv6_address_95_64);
741 		HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
742 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
743 			    ipv6_src_inner.ipv6_address_63_32);
744 		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
745 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
746 			    ipv6_src_inner.ipv6_address_31_0);
747 	} else {
748 		/* Handle IPv4 source address */
749 		HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
750 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
751 			    ipv4_src_dest_inner.source_address);
752 	}
753 	if (is_d_ipv6) {
754 		/* Handle IPv6 destination address */
755 		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
756 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
757 			    ipv6_dst_inner.ipv6_address_127_96);
758 		HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
759 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
760 			    ipv6_dst_inner.ipv6_address_95_64);
761 		HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
762 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
763 			    ipv6_dst_inner.ipv6_address_63_32);
764 		HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
765 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
766 			    ipv6_dst_inner.ipv6_address_31_0);
767 	} else {
768 		/* Handle IPv4 destination address */
769 		HWS_SET_HDR(fc, match_param, IPV4_DST_I,
770 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
771 			    ipv4_src_dest_inner.destination_address);
772 	}
773 
774 	/* L4 Handle TCP/UDP */
775 	HWS_SET_HDR(fc, match_param, L4_SPORT_I,
776 		    inner_headers.tcp_sport, eth_l4_inner.source_port);
777 	HWS_SET_HDR(fc, match_param, L4_DPORT_I,
778 		    inner_headers.tcp_dport, eth_l4_inner.destination_port);
779 	HWS_SET_HDR(fc, match_param, L4_SPORT_I,
780 		    inner_headers.udp_sport, eth_l4_inner.source_port);
781 	HWS_SET_HDR(fc, match_param, L4_DPORT_I,
782 		    inner_headers.udp_dport, eth_l4_inner.destination_port);
783 	HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
784 		    inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
785 
786 	/* L3 Handle DSCP, ECN and IHL  */
787 	HWS_SET_HDR(fc, match_param, IP_DSCP_I,
788 		    inner_headers.ip_dscp, eth_l3_inner.dscp);
789 	HWS_SET_HDR(fc, match_param, IP_ECN_I,
790 		    inner_headers.ip_ecn, eth_l3_inner.ecn);
791 	HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
792 		    inner_headers.ipv4_ihl, eth_l3_inner.ihl);
793 
794 	/* Set IP fragmented bit */
795 	if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
796 		if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
797 			HWS_SET_HDR(fc, match_param, IP_FRAG_I,
798 				    inner_headers.frag, eth_l2_inner.ip_fragmented);
799 		} else {
800 			smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
801 				   HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
802 			dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
803 				   HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
804 			if (smac_set == dmac_set) {
805 				HWS_SET_HDR(fc, match_param, IP_FRAG_I,
806 					    inner_headers.frag, eth_l4_inner.ip_fragmented);
807 			} else {
808 				HWS_SET_HDR(fc, match_param, IP_FRAG_I,
809 					    inner_headers.frag, eth_l2_src_inner.ip_fragmented);
810 			}
811 		}
812 	}
813 
814 	/* L3_type set */
815 	if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
816 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
817 		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
818 		curr_fc->tag_set = &hws_definer_l3_type_set;
819 		curr_fc->tag_mask_set = &hws_definer_ones_set;
820 		HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
821 	}
822 
823 	return 0;
824 }
825 
826 static int
827 hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
828 		      u32 *match_param)
829 {
830 	struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
831 	struct mlx5hws_definer_fc *fc = cd->fc;
832 	struct mlx5hws_definer_fc *curr_fc;
833 
834 	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
835 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
836 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
837 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
838 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
839 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
840 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
841 	    HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
842 	    HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
843 	    HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
844 	    HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
845 	    HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
846 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
847 		mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
848 		return -EINVAL;
849 	}
850 
851 	/* Check GRE related fields */
852 	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
853 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
854 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
855 		HWS_CALC_HDR(curr_fc,
856 			     misc_parameters.gre_c_present,
857 			     tunnel_header.tunnel_header_0);
858 		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
859 		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
860 	}
861 
862 	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
863 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
864 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
865 		HWS_CALC_HDR(curr_fc,
866 			     misc_parameters.gre_k_present,
867 			     tunnel_header.tunnel_header_0);
868 		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
869 		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
870 	}
871 
872 	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
873 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
874 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
875 		HWS_CALC_HDR(curr_fc,
876 			     misc_parameters.gre_s_present,
877 			     tunnel_header.tunnel_header_0);
878 		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
879 		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
880 	}
881 
882 	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
883 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
884 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
885 		HWS_CALC_HDR(curr_fc,
886 			     misc_parameters.gre_protocol,
887 			     tunnel_header.tunnel_header_0);
888 		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
889 		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
890 	}
891 
892 	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
893 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
894 				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
895 		HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
896 			    misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
897 	}
898 
899 	/* Check GENEVE related fields */
900 	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
901 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
902 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
903 		HWS_CALC_HDR(curr_fc,
904 			     misc_parameters.geneve_vni,
905 			     tunnel_header.tunnel_header_1);
906 		curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
907 		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
908 	}
909 
910 	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
911 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
912 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
913 		HWS_CALC_HDR(curr_fc,
914 			     misc_parameters.geneve_opt_len,
915 			     tunnel_header.tunnel_header_0);
916 		curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
917 		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
918 	}
919 
920 	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
921 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
922 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
923 		HWS_CALC_HDR(curr_fc,
924 			     misc_parameters.geneve_protocol_type,
925 			     tunnel_header.tunnel_header_0);
926 		curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
927 		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
928 	}
929 
930 	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
931 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
932 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
933 		HWS_CALC_HDR(curr_fc,
934 			     misc_parameters.geneve_oam,
935 			     tunnel_header.tunnel_header_0);
936 		curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
937 		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
938 	}
939 
940 	HWS_SET_HDR(fc, match_param, SOURCE_QP,
941 		    misc_parameters.source_sqn, source_qp_gvmi.source_qp);
942 	HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
943 		    misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
944 	HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
945 		    misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
946 
947 	/* L2 Second VLAN */
948 	HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
949 		    misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
950 	HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
951 		    misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
952 	HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
953 		    misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
954 	HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
955 		    misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
956 	HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
957 		    misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
958 	HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
959 		    misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
960 
961 	/* L2 Second CVLAN and SVLAN */
962 	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
963 	    HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
964 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
965 		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
966 		curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
967 		curr_fc->tag_mask_set = &hws_definer_ones_set;
968 	}
969 
970 	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
971 	    HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
972 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
973 		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
974 		curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
975 		curr_fc->tag_mask_set = &hws_definer_ones_set;
976 	}
977 
978 	/* VXLAN VNI  */
979 	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
980 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
981 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
982 		HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
983 		curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
984 		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
985 	}
986 
987 	/* Flex protocol steering ok bits */
988 	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
989 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
990 
991 		if (!caps->flex_parser_ok_bits_supp) {
992 			mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
993 			return -EOPNOTSUPP;
994 		}
995 
996 		curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
997 				cd, caps->flex_parser_id_geneve_tlv_option_0);
998 		if (!curr_fc)
999 			return -EINVAL;
1000 
1001 		HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist);
1002 	}
1003 
1004 	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
1005 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
1006 		HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
1007 		curr_fc->tag_mask_set = &hws_definer_ones_set;
1008 		curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
1009 						  misc_parameters.source_eswitch_owner_vhca_id) ?
1010 						  &hws_definer_set_source_gvmi_vhca_id :
1011 						  &hws_definer_set_source_gvmi;
1012 	} else {
1013 		if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
1014 			mlx5hws_err(cd->ctx,
1015 				    "Unsupported source_eswitch_owner_vhca_id field usage\n");
1016 			return -EOPNOTSUPP;
1017 		}
1018 	}
1019 
1020 	return 0;
1021 }
1022 
1023 static int
1024 hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
1025 		       u32 *match_param)
1026 {
1027 	struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
1028 	struct mlx5hws_definer_fc *fc = cd->fc;
1029 	struct mlx5hws_definer_fc *curr_fc;
1030 
1031 	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
1032 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
1033 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
1034 	    HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
1035 	    HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
1036 		mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
1037 		return -EINVAL;
1038 	}
1039 
1040 	HWS_SET_HDR(fc, match_param, MPLS0_O,
1041 		    misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
1042 	HWS_SET_HDR(fc, match_param, MPLS0_I,
1043 		    misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
1044 	HWS_SET_HDR(fc, match_param, REG_0,
1045 		    misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
1046 	HWS_SET_HDR(fc, match_param, REG_1,
1047 		    misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
1048 	HWS_SET_HDR(fc, match_param, REG_2,
1049 		    misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
1050 	HWS_SET_HDR(fc, match_param, REG_3,
1051 		    misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
1052 	HWS_SET_HDR(fc, match_param, REG_4,
1053 		    misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
1054 	HWS_SET_HDR(fc, match_param, REG_5,
1055 		    misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
1056 	HWS_SET_HDR(fc, match_param, REG_6,
1057 		    misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
1058 	HWS_SET_HDR(fc, match_param, REG_7,
1059 		    misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
1060 	HWS_SET_HDR(fc, match_param, REG_A,
1061 		    misc_parameters_2.metadata_reg_a, metadata.general_purpose);
1062 
1063 	if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
1064 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
1065 
1066 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
1067 			mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
1068 			return -EOPNOTSUPP;
1069 		}
1070 
1071 		curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
1072 		if (!curr_fc)
1073 			return -EINVAL;
1074 
1075 		HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre);
1076 	}
1077 
1078 	if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
1079 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
1080 
1081 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
1082 			mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
1083 			return -EOPNOTSUPP;
1084 		}
1085 
1086 		curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
1087 		if (!curr_fc)
1088 			return -EINVAL;
1089 
1090 		HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_udp);
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 static int
1097 hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
1098 {
1099 	struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
1100 	struct mlx5hws_definer_fc *fc = cd->fc;
1101 	struct mlx5hws_definer_fc *curr_fc;
1102 	bool vxlan_gpe_flex_parser_enabled;
1103 
1104 	/* Check reserved and unsupported fields */
1105 	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
1106 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
1107 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
1108 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
1109 		mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
1110 		return -EINVAL;
1111 	}
1112 
1113 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
1114 	    HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
1115 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
1116 		HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
1117 			    misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
1118 		HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
1119 			    misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
1120 	}
1121 
1122 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
1123 	    HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
1124 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
1125 		HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
1126 			    misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
1127 		HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
1128 			    misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
1129 	}
1130 
1131 	vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
1132 
1133 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
1134 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
1135 
1136 		if (!vxlan_gpe_flex_parser_enabled) {
1137 			mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
1138 			return -EOPNOTSUPP;
1139 		}
1140 
1141 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
1142 		HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
1143 			     tunnel_header.tunnel_header_1);
1144 		curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
1145 		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
1146 	}
1147 
1148 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
1149 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
1150 
1151 		if (!vxlan_gpe_flex_parser_enabled) {
1152 			mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
1153 			return -EOPNOTSUPP;
1154 		}
1155 
1156 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
1157 		HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
1158 			     tunnel_header.tunnel_header_0);
1159 		curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
1160 		curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
1161 		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
1162 	}
1163 
1164 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
1165 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
1166 
1167 		if (!vxlan_gpe_flex_parser_enabled) {
1168 			mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
1169 			return -EOPNOTSUPP;
1170 		}
1171 
1172 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
1173 		HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
1174 			     tunnel_header.tunnel_header_0);
1175 		curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
1176 		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
1177 	}
1178 
1179 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
1180 	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
1181 	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
1182 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
1183 
1184 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
1185 			mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
1186 			return -EOPNOTSUPP;
1187 		}
1188 
1189 		HWS_SET_HDR(fc, match_param, ICMP_DW3,
1190 			    misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
1191 
1192 		if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
1193 		    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
1194 			curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
1195 			HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
1196 			curr_fc->tag_set = &hws_definer_icmp_dw1_set;
1197 		}
1198 	}
1199 
1200 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
1201 	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
1202 	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
1203 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
1204 
1205 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
1206 			mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
1207 			return -EOPNOTSUPP;
1208 		}
1209 
1210 		HWS_SET_HDR(fc, match_param, ICMP_DW3,
1211 			    misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
1212 
1213 		if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
1214 		    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
1215 			curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
1216 			HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
1217 			curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
1218 		}
1219 	}
1220 
1221 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
1222 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1223 
1224 		curr_fc =
1225 			hws_definer_flex_parser_handler(cd,
1226 							caps->flex_parser_id_geneve_tlv_option_0);
1227 		if (!curr_fc)
1228 			return -EINVAL;
1229 
1230 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data);
1231 	}
1232 
1233 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
1234 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1235 
1236 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
1237 			mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
1238 			return -EOPNOTSUPP;
1239 		}
1240 
1241 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
1242 		fc->tag_set = &hws_definer_generic_set;
1243 		fc->bit_mask = __mlx5_mask(header_gtp, teid);
1244 		fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
1245 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
1246 	}
1247 
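	/* The format_select_gtpu_* caps report the DW index the device
	 * assigned to each GTPU field in the definer layout; multiplying by
	 * DW_SIZE turns that index into the byte_off used for the match.
	 */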
1248 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
1249 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1250 
1251 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
1252 			mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
1253 			return -EOPNOTSUPP;
1254 		}
1255 
1256 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
1257 		fc->tag_set = &hws_definer_generic_set;
1258 		fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
1259 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
1260 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1261 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
1262 	}
1263 
1264 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
1265 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1266 
1267 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
1268 			mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
1269 			return -EOPNOTSUPP;
1270 		}
1271 
1272 		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
1273 		fc->tag_set = &hws_definer_generic_set;
1274 		fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
1275 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
1276 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1277 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
1278 	}
1279 
1280 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
1281 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1282 
1283 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
1284 			mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
1285 			return -EOPNOTSUPP;
1286 		}
1287 
1288 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
1289 		curr_fc->tag_set = &hws_definer_generic_set;
1290 		curr_fc->bit_mask = -1;
1291 		curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
1292 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2);
1293 	}
1294 
1295 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
1296 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1297 
1298 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
1299 			mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
1300 			return -EOPNOTSUPP;
1301 		}
1302 
1303 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
1304 		curr_fc->tag_set = &hws_definer_generic_set;
1305 		curr_fc->bit_mask = -1;
1306 		curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
1307 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0);
1308 	}
1309 
1310 	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
1311 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1312 
1313 		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
1314 			mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
1315 			return -EOPNOTSUPP;
1316 		}
1317 
1318 		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
1319 		curr_fc->tag_set = &hws_definer_generic_set;
1320 		curr_fc->bit_mask = -1;
1321 		curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1322 		HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0);
1323 	}
1324 
1325 	return 0;
1326 }
1327 
1328 static int
1329 hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
1330 		       u32 *match_param)
1331 {
1332 	bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
1333 	struct mlx5hws_definer_fc *fc;
1334 	u32 id, value;
1335 
1336 	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
1337 		mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
1338 		return -EINVAL;
1339 	}
1340 
1341 	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
1342 	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
1343 	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
1344 	if (!fc)
1345 		return -EINVAL;
1346 
1347 	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
1348 
1349 	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
1350 	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
1351 	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
1352 	if (!fc)
1353 		return -EINVAL;
1354 
1355 	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
1356 
1357 	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
1358 	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
1359 	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
1360 	if (!fc)
1361 		return -EINVAL;
1362 
1363 	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
1364 
1365 	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
1366 	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
1367 	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
1368 	if (!fc)
1369 		return -EINVAL;
1370 
1371 	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
1372 
1373 	return 0;
1374 }
1375 
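/*
 * Convert misc5 tunnel_header words into field copy entries.  Matching on
 * MACsec tags or on the reserved area is rejected as unsupported; headers
 * 0/1 and header 2 set distinct tunnel-header match flags.
 */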
1376 static int
1377 hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
1378 		       u32 *match_param)
1379 {
1380 	struct mlx5hws_definer_fc *fc = cd->fc;
1381 
1382 	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
1383 	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
1384 	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
1385 	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
1386 	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
1387 		mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
1388 		return -EINVAL;
1389 	}
1390 
1391 	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
1392 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
1393 		HWS_SET_HDR(fc, match_param, TNL_HDR_0,
1394 			    misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
1395 	}
1396 
1397 	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
1398 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
1399 		HWS_SET_HDR(fc, match_param, TNL_HDR_1,
1400 			    misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
1401 	}
1402 
1403 	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
1404 		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
1405 		HWS_SET_HDR(fc, match_param, TNL_HDR_2,
1406 			    misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
1407 	}
1408 
1409 	HWS_SET_HDR(fc, match_param, TNL_HDR_3,
1410 		    misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
1411 
1412 	return 0;
1413 }
1414 
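/* Count the used entries in a full field copy array; an entry is in use
 * if and only if a converter assigned its tag_set callback.
 */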
1415 static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
1416 {
1417 	u32 fc_sz = 0;
1418 	int i;
1419 
1420 	/* For an empty matcher, the fc array is ZERO_SIZE_PTR */
1421 	if (fc == ZERO_SIZE_PTR)
1422 		return 0;
1423 
1424 	for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
1425 		if (fc[i].tag_set)
1426 			fc_sz++;
1427 	return fc_sz;
1428 }
1429 
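/*
 * Pack the sparse MLX5HWS_DEFINER_FNAME_MAX sized field copy array into a
 * tight allocation holding only the used entries, recording each entry's
 * original fname index before copying it.
 */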
1430 static struct mlx5hws_definer_fc *
1431 hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
1432 {
1433 	struct mlx5hws_definer_fc *compressed_fc = NULL;
1434 	u32 definer_size = hws_definer_get_fc_size(fc);
1435 	u32 fc_sz = 0;
1436 	int i;
1437 
1438 	compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
1439 	if (!compressed_fc)
1440 		return NULL;
1441 
1442 	/* For an empty matcher, kcalloc(0, ...) above returned ZERO_SIZE_PTR */
1443 	if (!definer_size)
1444 		return compressed_fc;
1445 
1446 	for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
1447 		if (!fc[i].tag_set)
1448 			continue;
1449 
1450 		fc[i].fname = i;
1451 		memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
1452 	}
1453 
1454 	return compressed_fc;
1455 }
1456 
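/*
 * Stamp every used field copy bitmask into the header layout (hl) buffer,
 * producing the union of bits that the definer selectors must later cover.
 */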
1457 static void
1458 hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
1459 {
1460 	int i;
1461 
1462 	/* nothing to do for empty matcher */
1463 	if (fc == ZERO_SIZE_PTR)
1464 		return;
1465 
1466 	for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
1467 		if (!fc[i].tag_set)
1468 			continue;
1469 
1470 		HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
1471 	}
1472 }
1473 
1474 static struct mlx5hws_definer_fc *
1475 hws_definer_alloc_fc(struct mlx5hws_context *ctx,
1476 		     size_t len)
1477 {
1478 	struct mlx5hws_definer_fc *fc;
1479 	int i;
1480 
1481 	fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
1482 	if (!fc)
1483 		return NULL;
1484 
1485 	for (i = 0; i < len; i++)
1486 		fc[i].ctx = ctx;
1487 
1488 	return fc;
1489 }
1490 
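/*
 * Convert a match template into the union header layout: run each enabled
 * criteria converter, verify that no conflicting match flags were set,
 * attach the compressed field copy array to the template, and finally
 * write all field bitmasks into the hl buffer.
 */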
1491 static int
1492 hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
1493 				    struct mlx5hws_match_template *mt,
1494 				    u8 *hl)
1495 {
1496 	struct mlx5hws_definer_conv_data cd = {0};
1497 	struct mlx5hws_definer_fc *fc;
1498 	int ret;
1499 
1500 	fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
1501 	if (!fc)
1502 		return -ENOMEM;
1503 
1504 	cd.fc = fc;
1505 	cd.ctx = ctx;
1506 
1507 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
1508 		mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
1509 		ret = -EOPNOTSUPP;
1510 		goto err_free_fc;
1511 	}
1512 
1513 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
1514 		ret = hws_definer_conv_outer(&cd, mt->match_param);
1515 		if (ret)
1516 			goto err_free_fc;
1517 	}
1518 
1519 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
1520 		ret = hws_definer_conv_inner(&cd, mt->match_param);
1521 		if (ret)
1522 			goto err_free_fc;
1523 	}
1524 
1525 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
1526 		ret = hws_definer_conv_misc(&cd, mt->match_param);
1527 		if (ret)
1528 			goto err_free_fc;
1529 	}
1530 
1531 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
1532 		ret = hws_definer_conv_misc2(&cd, mt->match_param);
1533 		if (ret)
1534 			goto err_free_fc;
1535 	}
1536 
1537 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
1538 		ret = hws_definer_conv_misc3(&cd, mt->match_param);
1539 		if (ret)
1540 			goto err_free_fc;
1541 	}
1542 
1543 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
1544 		ret = hws_definer_conv_misc4(&cd, mt->match_param);
1545 		if (ret)
1546 			goto err_free_fc;
1547 	}
1548 
1549 	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
1550 		ret = hws_definer_conv_misc5(&cd, mt->match_param);
1551 		if (ret)
1552 			goto err_free_fc;
1553 	}
1554 
1555 	/* Check that no conflicting fields are set together */
1556 	ret = hws_definer_check_match_flags(&cd);
1557 	if (ret)
1558 		goto err_free_fc;
1559 
1560 	/* Allocate fc array on mt */
1561 	mt->fc = hws_definer_alloc_compressed_fc(fc);
1562 	if (!mt->fc) {
1563 		mlx5hws_err(ctx,
1564 			    "Convert match params: failed to set field copy to match template\n");
1565 		ret = -ENOMEM;
1566 		goto err_free_fc;
1567 	}
1568 	mt->fc_sz = hws_definer_get_fc_size(fc);
1569 
1570 	/* Fill in headers layout */
1571 	hws_definer_set_hl(hl, fc);
1572 
1573 	kfree(fc);
1574 	return 0;
1575 
1576 err_free_fc:
1577 	kfree(fc);
1578 	return ret;
1579 }
1580 
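/*
 * Same conversion as hws_definer_conv_match_params_to_hl() above, but driven
 * by an explicit criteria/match_param pair: the compressed field copy array
 * is returned to the caller (with its size in fc_sz) instead of being
 * attached to a match template, and no header layout is produced.
 */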
1581 struct mlx5hws_definer_fc *
1582 mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
1583 						   u8 match_criteria_enable,
1584 						   u32 *match_param,
1585 						   int *fc_sz)
1586 {
1587 	struct mlx5hws_definer_fc *compressed_fc = NULL;
1588 	struct mlx5hws_definer_conv_data cd = {0};
1589 	struct mlx5hws_definer_fc *fc;
1590 	int ret;
1591 
1592 	fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
1593 	if (!fc)
1594 		return NULL;
1595 
1596 	cd.fc = fc;
1597 	cd.ctx = ctx;
1598 
1599 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
1600 		ret = hws_definer_conv_outer(&cd, match_param);
1601 		if (ret)
1602 			goto err_free_fc;
1603 	}
1604 
1605 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
1606 		ret = hws_definer_conv_inner(&cd, match_param);
1607 		if (ret)
1608 			goto err_free_fc;
1609 	}
1610 
1611 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
1612 		ret = hws_definer_conv_misc(&cd, match_param);
1613 		if (ret)
1614 			goto err_free_fc;
1615 	}
1616 
1617 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
1618 		ret = hws_definer_conv_misc2(&cd, match_param);
1619 		if (ret)
1620 			goto err_free_fc;
1621 	}
1622 
1623 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
1624 		ret = hws_definer_conv_misc3(&cd, match_param);
1625 		if (ret)
1626 			goto err_free_fc;
1627 	}
1628 
1629 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
1630 		ret = hws_definer_conv_misc4(&cd, match_param);
1631 		if (ret)
1632 			goto err_free_fc;
1633 	}
1634 
1635 	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
1636 		ret = hws_definer_conv_misc5(&cd, match_param);
1637 		if (ret)
1638 			goto err_free_fc;
1639 	}
1640 
1641 	/* Allocate the compressed fc array for the caller */
1642 	compressed_fc = hws_definer_alloc_compressed_fc(fc);
1643 	if (!compressed_fc) {
1644 		mlx5hws_err(ctx,
1645 			    "Convert to compressed fc: failed to set field copy to match template\n");
1646 		goto err_free_fc;
1647 	}
1648 	*fc_sz = hws_definer_get_fc_size(fc);
1649 
1650 err_free_fc:
1651 	kfree(fc);
1652 	return compressed_fc;
1653 }
1654 
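/*
 * Map a header layout byte offset to its byte offset inside the match tag.
 * DW selectors occupy the first DW_SELECTORS * DW_SIZE tag bytes in reverse
 * selector order (dw_selector[0] maps to the highest offsets); byte
 * selectors follow, also in reverse order.  Worked example, assuming
 * DW_SELECTORS == 9 and DW_SIZE == 4 per the definer layout headers:
 * hl_byte_off == 41 falls in DW 10 at in-DW byte 1, so if
 * dw_selector[2] == 10 the result is 1 + 4 * (9 - 2 - 1) == 25.
 */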
1655 static int
1656 hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
1657 			     u32 hl_byte_off,
1658 			     u32 *tag_byte_off)
1659 {
1660 	int i, dw_to_scan;
1661 	u8 byte_offset;
1662 
1663 	/* Avoid accessing unused DW selectors */
1664 	dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
1665 		DW_SELECTORS : DW_SELECTORS_MATCH;
1666 
1667 	/* Keep the in-DW byte offset; each DW selector covers DW_SIZE bytes */
1668 	byte_offset = hl_byte_off % DW_SIZE;
1669 	for (i = 0; i < dw_to_scan; i++) {
1670 		if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
1671 			*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
1672 			return 0;
1673 		}
1674 	}
1675 
1676 	/* Add offset to skip DWs in definer */
1677 	byte_offset = DW_SIZE * DW_SELECTORS;
1678 	/* Iterate in reverse since the code uses bytes from 7 -> 0 */
1679 	for (i = BYTE_SELECTORS; i-- > 0;) {
1680 		if (definer->byte_selector[i] == hl_byte_off) {
1681 			*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
1682 			return 0;
1683 		}
1684 	}
1685 
1686 	return -EINVAL;
1687 }
1688 
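/*
 * Rebase every field copy from its header layout offset to its tag offset
 * for the given definer, shifting bit_off by the byte displacement that the
 * selector position introduces within the DW.
 */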
1689 static int
1690 hws_definer_fc_bind(struct mlx5hws_definer *definer,
1691 		    struct mlx5hws_definer_fc *fc,
1692 		    u32 fc_sz)
1693 {
1694 	u32 tag_offset = 0;
1695 	int ret, byte_diff;
1696 	u32 i;
1697 
1698 	for (i = 0; i < fc_sz; i++) {
1699 		/* Map header layout byte offset to byte offset in tag */
1700 		ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
1701 		if (ret)
1702 			return ret;
1703 
1704 		/* Move setter based on the location in the definer */
1705 		byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
1706 		fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
1707 
1708 		/* Update offset in headers layout to offset in tag */
1709 		fc->byte_off = tag_offset;
1710 		fc++;
1711 	}
1712 
1713 	return 0;
1714 }
1715 
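/*
 * Depth-first backtracking search over the header layout: skip all-zero DWs,
 * then for the current DW try (in order) a limited DW selector (DW index
 * below 64 only), a full DW selector, and per-byte selectors (byte offsets
 * up to 255 only), undoing each choice before trying the next alternative.
 * Returns true once every non-zero DW is covered within the selector budget.
 */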
1716 static bool
1717 hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
1718 			     u32 cur_dw,
1719 			     u32 *data)
1720 {
1721 	u8 bytes_set;
1722 	int byte_idx;
1723 	bool ret;
1724 	int i;
1725 
1726 	/* Reached end, nothing left to do */
1727 	if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
1728 		return true;
1729 
1730 	/* No data set, can skip to next DW */
1731 	while (!*data) {
1732 		cur_dw++;
1733 		data++;
1734 
1735 		/* Reached end, nothing left to do */
1736 		if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
1737 			return true;
1738 	}
1739 
1740 	/* Used all DW selectors and Byte selectors, no possible solution */
1741 	if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
1742 	    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
1743 	    ctrl->allowed_bytes == ctrl->used_bytes)
1744 		return false;
1745 
1746 	/* Try to use limited DW selectors */
1747 	if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
1748 		ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
1749 
1750 		ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
1751 		if (ret)
1752 			return ret;
1753 
1754 		ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
1755 	}
1756 
1757 	/* Try to use DW selectors */
1758 	if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
1759 		ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
1760 
1761 		ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
1762 		if (ret)
1763 			return ret;
1764 
1765 		ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
1766 	}
1767 
1768 	/* No byte selector for offset bigger than 255 */
1769 	/* No byte selectors for offsets above 255 */
1770 		return false;
1771 
1772 	bytes_set = !!(0x000000ff & *data) +
1773 		    !!(0x0000ff00 & *data) +
1774 		    !!(0x00ff0000 & *data) +
1775 		    !!(0xff000000 & *data);
1776 
1777 	/* Check if there are enough byte selectors left */
1778 	if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
1779 		return false;
1780 
1781 	/* Try to use Byte selectors */
1782 	for (i = 0; i < DW_SIZE; i++)
1783 		if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
1784 			/* Use byte selectors high to low */
1785 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
1786 			ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
1787 			ctrl->used_bytes++;
1788 		}
1789 
1790 	ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
1791 	if (ret)
1792 		return ret;
1793 
1794 	for (i = 0; i < DW_SIZE; i++)
1795 		if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
1796 			ctrl->used_bytes--;
1797 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
1798 			ctrl->byte_selector[byte_idx] = 0;
1799 		}
1800 
1801 	return false;
1802 }
1803 
1804 static void
1805 hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
1806 			  struct mlx5hws_definer *definer)
1807 {
1808 	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
1809 	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
1810 	memcpy(definer->dw_selector + ctrl->allowed_full_dw,
1811 	       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
1812 }
1813 
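/*
 * Try to fit the header layout into a regular match definer first
 * (DW_SELECTORS_MATCH full DW selectors); if that fails, retry as a jumbo
 * definer, using all full DW selectors when the device supports it and
 * falling back to limited DW selectors otherwise.
 */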
1814 static int
1815 hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
1816 				struct mlx5hws_definer *definer,
1817 				u8 *hl)
1818 {
1819 	struct mlx5hws_definer_sel_ctrl ctrl = {0};
1820 	bool found;
1821 
1822 	/* Try to create a match definer */
1823 	ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
1824 	ctrl.allowed_lim_dw = 0;
1825 	ctrl.allowed_bytes = BYTE_SELECTORS;
1826 
1827 	found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
1828 	if (found) {
1829 		hws_definer_copy_sel_ctrl(&ctrl, definer);
1830 		definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
1831 		return 0;
1832 	}
1833 
1834 	/* Try to create a full/limited jumbo definer */
1835 	ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
1836 								  DW_SELECTORS_MATCH;
1837 	ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
1838 								 DW_SELECTORS_LIMITED;
1839 	ctrl.allowed_bytes = BYTE_SELECTORS;
1840 
1841 	found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
1842 	if (found) {
1843 		hws_definer_copy_sel_ctrl(&ctrl, definer);
1844 		definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
1845 		return 0;
1846 	}
1847 
1848 	return -E2BIG;
1849 }
1850 
1851 static void
1852 hws_definer_create_tag_mask(u32 *match_param,
1853 			    struct mlx5hws_definer_fc *fc,
1854 			    u32 fc_sz,
1855 			    u8 *tag)
1856 {
1857 	u32 i;
1858 
1859 	for (i = 0; i < fc_sz; i++) {
1860 		if (fc->tag_mask_set)
1861 			fc->tag_mask_set(fc, match_param, tag);
1862 		else
1863 			fc->tag_set(fc, match_param, tag);
1864 		fc++;
1865 	}
1866 }
1867 
1868 void mlx5hws_definer_create_tag(u32 *match_param,
1869 				struct mlx5hws_definer_fc *fc,
1870 				u32 fc_sz,
1871 				u8 *tag)
1872 {
1873 	u32 i;
1874 
1875 	for (i = 0; i < fc_sz; i++) {
1876 		fc->tag_set(fc, match_param, tag);
1877 		fc++;
1878 	}
1879 }
1880 
1881 int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
1882 {
1883 	return definer->obj_id;
1884 }
1885 
1886 int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
1887 			    struct mlx5hws_definer *definer_b)
1888 {
1889 	int i;
1890 
1891 	/* Future: Optimize by comparing selectors with valid mask only */
1892 	for (i = 0; i < BYTE_SELECTORS; i++)
1893 		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
1894 			return 1;
1895 
1896 	for (i = 0; i < DW_SELECTORS; i++)
1897 		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
1898 			return 1;
1899 
1900 	for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
1901 		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
1902 			return 1;
1903 
1904 	return 0;
1905 }
1906 
1907 int
1908 mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
1909 			    struct mlx5hws_match_template *mt,
1910 			    struct mlx5hws_definer *match_definer)
1911 {
1912 	u8 *match_hl;
1913 	int ret;
1914 
1915 	/* Union header-layout (hl) is used for creating a single definer
1916 	 * field layout used with different bitmasks for hash and match.
1917 	 */
1918 	match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
1919 	if (!match_hl)
1920 		return -ENOMEM;
1921 
1922 	/* Convert all mt items to header layout (hl)
1923 	 * and allocate the match field copy array (fc).
1924 	 */
1925 	ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
1926 	if (ret) {
1927 		mlx5hws_err(ctx, "Failed to convert items to header layout\n");
1928 		goto free_fc;
1929 	}
1930 
1931 	/* Find the match definer layout for header layout match union */
1932 	ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
1933 	if (ret) {
1934 		if (ret == -E2BIG)
1935 			mlx5hws_dbg(ctx,
1936 				    "Failed to create match definer from header layout - E2BIG\n");
1937 		else
1938 			mlx5hws_err(ctx,
1939 				    "Failed to create match definer from header layout (%d)\n",
1940 				    ret);
1941 		goto free_fc;
1942 	}
1943 
1944 	kfree(match_hl);
1945 	return 0;
1946 
1947 free_fc:
1948 	kfree(mt->fc);
1949 
1950 	kfree(match_hl);
1951 	return ret;
1952 }
1953 
1954 int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
1955 {
1956 	struct mlx5hws_definer_cache *new_cache;
1957 
1958 	new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
1959 	if (!new_cache)
1960 		return -ENOMEM;
1961 
1962 	INIT_LIST_HEAD(&new_cache->list_head);
1963 	*cache = new_cache;
1964 
1965 	return 0;
1966 }
1967 
1968 void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
1969 {
1970 	kfree(cache);
1971 }
1972 
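/*
 * Return the object ID of a definer with the given selectors and mask.
 * Cache hits are refcounted and moved to the head of the list (LRU);
 * otherwise a new definer object is created and added to the cache.
 */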
1973 int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
1974 			    struct mlx5hws_definer *definer)
1975 {
1976 	struct mlx5hws_definer_cache *cache = ctx->definer_cache;
1977 	struct mlx5hws_cmd_definer_create_attr def_attr = {0};
1978 	struct mlx5hws_definer_cache_item *cached_definer;
1979 	u32 obj_id;
1980 	int ret;
1981 
1982 	/* Search definer cache for requested definer */
1983 	list_for_each_entry(cached_definer, &cache->list_head, list_node) {
1984 		if (mlx5hws_definer_compare(&cached_definer->definer, definer))
1985 			continue;
1986 
1987 		/* Reuse definer and set LRU (move to the head of the list) */
1988 		list_del_init(&cached_definer->list_node);
1989 		list_add(&cached_definer->list_node, &cache->list_head);
1990 		cached_definer->refcount++;
1991 		return cached_definer->definer.obj_id;
1992 	}
1993 
1994 	/* Allocate and create definer based on the bitmask tag */
1995 	def_attr.match_mask = definer->mask.jumbo;
1996 	def_attr.dw_selector = definer->dw_selector;
1997 	def_attr.byte_selector = definer->byte_selector;
1998 
1999 	ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
2000 	if (ret)
2001 		return -1;
2002 
2003 	cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
2004 	if (!cached_definer)
2005 		goto free_definer_obj;
2006 
2007 	memcpy(&cached_definer->definer, definer, sizeof(*definer));
2008 	cached_definer->definer.obj_id = obj_id;
2009 	cached_definer->refcount = 1;
2010 	list_add(&cached_definer->list_node, &cache->list_head);
2011 
2012 	return obj_id;
2013 
2014 free_definer_obj:
2015 	mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
2016 	return -1;
2017 }
2018 
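/* Drop one cache reference; the definer object is destroyed and the cache
 * entry freed only when the last reference is put.
 */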
2019 static void
2020 hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
2021 {
2022 	struct mlx5hws_definer_cache_item *cached_definer;
2023 
2024 	list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
2025 		if (cached_definer->definer.obj_id != obj_id)
2026 			continue;
2027 
2028 		/* Object found */
2029 		if (--cached_definer->refcount)
2030 			return;
2031 
2032 		list_del_init(&cached_definer->list_node);
2033 		mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
2034 		kfree(cached_definer);
2035 		return;
2036 	}
2037 
2038 	/* Programming error, object must be part of cache */
2039 	pr_warn("HWS: failed putting definer object\n");
2040 }
2041 
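/*
 * Duplicate the given layout, optionally rebind the field copy array to it,
 * derive the tag mask from the match parameters, and create (or reuse from
 * the cache) the backing definer object.
 */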
2042 static struct mlx5hws_definer *
2043 hws_definer_alloc(struct mlx5hws_context *ctx,
2044 		  struct mlx5hws_definer_fc *fc,
2045 		  int fc_sz,
2046 		  u32 *match_param,
2047 		  struct mlx5hws_definer *layout,
2048 		  bool bind_fc)
2049 {
2050 	struct mlx5hws_definer *definer;
2051 	int ret;
2052 
2053 	definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
2054 	if (!definer)
2055 		return NULL;
2056 
2057 	/* Align field copy array based on given layout */
2058 	if (bind_fc) {
2059 		ret = hws_definer_fc_bind(definer, fc, fc_sz);
2060 		if (ret) {
2061 			mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
2062 			goto free_definer;
2063 		}
2064 	}
2065 
2066 	/* Create the tag mask used for definer creation */
2067 	hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
2068 
2069 	ret = mlx5hws_definer_get_obj(ctx, definer);
2070 	if (ret < 0)
2071 		goto free_definer;
2072 
2073 	definer->obj_id = ret;
2074 	return definer;
2075 
2076 free_definer:
2077 	kfree(definer);
2078 	return NULL;
2079 }
2080 
2081 void mlx5hws_definer_free(struct mlx5hws_context *ctx,
2082 			  struct mlx5hws_definer *definer)
2083 {
2084 	hws_definer_put_obj(ctx, definer->obj_id);
2085 	kfree(definer);
2086 }
2087 
2088 static int
2089 hws_definer_mt_match_init(struct mlx5hws_context *ctx,
2090 			  struct mlx5hws_match_template *mt,
2091 			  struct mlx5hws_definer *match_layout)
2092 {
2093 	/* Create mandatory match definer */
2094 	mt->definer = hws_definer_alloc(ctx,
2095 					mt->fc,
2096 					mt->fc_sz,
2097 					mt->match_param,
2098 					match_layout,
2099 					true);
2100 	if (!mt->definer) {
2101 		mlx5hws_err(ctx, "Failed to create match definer\n");
2102 		return -EINVAL;
2103 	}
2104 
2105 	return 0;
2106 }
2107 
2108 static void
2109 hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
2110 			    struct mlx5hws_match_template *mt)
2111 {
2112 	mlx5hws_definer_free(ctx, mt->definer);
2113 }
2114 
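/*
 * Typical lifecycle, as a hypothetical caller sketch (only the two exported
 * functions below are real; everything else is illustrative):
 *
 *	ret = mlx5hws_definer_mt_init(ctx, mt);	// calc layout, create definer
 *	if (ret)
 *		return ret;
 *	...use mt->definer and mt->fc...
 *	mlx5hws_definer_mt_uninit(ctx, mt);	// put definer, free mt->fc
 */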
2115 int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
2116 			    struct mlx5hws_match_template *mt)
2117 {
2118 	struct mlx5hws_definer match_layout = {0};
2119 	int ret;
2120 
2121 	ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
2122 	if (ret) {
2123 		mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
2124 		return ret;
2125 	}
2126 
2127 	/* Calculate definers needed for exact match */
2128 	ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
2129 	if (ret) {
2130 		mlx5hws_err(ctx, "Failed to init match definers\n");
2131 		goto free_fc;
2132 	}
2133 
2134 	return 0;
2135 
2136 free_fc:
2137 	kfree(mt->fc);
2138 	return ret;
2139 }
2140 
2141 void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
2142 			       struct mlx5hws_match_template *mt)
2143 {
2144 	hws_definer_mt_match_uninit(ctx, mt);
2145 	kfree(mt->fc);
2146 }
2147