1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
3
4 #include "internal.h"
5
/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN BIT(12)
#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
#define MLX5_FLOW_LAYER_GRE BIT(14)
#define MLX5_FLOW_LAYER_MPLS BIT(15)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_IPIP BIT(23)
#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
#define MLX5_FLOW_LAYER_NVGRE BIT(25)
#define MLX5_FLOW_LAYER_GENEVE BIT(26)

#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)

/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
	 MLX5_FLOW_ITEM_FLEX_TUNNEL)

/* GTP-U PDU session container extension header type. */
#define GTP_PDU_SC 0x85
/* Sentinel written to the tag when a vport/vhca GVMI lookup fails,
 * so the rule can never match a real port.
 */
#define BAD_PORT 0xBAD
#define ETH_TYPE_IPV4_VXLAN 0x0800
#define ETH_TYPE_IPV6_VXLAN 0x86DD
/* Well-known UDP destination ports used to classify tunnel protocols. */
#define UDP_GTPU_PORT 2152
#define UDP_PORT_MPLS 6635
#define UDP_GENEVE_PORT 6081
#define UDP_ROCEV2_PORT 4791
#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)

/* STE first/second VLAN qualifier encoding. */
#define STE_NO_VLAN 0x0
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
/* STE L3 type encoding (see hws_definer_l3_type_set()). */
#define STE_NO_L3 0x0
#define STE_IPV4 0x1
#define STE_IPV6 0x2
/* STE L4 type encoding. NOTE(review): STE_ICMP and STE_ESP share the
 * value 0x3 - presumably they apply to different STE fields; confirm.
 */
#define STE_NO_L4 0x0
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_ICMP 0x3
#define STE_ESP 0x3

/* IP header "version" field values (IPv4 = 4, IPv6 = 6). */
#define IPV4 0x4
#define IPV6 0x6
/* Setter function based on bit offset and mask, for 32bit DW.
 * Read-modify-write of the big-endian DW containing the field: the DW is
 * converted to CPU order, the field bits are cleared, the masked value is
 * OR-ed in, and the DW is written back in big-endian order.
 */
#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
	do { \
		u32 _v = v; \
		*((__be32 *)(p) + ((byte_off) / 4)) = \
		cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
			    ((byte_off) / 4))) & \
			    (~((mask) << (bit_off)))) | \
			    (((_v) & (mask)) << \
			    (bit_off))); \
	} while (0)

/* Setter function based on bit offset and mask, for unaligned 32bit DW.
 * A negative bit_off means the field straddles a DW boundary: the high
 * part of the value goes into this DW at bit 0 and the remaining low
 * bits spill into the following DW.
 */
#define HWS_SET32(p, v, byte_off, bit_off, mask) \
	do { \
		if (unlikely((bit_off) < 0)) { \
			u32 _bit_off = -1 * (bit_off); \
			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
			_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
			_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
				   (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
		} else { \
			_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
		} \
	} while (0)

/* Getter for up to aligned 32bit DW */
#define HWS_GET32(p, byte_off, bit_off, mask) \
	((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
82
/* Select the inner or outer variant of a definer field-name enum value. */
#define HWS_CALC_FNAME(field, inner) \
	((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
		   MLX5HWS_DEFINER_FNAME_##field##_O)

/* Read a field from an FTE match param using the mlx5 ifc layout. */
#define HWS_GET_MATCH_PARAM(match_param, hdr) \
	MLX5_GET(fte_match_param, match_param, hdr)

/* True when the (single-DW) field is non-zero in the match param. */
#define HWS_IS_FLD_SET(match_param, hdr) \
	(!!(HWS_GET_MATCH_PARAM(match_param, hdr)))

/* True when any DW of a multi-DW field is non-zero.
 * sz_in_bits must be a multiple of 32 (enforced at build time).
 */
#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
		BUILD_BUG_ON((sz_in_bits) % 32); \
		u32 sz = sz_in_bits; \
		u32 res = 0; \
		u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
		while (!res && sz >= 32) { \
			res = *((match_param) + (dw_off++)); \
			sz -= 32; \
		} \
		res; \
	})

/* Field-set test that handles both single-DW and multi-DW fields. */
#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
	(((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
			       !!(HWS_GET_MATCH_PARAM(match_param, hdr)))

/* 64-bit variants of the match-param accessors above. */
#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
	MLX5_GET64(fte_match_param, match_param, hdr)

#define HWS_IS_FLD64_SET(match_param, hdr) \
	(!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))

/* Record the source field location (mask/offset within fte_match_param). */
#define HWS_CALC_HDR_SRC(fc, s_hdr) \
	do { \
		(fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
		(fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
		(fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
	} while (0)

/* Record the destination field location (mask/offset within definer_hl). */
#define HWS_CALC_HDR_DST(fc, d_hdr) \
	do { \
		(fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
		(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
		(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
	} while (0)

/* Bind a source match field to a destination definer field using the
 * generic copy setter.
 */
#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
	do { \
		HWS_CALC_HDR_SRC(fc, s_hdr); \
		HWS_CALC_HDR_DST(fc, d_hdr); \
		(fc)->tag_set = &hws_definer_generic_set; \
	} while (0)

/* Bind s_hdr to d_hdr only when the match param actually sets s_hdr. */
#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
	do { \
		if (HWS_IS_FLD_SET(match_param, s_hdr)) \
			HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
	} while (0)
141
/* Tracks the selector budget and current allocation while a definer
 * layout is being constructed.
 */
struct mlx5hws_definer_sel_ctrl {
	u8 allowed_full_dw; /* Full DW selectors cover all offsets */
	u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
	u8 allowed_bytes; /* Bytes selectors, up to offset 255 */
	u8 used_full_dw; /* Full DW selectors consumed so far */
	u8 used_lim_dw; /* Limited DW selectors consumed so far */
	u8 used_bytes; /* Byte selectors consumed so far */
	u8 full_dw_selector[DW_SELECTORS];
	u8 lim_dw_selector[DW_SELECTORS_LIMITED];
	u8 byte_selector[BYTE_SELECTORS];
};
153
/* State shared while converting an FTE match param into definer
 * field-copy entries.
 */
struct mlx5hws_definer_conv_data {
	struct mlx5hws_context *ctx; /* Owning HWS context (used for logging) */
	struct mlx5hws_definer_fc *fc; /* Field-copy array indexed by fname */
	/* enum mlx5hws_definer_match_flag */
	u32 match_flags;
};
160
/* Expand to a designated initializer mapping an fname enum value to its
 * stringified name, e.g. [MLX5HWS_DEFINER_FNAME_ETH_TYPE_O] = "ETH_TYPE_O".
 */
#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name

/* Printable names for definer fields, indexed by enum
 * mlx5hws_definer_fname; the FNAME_MAX slot is the catch-all for
 * unknown/out-of-range values.
 */
static const char * const hws_definer_fname_to_str[] = {
	HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
	HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
	HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
	HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
	HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
	HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
	HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
	HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
	HWS_DEFINER_ENTRY(ETH_TYPE_O),
	HWS_DEFINER_ENTRY(ETH_TYPE_I),
	HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
	HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
	HWS_DEFINER_ENTRY(VLAN_TYPE_O),
	HWS_DEFINER_ENTRY(VLAN_TYPE_I),
	HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
	HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
	HWS_DEFINER_ENTRY(VLAN_CFI_O),
	HWS_DEFINER_ENTRY(VLAN_CFI_I),
	HWS_DEFINER_ENTRY(VLAN_ID_O),
	HWS_DEFINER_ENTRY(VLAN_ID_I),
	HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
	HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
	HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
	HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
	HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
	HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
	HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
	HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
	HWS_DEFINER_ENTRY(IPV4_IHL_O),
	HWS_DEFINER_ENTRY(IPV4_IHL_I),
	HWS_DEFINER_ENTRY(IP_DSCP_O),
	HWS_DEFINER_ENTRY(IP_DSCP_I),
	HWS_DEFINER_ENTRY(IP_ECN_O),
	HWS_DEFINER_ENTRY(IP_ECN_I),
	HWS_DEFINER_ENTRY(IP_TTL_O),
	HWS_DEFINER_ENTRY(IP_TTL_I),
	HWS_DEFINER_ENTRY(IPV4_DST_O),
	HWS_DEFINER_ENTRY(IPV4_DST_I),
	HWS_DEFINER_ENTRY(IPV4_SRC_O),
	HWS_DEFINER_ENTRY(IPV4_SRC_I),
	HWS_DEFINER_ENTRY(IP_VERSION_O),
	HWS_DEFINER_ENTRY(IP_VERSION_I),
	HWS_DEFINER_ENTRY(IP_FRAG_O),
	HWS_DEFINER_ENTRY(IP_FRAG_I),
	HWS_DEFINER_ENTRY(IP_LEN_O),
	HWS_DEFINER_ENTRY(IP_LEN_I),
	HWS_DEFINER_ENTRY(IP_TOS_O),
	HWS_DEFINER_ENTRY(IP_TOS_I),
	HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
	HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
	HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
	HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
	HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
	HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
	HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
	HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
	HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
	HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
	HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
	HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
	HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
	HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
	HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
	HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
	HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
	HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
	HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
	HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
	HWS_DEFINER_ENTRY(L4_SPORT_O),
	HWS_DEFINER_ENTRY(L4_SPORT_I),
	HWS_DEFINER_ENTRY(L4_DPORT_O),
	HWS_DEFINER_ENTRY(L4_DPORT_I),
	HWS_DEFINER_ENTRY(TCP_FLAGS_I),
	HWS_DEFINER_ENTRY(TCP_FLAGS_O),
	HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
	HWS_DEFINER_ENTRY(TCP_ACK_NUM),
	HWS_DEFINER_ENTRY(GTP_TEID),
	HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
	HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
	HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
	HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
	HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
	HWS_DEFINER_ENTRY(GTPU_DW0),
	HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
	HWS_DEFINER_ENTRY(GTPU_DW2),
	HWS_DEFINER_ENTRY(FLEX_PARSER_0),
	HWS_DEFINER_ENTRY(FLEX_PARSER_1),
	HWS_DEFINER_ENTRY(FLEX_PARSER_2),
	HWS_DEFINER_ENTRY(FLEX_PARSER_3),
	HWS_DEFINER_ENTRY(FLEX_PARSER_4),
	HWS_DEFINER_ENTRY(FLEX_PARSER_5),
	HWS_DEFINER_ENTRY(FLEX_PARSER_6),
	HWS_DEFINER_ENTRY(FLEX_PARSER_7),
	HWS_DEFINER_ENTRY(VPORT_REG_C_0),
	HWS_DEFINER_ENTRY(VXLAN_FLAGS),
	HWS_DEFINER_ENTRY(VXLAN_VNI),
	HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
	HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
	HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
	HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
	HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
	HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
	HWS_DEFINER_ENTRY(GENEVE_OAM),
	HWS_DEFINER_ENTRY(GENEVE_PROTO),
	HWS_DEFINER_ENTRY(GENEVE_VNI),
	HWS_DEFINER_ENTRY(SOURCE_QP),
	HWS_DEFINER_ENTRY(SOURCE_GVMI),
	HWS_DEFINER_ENTRY(REG_0),
	HWS_DEFINER_ENTRY(REG_1),
	HWS_DEFINER_ENTRY(REG_2),
	HWS_DEFINER_ENTRY(REG_3),
	HWS_DEFINER_ENTRY(REG_4),
	HWS_DEFINER_ENTRY(REG_5),
	HWS_DEFINER_ENTRY(REG_6),
	HWS_DEFINER_ENTRY(REG_7),
	HWS_DEFINER_ENTRY(REG_8),
	HWS_DEFINER_ENTRY(REG_9),
	HWS_DEFINER_ENTRY(REG_10),
	HWS_DEFINER_ENTRY(REG_11),
	HWS_DEFINER_ENTRY(REG_A),
	HWS_DEFINER_ENTRY(REG_B),
	HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
	HWS_DEFINER_ENTRY(GRE_C),
	HWS_DEFINER_ENTRY(GRE_K),
	HWS_DEFINER_ENTRY(GRE_S),
	HWS_DEFINER_ENTRY(GRE_PROTOCOL),
	HWS_DEFINER_ENTRY(GRE_OPT_KEY),
	HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
	HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
	HWS_DEFINER_ENTRY(INTEGRITY_O),
	HWS_DEFINER_ENTRY(INTEGRITY_I),
	HWS_DEFINER_ENTRY(ICMP_DW1),
	HWS_DEFINER_ENTRY(ICMP_DW2),
	HWS_DEFINER_ENTRY(ICMP_DW3),
	HWS_DEFINER_ENTRY(IPSEC_SPI),
	HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
	HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
	HWS_DEFINER_ENTRY(MPLS0_O),
	HWS_DEFINER_ENTRY(MPLS1_O),
	HWS_DEFINER_ENTRY(MPLS2_O),
	HWS_DEFINER_ENTRY(MPLS3_O),
	HWS_DEFINER_ENTRY(MPLS4_O),
	HWS_DEFINER_ENTRY(MPLS0_I),
	HWS_DEFINER_ENTRY(MPLS1_I),
	HWS_DEFINER_ENTRY(MPLS2_I),
	HWS_DEFINER_ENTRY(MPLS3_I),
	HWS_DEFINER_ENTRY(MPLS4_I),
	HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
	HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
	HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
	HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
	HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
	HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
	HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
	HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
	HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
	HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
	HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
	HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
	HWS_DEFINER_ENTRY(IB_L4_OPCODE),
	HWS_DEFINER_ENTRY(IB_L4_QPN),
	HWS_DEFINER_ENTRY(IB_L4_A),
	HWS_DEFINER_ENTRY(RANDOM_NUM),
	HWS_DEFINER_ENTRY(PTYPE_L2_O),
	HWS_DEFINER_ENTRY(PTYPE_L2_I),
	HWS_DEFINER_ENTRY(PTYPE_L3_O),
	HWS_DEFINER_ENTRY(PTYPE_L3_I),
	HWS_DEFINER_ENTRY(PTYPE_L4_O),
	HWS_DEFINER_ENTRY(PTYPE_L4_I),
	HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
	HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
	HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
	HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
	HWS_DEFINER_ENTRY(TNL_HDR_0),
	HWS_DEFINER_ENTRY(TNL_HDR_1),
	HWS_DEFINER_ENTRY(TNL_HDR_2),
	HWS_DEFINER_ENTRY(TNL_HDR_3),
	[MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
};
365
mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)366 const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
367 {
368 if (fname > MLX5HWS_DEFINER_FNAME_MAX)
369 fname = MLX5HWS_DEFINER_FNAME_MAX;
370 return hws_definer_fname_to_str[fname];
371 }
372
/* Tag setter that writes all-ones into the field. Installed as
 * tag_mask_set for derived fields (e.g. VLAN qualifier, L3 type) whose
 * mask must be fully enabled regardless of the match value.
 */
static void
hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
		     void *match_param,
		     u8 *tag)
{
	HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
}
380
/* Default tag setter: copy the field value from the FTE match param
 * (source offset/mask) into the definer tag (destination offset/mask).
 */
static void
hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
			void *match_param,
			u8 *tag)
{
	/* Can be optimized */
	u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);

	HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
}
391
392 static void
hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc * fc,void * match_param,u8 * tag)393 hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
394 void *match_param,
395 u8 *tag)
396 {
397 if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
398 HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
399 else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
400 HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
401 else
402 HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
403 }
404
405 static void
hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc * fc,void * match_param,u8 * tag)406 hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
407 void *match_param,
408 u8 *tag)
409 {
410 if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
411 HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
412 else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
413 HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
414 else
415 HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
416 }
417
418 static void
hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc * fc,void * match_param,u8 * tag,bool inner)419 hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
420 void *match_param,
421 u8 *tag,
422 bool inner)
423 {
424 u32 second_cvlan_tag = inner ?
425 HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
426 HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
427 u32 second_svlan_tag = inner ?
428 HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
429 HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
430
431 if (second_cvlan_tag)
432 HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
433 else if (second_svlan_tag)
434 HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
435 else
436 HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
437 }
438
/* Inner-header wrapper around hws_definer_second_vlan_type_set(). */
static void
hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
				       void *match_param,
				       u8 *tag)
{
	hws_definer_second_vlan_type_set(fc, match_param, tag, true);
}
446
/* Outer-header wrapper around hws_definer_second_vlan_type_set(). */
static void
hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
				       void *match_param,
				       u8 *tag)
{
	hws_definer_second_vlan_type_set(fc, match_param, tag, false);
}
454
/* Pack the ICMPv4 type and code match values into the header_icmp DW1
 * layout and write the combined DW to the tag.
 */
static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
				     void *match_param,
				     u8 *tag)
{
	u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
	u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
	u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
		 (code << __mlx5_dw_bit_off(header_icmp, code));

	HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
}
466
/* Pack the ICMPv6 type and code match values into the header_icmp DW1
 * layout (same DW layout as ICMPv4) and write it to the tag.
 */
static void
hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
			   void *match_param,
			   u8 *tag)
{
	u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
	u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
	u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
		 (code << __mlx5_dw_bit_off(header_icmp, code));

	HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
}
479
480 static void
hws_definer_l3_type_set(struct mlx5hws_definer_fc * fc,void * match_param,u8 * tag)481 hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
482 void *match_param,
483 u8 *tag)
484 {
485 u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
486
487 if (val == IPV4)
488 HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
489 else if (val == IPV6)
490 HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
491 else
492 HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
493 }
494
/* Resolve the GVMI of the matched source_port within @peer_ctx and write
 * it to the tag. On lookup failure the BAD_PORT sentinel is written so
 * the rule cannot match any real port.
 */
static void
hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
				 void *match_param,
				 u8 *tag,
				 struct mlx5hws_context *peer_ctx)
{
	u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
	u16 vport_gvmi = 0;
	int ret;

	ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
	if (ret) {
		HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
		mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
		return;
	}

	/* A zero GVMI leaves the field untouched - NOTE(review): presumably
	 * the tag buffer is pre-zeroed by the caller; confirm.
	 */
	if (vport_gvmi)
		HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
}
515
/* Resolve which context owns the matched eswitch-owner vhca id - the
 * local context or a peer registered in peer_ctx_xa - then delegate GVMI
 * tag setting to it. Writes BAD_PORT on an unknown vhca id.
 */
static void
hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
				    void *match_param,
				    u8 *tag)
__must_hold(&fc->ctx->ctrl_lock)
{
	int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
	struct mlx5hws_context *peer_ctx;

	if (id == fc->ctx->caps->vhca_id)
		peer_ctx = fc->ctx;
	else
		peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);

	if (!peer_ctx) {
		HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
		mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
		return;
	}

	hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
}
538
/* Set the source-port GVMI using the local context (no peer lookup). */
static void
hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
			    void *match_param,
			    u8 *tag)
{
	hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
}
546
547 static struct mlx5hws_definer_fc *
hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data * cd,u8 parser_id)548 hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
549 u8 parser_id)
550 {
551 struct mlx5hws_definer_fc *fc;
552
553 switch (parser_id) {
554 case 0:
555 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
556 HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
557 fc->tag_set = &hws_definer_generic_set;
558 break;
559 case 1:
560 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
561 HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
562 fc->tag_set = &hws_definer_generic_set;
563 break;
564 case 2:
565 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
566 HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
567 fc->tag_set = &hws_definer_generic_set;
568 break;
569 case 3:
570 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
571 HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
572 fc->tag_set = &hws_definer_generic_set;
573 break;
574 case 4:
575 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
576 HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
577 fc->tag_set = &hws_definer_generic_set;
578 break;
579 case 5:
580 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
581 HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
582 fc->tag_set = &hws_definer_generic_set;
583 break;
584 case 6:
585 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
586 HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
587 fc->tag_set = &hws_definer_generic_set;
588 break;
589 case 7:
590 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
591 HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
592 fc->tag_set = &hws_definer_generic_set;
593 break;
594 default:
595 mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
596 return NULL;
597 }
598
599 return fc;
600 }
601
602 static struct mlx5hws_definer_fc *
hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data * cd,u8 parser_id)603 hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
604 u8 parser_id)
605 {
606 struct mlx5hws_definer_fc *fc;
607
608 switch (parser_id) {
609 case 0:
610 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
611 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
612 fc->tag_set = &hws_definer_generic_set;
613 break;
614 case 1:
615 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
616 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
617 fc->tag_set = &hws_definer_generic_set;
618 break;
619 case 2:
620 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
621 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
622 fc->tag_set = &hws_definer_generic_set;
623 break;
624 case 3:
625 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
626 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
627 fc->tag_set = &hws_definer_generic_set;
628 break;
629 case 4:
630 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
631 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
632 fc->tag_set = &hws_definer_generic_set;
633 break;
634 case 5:
635 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
636 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
637 fc->tag_set = &hws_definer_generic_set;
638 break;
639 case 6:
640 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
641 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
642 fc->tag_set = &hws_definer_generic_set;
643 break;
644 case 7:
645 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
646 HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
647 fc->tag_set = &hws_definer_generic_set;
648 break;
649 default:
650 mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
651 return NULL;
652 }
653
654 return fc;
655 }
656
/* Map one misc4 prog_sample id/value pair to a flex-parser field-copy
 * entry, ensuring each parser id is used at most once. Returns the entry
 * or NULL for an out-of-range or already-used parser id.
 */
static struct mlx5hws_definer_fc *
hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
				 bool *parser_is_used,
				 u32 id,
				 u32 value)
{
	if (id || value) {
		if (id >= HWS_NUM_OF_FLEX_PARSERS) {
			mlx5hws_err(cd->ctx, "Unsupported parser id\n");
			return NULL;
		}

		if (parser_is_used[id]) {
			mlx5hws_err(cd->ctx, "Parser id have been used\n");
			return NULL;
		}
	}

	/* NOTE(review): when both id and value are zero the range and
	 * duplicate checks above are skipped, yet parser 0 is still
	 * marked used and handled below - confirm this is intended for
	 * unset misc4 entries.
	 */
	parser_is_used[id] = true;

	return hws_definer_flex_parser_handler(cd, id);
}
679
680 static int
hws_definer_check_match_flags(struct mlx5hws_definer_conv_data * cd)681 hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
682 {
683 u32 flags;
684
685 flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
686 MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
687 MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
688 MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
689 MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
690 MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
691 if (flags & (flags - 1))
692 goto err_conflict;
693
694 flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
695 MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
696
697 if (flags & (flags - 1))
698 goto err_conflict;
699
700 flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
701 MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
702 if (flags & (flags - 1))
703 goto err_conflict;
704
705 flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
706 MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
707 MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
708 MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
709 if (flags & (flags - 1))
710 goto err_conflict;
711
712 return 0;
713
714 err_conflict:
715 mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
716 cd->match_flags);
717 return -EINVAL;
718 }
719
/* Convert the outer_headers portion of an FTE match param into definer
 * field-copy entries in cd->fc. Returns 0 on success, -EINVAL when the
 * match param sets fields this converter cannot express.
 */
static int
hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;
	u32 *s_ipv6, *d_ipv6;

	/* Reject match params that set fields with no definer mapping. */
	if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type_ext, 0x4) ||
	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c6, 0xa) ||
	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_d4, 0x4)) {
		mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
		return -EINVAL;
	}

	/* Matching on an IP address is only meaningful when the IP version
	 * (directly or via ethertype) is matched as well.
	 */
	ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
					outer_headers.src_ipv4_src_ipv6,
					0x80) ||
		      HWS_IS_FLD_SET_SZ(match_param,
					outer_headers.dst_ipv4_dst_ipv6, 0x80);
	ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
		     HWS_IS_FLD_SET(match_param, outer_headers.ethertype);

	if (ip_addr_set && !ip_ver_set) {
		mlx5hws_err(cd->ctx,
			    "Unsupported match on IP address without version or ethertype\n");
		return -EINVAL;
	}

	/* L2 Check ethertype */
	HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
		    outer_headers.ethertype,
		    eth_l2_outer.l3_ethertype);
	/* L2 Check SMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
		    outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
	/* L2 Check SMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
		    outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
	/* L2 Check DMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
		    outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
	/* L2 Check DMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
		    outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);

	/* L2 VLAN */
	HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
		    outer_headers.first_prio, eth_l2_outer.first_priority);
	HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
		    outer_headers.first_cfi, eth_l2_outer.first_cfi);
	HWS_SET_HDR(fc, match_param, VLAN_ID_O,
		    outer_headers.first_vid, eth_l2_outer.first_vlan_id);

	/* L2 CVLAN and SVLAN
	 * The qualifier is a derived value, so it needs the dedicated
	 * setter and an all-ones mask setter.
	 */
	if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
	    HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
		curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
	}

	/* L3 Check IP header */
	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
		    outer_headers.ip_protocol,
		    eth_l3_outer.protocol_next_header);
	HWS_SET_HDR(fc, match_param, IP_VERSION_O,
		    outer_headers.ip_version,
		    eth_l3_outer.ip_version);
	HWS_SET_HDR(fc, match_param, IP_TTL_O,
		    outer_headers.ttl_hoplimit,
		    eth_l3_outer.time_to_live_hop_limit);

	/* L3 Check IPv4/IPv6 addresses */
	s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      outer_headers.src_ipv4_src_ipv6.ipv6_layout);
	d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);

	/* Assume IPv6 is used if ipv6 bits are set.
	 * Only the upper 96 bits are examined: the low DW aliases the
	 * IPv4 address in the shared layout.
	 */
	is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
		  d_ipv6[0] || d_ipv6[1] || d_ipv6[2];

	/* IHL is an IPv4-specific field. */
	if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
		mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
		return -EINVAL;
	}

	if (is_ipv6) {
		/* Handle IPv6 source address */
		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_src_outer.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_src_outer.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_src_outer.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_src_outer.ipv6_address_31_0);
		/* Handle IPv6 destination address */
		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_dst_outer.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_dst_outer.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_dst_outer.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_dst_outer.ipv6_address_31_0);
	} else {
		/* Handle IPv4 source address (low DW of the shared layout) */
		HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_outer.source_address);
		/* Handle IPv4 destination address */
		HWS_SET_HDR(fc, match_param, IPV4_DST_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_outer.destination_address);
	}

	/* L4 Handle TCP/UDP */
	HWS_SET_HDR(fc, match_param, L4_SPORT_O,
		    outer_headers.tcp_sport, eth_l4_outer.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_O,
		    outer_headers.tcp_dport, eth_l4_outer.destination_port);
	HWS_SET_HDR(fc, match_param, L4_SPORT_O,
		    outer_headers.udp_sport, eth_l4_outer.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_O,
		    outer_headers.udp_dport, eth_l4_outer.destination_port);
	HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
		    outer_headers.tcp_flags, eth_l4_outer.tcp_flags);

	/* L3 Handle DSCP, ECN and IHL */
	HWS_SET_HDR(fc, match_param, IP_DSCP_O,
		    outer_headers.ip_dscp, eth_l3_outer.dscp);
	HWS_SET_HDR(fc, match_param, IP_ECN_O,
		    outer_headers.ip_ecn, eth_l3_outer.ecn);
	HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
		    outer_headers.ipv4_ihl, eth_l3_outer.ihl);

	/* Set IP fragmented bit.
	 * NOTE(review): the destination layout is chosen based on which
	 * MAC halves are matched - presumably to share a DW with the
	 * other matched L2 fields; confirm.
	 */
	if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
		smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
			   HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
		dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
			   HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
		if (smac_set == dmac_set) {
			HWS_SET_HDR(fc, match_param, IP_FRAG_O,
				    outer_headers.frag, eth_l4_outer.ip_fragmented);
		} else {
			HWS_SET_HDR(fc, match_param, IP_FRAG_O,
				    outer_headers.frag, eth_l2_src_outer.ip_fragmented);
		}
	}

	/* L3_type set
	 * Derived from ip_version, so it needs the dedicated setter and
	 * an all-ones mask setter.
	 */
	if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
		curr_fc->tag_set = &hws_definer_l3_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
		HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
	}

	return 0;
}
896
/* Convert the inner_headers section of an FTE match param into definer
 * field-copy (fc) entries: L2 MACs/VLAN, L3 IPv4/IPv6, and L4 TCP/UDP
 * fields of the inner (post-tunnel) packet headers.
 *
 * Returns 0 on success or -EINVAL when reserved bits are set or the
 * match combination cannot be expressed by the definer.
 */
static int
hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;
	u32 *s_ipv6, *d_ipv6;

	/* Reject any reserved/unsupported inner_headers bits up front. */
	if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type_ext, 0x4) ||
	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c6, 0xa) ||
	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_d4, 0x4)) {
		mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
		return -EINVAL;
	}

	/* An IP address match is ambiguous (IPv4 vs IPv6) unless the rule
	 * also pins the IP version or the ethertype.
	 */
	ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
					inner_headers.src_ipv4_src_ipv6,
					0x80) ||
		      HWS_IS_FLD_SET_SZ(match_param,
					inner_headers.dst_ipv4_dst_ipv6, 0x80);
	ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
		     HWS_IS_FLD_SET(match_param, inner_headers.ethertype);

	if (ip_addr_set && !ip_ver_set) {
		mlx5hws_err(cd->ctx,
			    "Unsupported match on IP address without version or ethertype\n");
		return -EINVAL;
	}

	/* L2 Check ethertype */
	HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
		    inner_headers.ethertype,
		    eth_l2_inner.l3_ethertype);
	/* L2 Check SMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
		    inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
	/* L2 Check SMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
		    inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
	/* L2 Check DMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
		    inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
	/* L2 Check DMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
		    inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);

	/* L2 VLAN */
	HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
		    inner_headers.first_prio, eth_l2_inner.first_priority);
	HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
		    inner_headers.first_cfi, eth_l2_inner.first_cfi);
	HWS_SET_HDR(fc, match_param, VLAN_ID_I,
		    inner_headers.first_vid, eth_l2_inner.first_vlan_id);

	/* L2 CVLAN and SVLAN: the VLAN qualifier is derived by a dedicated
	 * tag_set callback rather than a plain field copy.
	 */
	if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
	    HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
		curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
	}
	/* L3 Check IP header */
	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
		    inner_headers.ip_protocol,
		    eth_l3_inner.protocol_next_header);
	HWS_SET_HDR(fc, match_param, IP_VERSION_I,
		    inner_headers.ip_version,
		    eth_l3_inner.ip_version);
	HWS_SET_HDR(fc, match_param, IP_TTL_I,
		    inner_headers.ttl_hoplimit,
		    eth_l3_inner.time_to_live_hop_limit);

	/* L3 Check IPv4/IPv6 addresses */
	s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      inner_headers.src_ipv4_src_ipv6.ipv6_layout);
	d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);

	/* Assume IPv6 is used if ipv6 bits are set; an IPv4 address only
	 * occupies the low 32 bits (words [3]), so any of words [0..2]
	 * being non-zero implies IPv6.
	 */
	is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
		  d_ipv6[0] || d_ipv6[1] || d_ipv6[2];

	/* IHL is an IPv4-specific field. */
	if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
		mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
		return -EINVAL;
	}

	if (is_ipv6) {
		/* Handle IPv6 source address */
		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_src_inner.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_src_inner.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_src_inner.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_src_inner.ipv6_address_31_0);
		/* Handle IPv6 destination address */
		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_dst_inner.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_dst_inner.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_dst_inner.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_dst_inner.ipv6_address_31_0);
	} else {
		/* Handle IPv4 source address */
		HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_inner.source_address);
		/* Handle IPv4 destination address */
		HWS_SET_HDR(fc, match_param, IPV4_DST_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_inner.destination_address);
	}

	/* L4 Handle TCP/UDP */
	HWS_SET_HDR(fc, match_param, L4_SPORT_I,
		    inner_headers.tcp_sport, eth_l4_inner.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_I,
		    inner_headers.tcp_dport, eth_l4_inner.destination_port);
	HWS_SET_HDR(fc, match_param, L4_SPORT_I,
		    inner_headers.udp_sport, eth_l4_inner.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_I,
		    inner_headers.udp_dport, eth_l4_inner.destination_port);
	HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
		    inner_headers.tcp_flags, eth_l4_inner.tcp_flags);

	/* L3 Handle DSCP, ECN and IHL */
	HWS_SET_HDR(fc, match_param, IP_DSCP_I,
		    inner_headers.ip_dscp, eth_l3_inner.dscp);
	HWS_SET_HDR(fc, match_param, IP_ECN_I,
		    inner_headers.ip_ecn, eth_l3_inner.ecn);
	HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
		    inner_headers.ipv4_ihl, eth_l3_inner.ihl);

	/* Set IP fragmented bit. The destination slot for the frag bit
	 * depends on the rest of the match: with a VXLAN VNI match the
	 * eth_l2_inner slot is used; otherwise the slot is chosen based
	 * on whether SMAC and DMAC matches are balanced.
	 * NOTE(review): presumably this avoids definer layout conflicts
	 * with the MAC fields sharing those DWs - confirm against the
	 * definer layout spec.
	 */
	if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
		if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
			HWS_SET_HDR(fc, match_param, IP_FRAG_I,
				    inner_headers.frag, eth_l2_inner.ip_fragmented);
		} else {
			smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
				   HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
			dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
				   HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
			if (smac_set == dmac_set) {
				HWS_SET_HDR(fc, match_param, IP_FRAG_I,
					    inner_headers.frag, eth_l4_inner.ip_fragmented);
			} else {
				HWS_SET_HDR(fc, match_param, IP_FRAG_I,
					    inner_headers.frag, eth_l2_src_inner.ip_fragmented);
			}
		}
	}

	/* L3_type set: translated from ip_version via a dedicated callback
	 * rather than copied verbatim.
	 */
	if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
		curr_fc->tag_set = &hws_definer_l3_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
		HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
	}

	return 0;
}
1077
1078 static int
hws_definer_conv_misc(struct mlx5hws_definer_conv_data * cd,u32 * match_param)1079 hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
1080 u32 *match_param)
1081 {
1082 struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
1083 struct mlx5hws_definer_fc *fc = cd->fc;
1084 struct mlx5hws_definer_fc *curr_fc;
1085
1086 if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
1087 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
1088 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
1089 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
1090 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
1091 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
1092 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
1093 HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
1094 HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
1095 HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
1096 HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
1097 HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
1098 HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
1099 mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
1100 return -EINVAL;
1101 }
1102
1103 /* Check GRE related fields */
1104 if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
1105 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
1106 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
1107 HWS_CALC_HDR(curr_fc,
1108 misc_parameters.gre_c_present,
1109 tunnel_header.tunnel_header_0);
1110 curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
1111 curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
1112 }
1113
1114 if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
1115 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
1116 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
1117 HWS_CALC_HDR(curr_fc,
1118 misc_parameters.gre_k_present,
1119 tunnel_header.tunnel_header_0);
1120 curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
1121 curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
1122 }
1123
1124 if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
1125 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
1126 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
1127 HWS_CALC_HDR(curr_fc,
1128 misc_parameters.gre_s_present,
1129 tunnel_header.tunnel_header_0);
1130 curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
1131 curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
1132 }
1133
1134 if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
1135 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
1136 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
1137 HWS_CALC_HDR(curr_fc,
1138 misc_parameters.gre_protocol,
1139 tunnel_header.tunnel_header_0);
1140 curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
1141 curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
1142 }
1143
1144 if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
1145 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
1146 MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
1147 HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
1148 misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
1149 }
1150
1151 /* Check GENEVE related fields */
1152 if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
1153 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1154 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
1155 HWS_CALC_HDR(curr_fc,
1156 misc_parameters.geneve_vni,
1157 tunnel_header.tunnel_header_1);
1158 curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
1159 curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
1160 }
1161
1162 if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
1163 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1164 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
1165 HWS_CALC_HDR(curr_fc,
1166 misc_parameters.geneve_opt_len,
1167 tunnel_header.tunnel_header_0);
1168 curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
1169 curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
1170 }
1171
1172 if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
1173 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1174 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
1175 HWS_CALC_HDR(curr_fc,
1176 misc_parameters.geneve_protocol_type,
1177 tunnel_header.tunnel_header_0);
1178 curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
1179 curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
1180 }
1181
1182 if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
1183 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1184 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
1185 HWS_CALC_HDR(curr_fc,
1186 misc_parameters.geneve_oam,
1187 tunnel_header.tunnel_header_0);
1188 curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
1189 curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
1190 }
1191
1192 HWS_SET_HDR(fc, match_param, SOURCE_QP,
1193 misc_parameters.source_sqn, source_qp_gvmi.source_qp);
1194 HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
1195 misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
1196 HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
1197 misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
1198
1199 /* L2 Second VLAN */
1200 HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
1201 misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
1202 HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
1203 misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
1204 HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
1205 misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
1206 HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
1207 misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
1208 HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
1209 misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
1210 HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
1211 misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
1212
1213 /* L2 Second CVLAN and SVLAN */
1214 if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
1215 HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
1216 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
1217 HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
1218 curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
1219 curr_fc->tag_mask_set = &hws_definer_ones_set;
1220 }
1221
1222 if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
1223 HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
1224 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
1225 HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
1226 curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
1227 curr_fc->tag_mask_set = &hws_definer_ones_set;
1228 }
1229
1230 /* VXLAN VNI */
1231 if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
1232 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
1233 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
1234 HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
1235 curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
1236 curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
1237 }
1238
1239 /* Flex protocol steering ok bits */
1240 if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
1241 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1242
1243 if (!caps->flex_parser_ok_bits_supp) {
1244 mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
1245 return -EOPNOTSUPP;
1246 }
1247
1248 curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
1249 cd, caps->flex_parser_id_geneve_tlv_option_0);
1250 if (!curr_fc)
1251 return -EINVAL;
1252
1253 HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist);
1254 }
1255
1256 if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
1257 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
1258 HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
1259 curr_fc->tag_mask_set = &hws_definer_ones_set;
1260 curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
1261 misc_parameters.source_eswitch_owner_vhca_id) ?
1262 &hws_definer_set_source_gvmi_vhca_id :
1263 &hws_definer_set_source_gvmi;
1264 } else {
1265 if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
1266 mlx5hws_err(cd->ctx,
1267 "Unsupported source_eswitch_owner_vhca_id field usage\n");
1268 return -EOPNOTSUPP;
1269 }
1270 }
1271
1272 return 0;
1273 }
1274
1275 static int
hws_definer_conv_misc2(struct mlx5hws_definer_conv_data * cd,u32 * match_param)1276 hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
1277 u32 *match_param)
1278 {
1279 struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
1280 struct mlx5hws_definer_fc *fc = cd->fc;
1281 struct mlx5hws_definer_fc *curr_fc;
1282
1283 if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
1284 HWS_IS_FLD_SET_SZ(match_param,
1285 misc_parameters_2.ipsec_next_header, 0x8) ||
1286 HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
1287 HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
1288 HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
1289 mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
1290 return -EINVAL;
1291 }
1292
1293 HWS_SET_HDR(fc, match_param, MPLS0_O,
1294 misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
1295 HWS_SET_HDR(fc, match_param, MPLS0_I,
1296 misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
1297 HWS_SET_HDR(fc, match_param, REG_0,
1298 misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
1299 HWS_SET_HDR(fc, match_param, REG_1,
1300 misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
1301 HWS_SET_HDR(fc, match_param, REG_2,
1302 misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
1303 HWS_SET_HDR(fc, match_param, REG_3,
1304 misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
1305 HWS_SET_HDR(fc, match_param, REG_4,
1306 misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
1307 HWS_SET_HDR(fc, match_param, REG_5,
1308 misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
1309 HWS_SET_HDR(fc, match_param, REG_6,
1310 misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
1311 HWS_SET_HDR(fc, match_param, REG_7,
1312 misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
1313 HWS_SET_HDR(fc, match_param, REG_A,
1314 misc_parameters_2.metadata_reg_a, metadata.general_purpose);
1315
1316 if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
1317 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
1318
1319 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
1320 mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
1321 return -EOPNOTSUPP;
1322 }
1323
1324 curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
1325 if (!curr_fc)
1326 return -EINVAL;
1327
1328 HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre);
1329 }
1330
1331 if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
1332 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
1333
1334 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
1335 mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
1336 return -EOPNOTSUPP;
1337 }
1338
1339 curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
1340 if (!curr_fc)
1341 return -EINVAL;
1342
1343 HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_udp);
1344 }
1345
1346 return 0;
1347 }
1348
1349 static int
hws_definer_conv_misc3(struct mlx5hws_definer_conv_data * cd,u32 * match_param)1350 hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
1351 {
1352 struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
1353 struct mlx5hws_definer_fc *fc = cd->fc;
1354 struct mlx5hws_definer_fc *curr_fc;
1355 bool vxlan_gpe_flex_parser_enabled;
1356
1357 /* Check reserved and unsupported fields */
1358 if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
1359 HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
1360 HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
1361 HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
1362 mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
1363 return -EINVAL;
1364 }
1365
1366 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
1367 HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
1368 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
1369 HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
1370 misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
1371 HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
1372 misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
1373 }
1374
1375 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
1376 HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
1377 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
1378 HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
1379 misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
1380 HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
1381 misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
1382 }
1383
1384 vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
1385
1386 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
1387 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
1388
1389 if (!vxlan_gpe_flex_parser_enabled) {
1390 mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
1391 return -EOPNOTSUPP;
1392 }
1393
1394 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
1395 HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
1396 tunnel_header.tunnel_header_1);
1397 curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
1398 curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
1399 }
1400
1401 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
1402 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
1403
1404 if (!vxlan_gpe_flex_parser_enabled) {
1405 mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
1406 return -EOPNOTSUPP;
1407 }
1408
1409 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
1410 HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
1411 tunnel_header.tunnel_header_0);
1412 curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
1413 curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
1414 curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
1415 }
1416
1417 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
1418 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
1419
1420 if (!vxlan_gpe_flex_parser_enabled) {
1421 mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
1422 return -EOPNOTSUPP;
1423 }
1424
1425 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
1426 HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
1427 tunnel_header.tunnel_header_0);
1428 curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
1429 curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
1430 }
1431
1432 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
1433 HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
1434 HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
1435 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
1436
1437 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
1438 mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
1439 return -EOPNOTSUPP;
1440 }
1441
1442 HWS_SET_HDR(fc, match_param, ICMP_DW3,
1443 misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
1444
1445 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
1446 HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
1447 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
1448 HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
1449 curr_fc->tag_set = &hws_definer_icmp_dw1_set;
1450 }
1451 }
1452
1453 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
1454 HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
1455 HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
1456 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
1457
1458 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
1459 mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
1460 return -EOPNOTSUPP;
1461 }
1462
1463 HWS_SET_HDR(fc, match_param, ICMP_DW3,
1464 misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
1465
1466 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
1467 HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
1468 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
1469 HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
1470 curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
1471 }
1472 }
1473
1474 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
1475 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
1476
1477 curr_fc =
1478 hws_definer_flex_parser_handler(cd,
1479 caps->flex_parser_id_geneve_tlv_option_0);
1480 if (!curr_fc)
1481 return -EINVAL;
1482
1483 HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data);
1484 }
1485
1486 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
1487 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1488
1489 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
1490 mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
1491 return -EOPNOTSUPP;
1492 }
1493
1494 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
1495 fc->tag_set = &hws_definer_generic_set;
1496 fc->bit_mask = __mlx5_mask(header_gtp, teid);
1497 fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
1498 HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
1499 }
1500
1501 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
1502 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1503
1504 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
1505 mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
1506 return -EOPNOTSUPP;
1507 }
1508
1509 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
1510 fc->tag_set = &hws_definer_generic_set;
1511 fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
1512 fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
1513 fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1514 HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
1515 }
1516
1517 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
1518 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1519
1520 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
1521 mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
1522 return -EOPNOTSUPP;
1523 }
1524
1525 fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
1526 fc->tag_set = &hws_definer_generic_set;
1527 fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
1528 fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
1529 fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1530 HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
1531 }
1532
1533 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
1534 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1535
1536 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
1537 mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
1538 return -EOPNOTSUPP;
1539 }
1540
1541 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
1542 curr_fc->tag_set = &hws_definer_generic_set;
1543 curr_fc->bit_mask = -1;
1544 curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
1545 HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2);
1546 }
1547
1548 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
1549 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1550
1551 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
1552 mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
1553 return -EOPNOTSUPP;
1554 }
1555
1556 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
1557 curr_fc->tag_set = &hws_definer_generic_set;
1558 curr_fc->bit_mask = -1;
1559 curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
1560 HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0);
1561 }
1562
1563 if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
1564 cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
1565
1566 if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
1567 mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
1568 return -EOPNOTSUPP;
1569 }
1570
1571 curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
1572 curr_fc->tag_set = &hws_definer_generic_set;
1573 curr_fc->bit_mask = -1;
1574 curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1575 HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0);
1576 }
1577
1578 return 0;
1579 }
1580
/* Convert misc_parameters_4 (programmable/flex parser samples) from the
 * match param into field copy entries. Each of the four id/value pairs is
 * mapped to a flex parser via hws_definer_misc4_fields_handler(), which
 * tracks per-parser usage in parser_is_used[] and returns NULL on an
 * unsupported or conflicting id.
 */
static int
hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
	struct mlx5hws_definer_fc *fc;
	u32 id, value;

	/* Reject matches on reserved misc4 bits we cannot express */
	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
		mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
		return -EINVAL;
	}

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	/* Point the field copy at the matching value in the match param */
	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);

	return 0;
}
1628
/* Convert misc_parameters_5 (tunnel headers 0..3) from the match param
 * into field copy entries. MACsec tags and reserved bits are not
 * representable and are rejected. Tunnel header 0/1 and header 2 matches
 * additionally record match flags used later for conflict detection.
 */
static int
hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	struct mlx5hws_definer_fc *fc = cd->fc;

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
		mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
		return -EINVAL;
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
		HWS_SET_HDR(fc, match_param, TNL_HDR_0,
			    misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
		HWS_SET_HDR(fc, match_param, TNL_HDR_1,
			    misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
		HWS_SET_HDR(fc, match_param, TNL_HDR_2,
			    misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
	}

	/* Header 3 sets no match flag; HWS_SET_HDR itself is a no-op when
	 * the field is not present in the match param.
	 */
	HWS_SET_HDR(fc, match_param, TNL_HDR_3,
		    misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);

	return 0;
}
1667
hws_definer_get_fc_size(struct mlx5hws_definer_fc * fc)1668 static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
1669 {
1670 u32 fc_sz = 0;
1671 int i;
1672
1673 /* For empty matcher, ZERO_SIZE_PTR is returned */
1674 if (fc == ZERO_SIZE_PTR)
1675 return 0;
1676
1677 for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
1678 if (fc[i].tag_set)
1679 fc_sz++;
1680 return fc_sz;
1681 }
1682
1683 static struct mlx5hws_definer_fc *
hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc * fc)1684 hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
1685 {
1686 struct mlx5hws_definer_fc *compressed_fc = NULL;
1687 u32 definer_size = hws_definer_get_fc_size(fc);
1688 u32 fc_sz = 0;
1689 int i;
1690
1691 compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
1692 if (!compressed_fc)
1693 return NULL;
1694
1695 /* For empty matcher, ZERO_SIZE_PTR is returned */
1696 if (!definer_size)
1697 return compressed_fc;
1698
1699 for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
1700 if (!fc[i].tag_set)
1701 continue;
1702
1703 fc[i].fname = i;
1704 memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
1705 }
1706
1707 return compressed_fc;
1708 }
1709
1710 static void
hws_definer_set_hl(u8 * hl,struct mlx5hws_definer_fc * fc)1711 hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
1712 {
1713 int i;
1714
1715 /* nothing to do for empty matcher */
1716 if (fc == ZERO_SIZE_PTR)
1717 return;
1718
1719 for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
1720 if (!fc[i].tag_set)
1721 continue;
1722
1723 HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
1724 }
1725 }
1726
1727 static struct mlx5hws_definer_fc *
hws_definer_alloc_fc(struct mlx5hws_context * ctx,size_t len)1728 hws_definer_alloc_fc(struct mlx5hws_context *ctx,
1729 size_t len)
1730 {
1731 struct mlx5hws_definer_fc *fc;
1732 int i;
1733
1734 fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
1735 if (!fc)
1736 return NULL;
1737
1738 for (i = 0; i < len; i++)
1739 fc[i].ctx = ctx;
1740
1741 return fc;
1742 }
1743
/* Convert a match template's match param into a header layout (hl) and
 * a compressed field copy array stored on the template (mt->fc/fc_sz).
 * Each enabled match criteria group (outer, inner, misc..misc5) is
 * converted in turn; misc6 is not supported. On success the temporary
 * sparse fc array is freed and 0 is returned; on failure a negative
 * errno is returned and mt is left unmodified.
 */
static int
hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
				    struct mlx5hws_match_template *mt,
				    u8 *hl)
{
	struct mlx5hws_definer_conv_data cd = {0};
	struct mlx5hws_definer_fc *fc;
	int ret;

	/* Sparse working array, one slot per possible field name */
	fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
	if (!fc)
		return -ENOMEM;

	cd.fc = fc;
	cd.ctx = ctx;

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
		mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
		ret = -EOPNOTSUPP;
		goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
		ret = hws_definer_conv_outer(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
		ret = hws_definer_conv_inner(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
		ret = hws_definer_conv_misc(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
		ret = hws_definer_conv_misc2(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
		ret = hws_definer_conv_misc3(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
		ret = hws_definer_conv_misc4(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
		ret = hws_definer_conv_misc5(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	/* Check there is no conflicted fields set together */
	ret = hws_definer_check_match_flags(&cd);
	if (ret)
		goto err_free_fc;

	/* Allocate fc array on mt */
	mt->fc = hws_definer_alloc_compressed_fc(fc);
	if (!mt->fc) {
		mlx5hws_err(ctx,
			    "Convert match params: failed to set field copy to match template\n");
		ret = -ENOMEM;
		goto err_free_fc;
	}
	mt->fc_sz = hws_definer_get_fc_size(fc);

	/* Fill in headers layout */
	hws_definer_set_hl(hl, fc);

	kfree(fc);
	return 0;

err_free_fc:
	kfree(fc);
	return ret;
}
1833
/* Convert a raw match param (no match template) directly into a
 * compressed field copy array. Same conversion flow as
 * hws_definer_conv_match_params_to_hl() minus the misc6 check and the
 * header layout fill. On success *fc_sz is set and the compressed array
 * is returned (caller owns it); on any failure NULL is returned. The
 * temporary sparse fc array is always freed via the fall-through label.
 */
struct mlx5hws_definer_fc *
mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
						   u8 match_criteria_enable,
						   u32 *match_param,
						   int *fc_sz)
{
	struct mlx5hws_definer_fc *compressed_fc = NULL;
	struct mlx5hws_definer_conv_data cd = {0};
	struct mlx5hws_definer_fc *fc;
	int ret;

	fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
	if (!fc)
		return NULL;

	cd.fc = fc;
	cd.ctx = ctx;

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
		ret = hws_definer_conv_outer(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
		ret = hws_definer_conv_inner(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
		ret = hws_definer_conv_misc(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
		ret = hws_definer_conv_misc2(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
		ret = hws_definer_conv_misc3(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
		ret = hws_definer_conv_misc4(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
		ret = hws_definer_conv_misc5(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	/* Allocate fc array on mt */
	compressed_fc = hws_definer_alloc_compressed_fc(fc);
	if (!compressed_fc) {
		mlx5hws_err(ctx,
			    "Convert to compressed fc: failed to set field copy to match template\n");
		goto err_free_fc;
	}
	*fc_sz = hws_definer_get_fc_size(fc);

	/* Fall through: success and error paths both free the sparse array;
	 * compressed_fc is still NULL on error.
	 */
err_free_fc:
	kfree(fc);
	return compressed_fc;
}
1907
/* Translate a byte offset in the header layout into the byte offset of
 * the same data inside the definer tag, by scanning the definer's DW
 * selectors and then its byte selectors. Returns 0 and sets
 * *tag_byte_off on success, -EINVAL if the byte is not covered by any
 * selector of this definer.
 */
static int
hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
			     u32 hl_byte_off,
			     u32 *tag_byte_off)
{
	int i, dw_to_scan;
	u8 byte_offset;

	/* Avoid accessing unused DW selectors */
	dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
		DW_SELECTORS : DW_SELECTORS_MATCH;

	/* Add offset since each DW covers multiple BYTEs */
	byte_offset = hl_byte_off % DW_SIZE;
	for (i = 0; i < dw_to_scan; i++) {
		if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
			/* DW selector i occupies slot (DW_SELECTORS - i - 1)
			 * in the tag, i.e. selectors are laid out in reverse.
			 */
			*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
			return 0;
		}
	}

	/* Add offset to skip DWs in definer */
	byte_offset = DW_SIZE * DW_SELECTORS;
	/* Iterate in reverse since the code uses bytes from 7 -> 0 */
	for (i = BYTE_SELECTORS; i-- > 0 ;) {
		if (definer->byte_selector[i] == hl_byte_off) {
			*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
			return 0;
		}
	}

	/* The requested byte is not selected by this definer */
	return -EINVAL;
}
1941
1942 static int
hws_definer_fc_bind(struct mlx5hws_definer * definer,struct mlx5hws_definer_fc * fc,u32 fc_sz)1943 hws_definer_fc_bind(struct mlx5hws_definer *definer,
1944 struct mlx5hws_definer_fc *fc,
1945 u32 fc_sz)
1946 {
1947 u32 tag_offset = 0;
1948 int ret, byte_diff;
1949 u32 i;
1950
1951 for (i = 0; i < fc_sz; i++) {
1952 /* Map header layout byte offset to byte offset in tag */
1953 ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
1954 if (ret)
1955 return ret;
1956
1957 /* Move setter based on the location in the definer */
1958 byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
1959 fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
1960
1961 /* Update offset in headers layout to offset in tag */
1962 fc->byte_off = tag_offset;
1963 fc++;
1964 }
1965
1966 return 0;
1967 }
1968
/* Recursive backtracking search: try to cover every non-zero DW of the
 * header layout with the selector budget in @ctrl (full DWs first fail
 * over to limited DWs and byte selectors). For each non-zero DW the
 * function tries, in order: a limited DW selector, a full DW selector,
 * and finally individual byte selectors, undoing each choice before
 * trying the next. Returns true when a complete assignment was found
 * (ctrl holds the solution), false otherwise.
 */
static bool
hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
			     u32 cur_dw,
			     u32 *data)
{
	u8 bytes_set;
	int byte_idx;
	bool ret;
	int i;

	/* Reached end, nothing left to do */
	if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
		return true;

	/* No data set, can skip to next DW */
	while (!*data) {
		cur_dw++;
		data++;

		/* Reached end, nothing left to do */
		if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
			return true;
	}

	/* Used all DW selectors and Byte selectors, no possible solution */
	if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
	    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
	    ctrl->allowed_bytes == ctrl->used_bytes)
		return false;

	/* Try to use limited DW selectors; limited selectors can only
	 * address DW offsets below 64 (presumably a hardware encoding
	 * limit - confirm against the device spec).
	 */
	if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
		ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;

		ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
		if (ret)
			return ret;

		/* Backtrack: undo the tentative limited DW choice */
		ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
	}

	/* Try to use DW selectors */
	if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
		ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;

		ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
		if (ret)
			return ret;

		/* Backtrack: undo the tentative full DW choice */
		ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
	}

	/* No byte selector for offset bigger than 255 (u8 selector) */
	if (cur_dw * DW_SIZE > 255)
		return false;

	/* Count of non-zero bytes in this DW - endian independent */
	bytes_set = !!(0x000000ff & *data) +
		    !!(0x0000ff00 & *data) +
		    !!(0x00ff0000 & *data) +
		    !!(0xff000000 & *data);

	/* Check if there are enough byte selectors left */
	if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
		return false;

	/* Try to use Byte selectors */
	for (i = 0; i < DW_SIZE; i++)
		if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
			/* Use byte selectors high to low */
			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
			ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
			ctrl->used_bytes++;
		}

	ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
	if (ret)
		return ret;

	/* Backtrack: release the byte selectors taken above */
	for (i = 0; i < DW_SIZE; i++)
		if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
			ctrl->used_bytes--;
			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
			ctrl->byte_selector[byte_idx] = 0;
		}

	return false;
}
2056
/* Commit a found selector assignment from the search control structure
 * into the definer: byte selectors first, then full DW selectors,
 * followed by limited DW selectors packed into the same dw_selector
 * array after the full ones.
 */
static void
hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
			  struct mlx5hws_definer *definer)
{
	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
	memcpy(definer->dw_selector + ctrl->allowed_full_dw,
	       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
}
2066
/* Pick a definer layout that covers the header layout @hl: first try a
 * regular match definer (full DW selectors only), then fall back to a
 * jumbo definer whose DW budget depends on the device's
 * full_dw_jumbo_support capability. Returns 0 on success with @definer
 * populated, -E2BIG when no layout can cover the requested fields.
 */
static int
hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
				struct mlx5hws_definer *definer,
				u8 *hl)
{
	struct mlx5hws_definer_sel_ctrl ctrl = {0};
	bool found;

	/* Try to create a match definer */
	ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
	ctrl.allowed_lim_dw = 0;
	ctrl.allowed_bytes = BYTE_SELECTORS;

	found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
	if (found) {
		hws_definer_copy_sel_ctrl(&ctrl, definer);
		definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
		return 0;
	}

	/* Try to create a full/limited jumbo definer */
	ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
								  DW_SELECTORS_MATCH;
	ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
								 DW_SELECTORS_LIMITED;
	ctrl.allowed_bytes = BYTE_SELECTORS;

	found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
	if (found) {
		hws_definer_copy_sel_ctrl(&ctrl, definer);
		definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
		return 0;
	}

	return -E2BIG;
}
2103
2104 static void
hws_definer_create_tag_mask(u32 * match_param,struct mlx5hws_definer_fc * fc,u32 fc_sz,u8 * tag)2105 hws_definer_create_tag_mask(u32 *match_param,
2106 struct mlx5hws_definer_fc *fc,
2107 u32 fc_sz,
2108 u8 *tag)
2109 {
2110 u32 i;
2111
2112 for (i = 0; i < fc_sz; i++) {
2113 if (fc->tag_mask_set)
2114 fc->tag_mask_set(fc, match_param, tag);
2115 else
2116 fc->tag_set(fc, match_param, tag);
2117 fc++;
2118 }
2119 }
2120
/* Build a rule tag by running every field copy's value setter against
 * the given match param.
 */
void mlx5hws_definer_create_tag(u32 *match_param,
				struct mlx5hws_definer_fc *fc,
				u32 fc_sz,
				u8 *tag)
{
	u32 idx;

	for (idx = 0; idx < fc_sz; idx++)
		fc[idx].tag_set(&fc[idx], match_param, tag);
}
2133
/* Return the firmware object id of the definer. */
int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
{
	return definer->obj_id;
}
2138
mlx5hws_definer_compare(struct mlx5hws_definer * definer_a,struct mlx5hws_definer * definer_b)2139 int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
2140 struct mlx5hws_definer *definer_b)
2141 {
2142 int i;
2143
2144 /* Future: Optimize by comparing selectors with valid mask only */
2145 for (i = 0; i < BYTE_SELECTORS; i++)
2146 if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
2147 return 1;
2148
2149 for (i = 0; i < DW_SELECTORS; i++)
2150 if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
2151 return 1;
2152
2153 for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
2154 if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
2155 return 1;
2156
2157 return 0;
2158 }
2159
2160 int
mlx5hws_definer_calc_layout(struct mlx5hws_context * ctx,struct mlx5hws_match_template * mt,struct mlx5hws_definer * match_definer)2161 mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
2162 struct mlx5hws_match_template *mt,
2163 struct mlx5hws_definer *match_definer)
2164 {
2165 u8 *match_hl;
2166 int ret;
2167
2168 /* Union header-layout (hl) is used for creating a single definer
2169 * field layout used with different bitmasks for hash and match.
2170 */
2171 match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
2172 if (!match_hl)
2173 return -ENOMEM;
2174
2175 /* Convert all mt items to header layout (hl)
2176 * and allocate the match and range field copy array (fc & fcr).
2177 */
2178 ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
2179 if (ret) {
2180 mlx5hws_err(ctx, "Failed to convert items to header layout\n");
2181 goto free_match_hl;
2182 }
2183
2184 /* Find the match definer layout for header layout match union */
2185 ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
2186 if (ret) {
2187 if (ret == -E2BIG)
2188 mlx5hws_dbg(ctx,
2189 "Failed to create match definer from header layout - E2BIG\n");
2190 else
2191 mlx5hws_err(ctx,
2192 "Failed to create match definer from header layout (%d)\n",
2193 ret);
2194 goto free_fc;
2195 }
2196
2197 kfree(match_hl);
2198 return 0;
2199
2200 free_fc:
2201 kfree(mt->fc);
2202 free_match_hl:
2203 kfree(match_hl);
2204 return ret;
2205 }
2206
mlx5hws_definer_init_cache(struct mlx5hws_definer_cache ** cache)2207 int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
2208 {
2209 struct mlx5hws_definer_cache *new_cache;
2210
2211 new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
2212 if (!new_cache)
2213 return -ENOMEM;
2214
2215 INIT_LIST_HEAD(&new_cache->list_head);
2216 *cache = new_cache;
2217
2218 return 0;
2219 }
2220
/* Free the definer cache container. Assumes the cache list is already
 * empty - entries are removed via hws_definer_put_obj().
 */
void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
{
	kfree(cache);
}
2225
/* Get a firmware definer object matching @definer, reusing a cached one
 * when possible. Cache hits bump the refcount and move the entry to the
 * list head (LRU). On a miss, a new object is created via FW command
 * and inserted with refcount 1. Returns the object id, or -1 on failure.
 */
int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
			    struct mlx5hws_definer *definer)
{
	struct mlx5hws_definer_cache *cache = ctx->definer_cache;
	struct mlx5hws_cmd_definer_create_attr def_attr = {0};
	struct mlx5hws_definer_cache_item *cached_definer;
	u32 obj_id;
	int ret;

	/* Search definer cache for requested definer */
	list_for_each_entry(cached_definer, &cache->list_head, list_node) {
		if (mlx5hws_definer_compare(&cached_definer->definer, definer))
			continue;

		/* Reuse definer and set LRU (move to be first in the list) */
		list_move(&cached_definer->list_node, &cache->list_head);
		cached_definer->refcount++;
		return cached_definer->definer.obj_id;
	}

	/* Allocate and create definer based on the bitmask tag */
	def_attr.match_mask = definer->mask.jumbo;
	def_attr.dw_selector = definer->dw_selector;
	def_attr.byte_selector = definer->byte_selector;

	ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
	if (ret)
		return -1;

	cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
	if (!cached_definer)
		goto free_definer_obj;

	/* Cache a copy of the definer, keyed by the created object id */
	memcpy(&cached_definer->definer, definer, sizeof(*definer));
	cached_definer->definer.obj_id = obj_id;
	cached_definer->refcount = 1;
	list_add(&cached_definer->list_node, &cache->list_head);

	return obj_id;

free_definer_obj:
	mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
	return -1;
}
2270
/* Release one reference on the cached definer with @obj_id. When the
 * refcount drops to zero the cache entry is removed, the FW object
 * destroyed and the entry freed. A miss indicates a programming error
 * (every put must match a prior get) and is only logged.
 */
static void
hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
{
	struct mlx5hws_definer_cache_item *cached_definer;

	list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
		if (cached_definer->definer.obj_id != obj_id)
			continue;

		/* Object found */
		if (--cached_definer->refcount)
			return;

		list_del_init(&cached_definer->list_node);
		mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
		kfree(cached_definer);
		return;
	}

	/* Programming error, object must be part of cache */
	pr_warn("HWS: failed putting definer object\n");
}
2293
/* Duplicate @layout into a new definer, optionally rebase the field
 * copy array onto it (@bind_fc), build the definer's mask tag from
 * @match_param, and obtain a FW object for it (cached or newly
 * created). Returns the new definer (caller frees via
 * mlx5hws_definer_free()), or NULL on any failure.
 */
static struct mlx5hws_definer *
hws_definer_alloc(struct mlx5hws_context *ctx,
		  struct mlx5hws_definer_fc *fc,
		  int fc_sz,
		  u32 *match_param,
		  struct mlx5hws_definer *layout,
		  bool bind_fc)
{
	struct mlx5hws_definer *definer;
	int ret;

	definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return NULL;

	/* Align field copy array based on given layout */
	if (bind_fc) {
		ret = hws_definer_fc_bind(definer, fc, fc_sz);
		if (ret) {
			mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
			goto free_definer;
		}
	}

	/* Create the tag mask used for definer creation */
	hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);

	/* Negative return means failure; non-negative is the object id */
	ret = mlx5hws_definer_get_obj(ctx, definer);
	if (ret < 0)
		goto free_definer;

	definer->obj_id = ret;
	return definer;

free_definer:
	kfree(definer);
	return NULL;
}
2332
/* Release a definer: drop its cache/FW object reference, then free the
 * definer structure itself.
 */
void mlx5hws_definer_free(struct mlx5hws_context *ctx,
			  struct mlx5hws_definer *definer)
{
	hws_definer_put_obj(ctx, definer->obj_id);
	kfree(definer);
}
2339
2340 static int
hws_definer_mt_match_init(struct mlx5hws_context * ctx,struct mlx5hws_match_template * mt,struct mlx5hws_definer * match_layout)2341 hws_definer_mt_match_init(struct mlx5hws_context *ctx,
2342 struct mlx5hws_match_template *mt,
2343 struct mlx5hws_definer *match_layout)
2344 {
2345 /* Create mandatory match definer */
2346 mt->definer = hws_definer_alloc(ctx,
2347 mt->fc,
2348 mt->fc_sz,
2349 mt->match_param,
2350 match_layout,
2351 true);
2352 if (!mt->definer) {
2353 mlx5hws_err(ctx, "Failed to create match definer\n");
2354 return -EINVAL;
2355 }
2356
2357 return 0;
2358 }
2359
/* Destroy the match template's match definer. */
static void
hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
			    struct mlx5hws_match_template *mt)
{
	mlx5hws_definer_free(ctx, mt->definer);
}
2366
mlx5hws_definer_mt_init(struct mlx5hws_context * ctx,struct mlx5hws_match_template * mt)2367 int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
2368 struct mlx5hws_match_template *mt)
2369 {
2370 struct mlx5hws_definer match_layout = {0};
2371 int ret;
2372
2373 ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
2374 if (ret) {
2375 mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
2376 return ret;
2377 }
2378
2379 /* Calculate definers needed for exact match */
2380 ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
2381 if (ret) {
2382 mlx5hws_err(ctx, "Failed to init match definers\n");
2383 goto free_fc;
2384 }
2385
2386 return 0;
2387
2388 free_fc:
2389 kfree(mt->fc);
2390 return ret;
2391 }
2392
/* Tear down a match template's definers and free its field copy array. */
void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
			       struct mlx5hws_match_template *mt)
{
	hws_definer_mt_match_uninit(ctx, mt);
	kfree(mt->fc);
}
2399