1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste_v1.h"
7
/* Pick the inner (_I) or outer (_O) variant of a definer lookup type
 * by token-pasting the direction suffix onto the base name.
 */
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
	((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
		   DR_STE_V1_LU_TYPE_##lookup_type##_O)
11
/* Values written to the STE's entry_format field (see
 * dr_ste_v1_set_entry_type() / dr_ste_v1_set_lu_type()).
 */
enum dr_ste_v1_entry_format {
	DR_STE_V1_TYPE_BWC_BYTE = 0x0,
	DR_STE_V1_TYPE_BWC_DW = 0x1,
	DR_STE_V1_TYPE_MATCH = 0x2,		/* exact match, no byte mask */
	DR_STE_V1_TYPE_MATCH_RANGES = 0x7,	/* range match; hit+miss set at init */
};
18
19 /* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ].
 * The high byte is written to entry_format and the low byte to the
 * definer context index (see dr_ste_v1_set_lu_type()).
 */
enum {
	DR_STE_V1_LU_TYPE_NOP				= 0x0000,
	DR_STE_V1_LU_TYPE_ETHL2_TNL			= 0x0002,
	DR_STE_V1_LU_TYPE_IBL3_EXT			= 0x0102,
	DR_STE_V1_LU_TYPE_ETHL2_O			= 0x0003,
	DR_STE_V1_LU_TYPE_IBL4				= 0x0103,
	DR_STE_V1_LU_TYPE_ETHL2_I			= 0x0004,
	DR_STE_V1_LU_TYPE_SRC_QP_GVMI			= 0x0104,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_O			= 0x0005,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O		= 0x0105,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_I			= 0x0006,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I		= 0x0106,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x0007,
	DR_STE_V1_LU_TYPE_IPV6_DES_O			= 0x0107,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x0008,
	DR_STE_V1_LU_TYPE_IPV6_DES_I			= 0x0108,
	DR_STE_V1_LU_TYPE_ETHL4_O			= 0x0009,
	DR_STE_V1_LU_TYPE_IPV6_SRC_O			= 0x0109,
	DR_STE_V1_LU_TYPE_ETHL4_I			= 0x000a,
	DR_STE_V1_LU_TYPE_IPV6_SRC_I			= 0x010a,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O		= 0x000b,
	DR_STE_V1_LU_TYPE_MPLS_O			= 0x010b,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I		= 0x000c,
	DR_STE_V1_LU_TYPE_MPLS_I			= 0x010c,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x000d,
	DR_STE_V1_LU_TYPE_GRE				= 0x010d,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x000e,
	DR_STE_V1_LU_TYPE_GENERAL_PURPOSE		= 0x010e,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x000f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0		= 0x010f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1		= 0x0110,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_OK		= 0x0011,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_0			= 0x0111,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_1			= 0x0112,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_O			= 0x0113,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_I			= 0x0114,
	DR_STE_V1_LU_TYPE_INVALID			= 0x00ff,
	DR_STE_V1_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
59
/* Packet header anchor points used as start/end references by the
 * insert/remove header actions.
 */
enum dr_ste_v1_header_anchors {
	DR_STE_HEADER_ANCHOR_START_OUTER	= 0x00,
	DR_STE_HEADER_ANCHOR_1ST_VLAN		= 0x02,
	DR_STE_HEADER_ANCHOR_IPV6_IPV4		= 0x07,
	DR_STE_HEADER_ANCHOR_INNER_MAC		= 0x13,
	DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4	= 0x19,
};
67
/* Sizes, in bytes, that each action kind occupies in the STE action area;
 * used to track the remaining action budget of an STE.
 */
enum dr_ste_v1_action_size {
	DR_STE_ACTION_SINGLE_SZ = 4,
	DR_STE_ACTION_DOUBLE_SZ = 8,
	DR_STE_ACTION_TRIPLE_SZ = 12,
};
73
/* Attribute values for the insert-with-pointer action */
enum dr_ste_v1_action_insert_ptr_attr {
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
};
79
/* HW action IDs written to the action_id field of each STE action */
enum dr_ste_v1_action_id {
	DR_STE_V1_ACTION_ID_NOP				= 0x00,
	DR_STE_V1_ACTION_ID_COPY			= 0x05,
	DR_STE_V1_ACTION_ID_SET				= 0x06,
	DR_STE_V1_ACTION_ID_ADD				= 0x07,
	DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE		= 0x08,
	DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER	= 0x09,
	DR_STE_V1_ACTION_ID_INSERT_INLINE		= 0x0a,
	DR_STE_V1_ACTION_ID_INSERT_POINTER		= 0x0b,
	DR_STE_V1_ACTION_ID_FLOW_TAG			= 0x0c,
	DR_STE_V1_ACTION_ID_QUEUE_ID_SEL		= 0x0d,
	DR_STE_V1_ACTION_ID_ACCELERATED_LIST		= 0x0e,
	DR_STE_V1_ACTION_ID_MODIFY_LIST			= 0x0f,
	DR_STE_V1_ACTION_ID_ASO				= 0x12,
	DR_STE_V1_ACTION_ID_TRAILER			= 0x13,
	DR_STE_V1_ACTION_ID_COUNTER_ID			= 0x14,
	DR_STE_V1_ACTION_ID_MAX				= 0x21,
	/* use for special cases */
	DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3		= 0x22,
};
100
/* HW field IDs for the modify-header (set/copy/add) actions; referenced by
 * dr_ste_v1_action_modify_field_arr below.
 */
enum {
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0		= 0x00,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1		= 0x01,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2		= 0x02,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0		= 0x08,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1		= 0x09,
	DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0		= 0x0e,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0		= 0x18,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1		= 0x19,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0		= 0x40,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1		= 0x41,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0	= 0x44,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1	= 0x45,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2	= 0x46,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3	= 0x47,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0	= 0x4c,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1	= 0x4d,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2	= 0x4e,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3	= 0x4f,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0		= 0x5e,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1		= 0x5f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0		= 0x6f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1		= 0x70,
	DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE	= 0x7b,
	DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE		= 0x7c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0		= 0x8c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1		= 0x8d,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0		= 0x8e,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1		= 0x8f,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0		= 0x90,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1		= 0x91,
};
133
/* ASO context types used by the ASO action (see dr_ste_v1_set_aso_flow_meter()) */
enum dr_ste_v1_aso_ctx_type {
	DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
};
137
/* Map SW-steering modify-header field IDs (MLX5_ACTION_IN_FIELD_*) to the
 * STEv1 HW modify field: HW field ID, the bit range of the value within
 * that field, and - where the field is protocol-specific - the L3/L4 type
 * it applies to.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
	},
};
265
/* Overwrite the STE's entry_format with one of dr_ste_v1_entry_format */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
270
dr_ste_v1_is_miss_addr_set(u8 * hw_ste_p)271 bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
272 {
273 u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
274
275 /* unlike MATCH STE, for MATCH_RANGES STE both hit and miss addresses
276 * are part of the action, so they both set as part of STE init
277 */
278 return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
279 }
280
/* Program the STE miss address; ICM addresses are 64-byte aligned,
 * so the low 6 bits are dropped and the rest is split into two fields.
 */
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 addr_idx = miss_addr >> 6;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, addr_idx);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, addr_idx >> 26);
}
288
dr_ste_v1_get_miss_addr(u8 * hw_ste_p)289 u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
290 {
291 u64 index =
292 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
293 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
294
295 return index << 6;
296 }
297
/* Write the STE's byte mask (used by the BWC entry formats) */
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
302
dr_ste_v1_get_byte_mask(u8 * hw_ste_p)303 u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
304 {
305 return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
306 }
307
/* Split the 2B lookup type into [definer mode : definer index] fields */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
}
313
/* Program the lookup type of the next STE in the chain */
void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
}
319
dr_ste_v1_get_next_lu_type(u8 * hw_ste_p)320 u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
321 {
322 u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
323 u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
324
325 return (mode << 8 | index);
326 }
327
/* The hit GVMI lives in the top 16 bits of the next table base address */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
332
/* Program the hit address: the 32B-aligned ICM address with the hash
 * table size encoded into its low bits, split across two STE fields.
 */
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 addr_idx = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, addr_idx);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, addr_idx >> 27);
}
340
/* Initialize a fresh STE: own GVMI in all address fields, the given
 * lookup type, and a don't-care next lookup type.
 */
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);

	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
}
350
/* Swap the tag and mask areas in place before writing the STE to ICM;
 * control-only STEs have nothing to swap.
 */
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
	u8 tag_backup[DR_STE_SIZE_TAG] = {};
	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
	u8 *mask = tag + DR_STE_SIZE_TAG;

	if (ste_size == DR_STE_SIZE_CTRL)
		return;

	WARN_ON(ste_size != DR_STE_SIZE);

	/* Tag and mask are the same size, so a single scratch buffer
	 * is enough for the swap.
	 */
	memcpy(tag_backup, tag, DR_STE_SIZE_TAG);
	memcpy(tag, mask, DR_STE_SIZE_MASK);
	memcpy(mask, tag_backup, DR_STE_SIZE_TAG);
}
369
/* Build a single FLOW_TAG action carrying the given flow tag value */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
}
376
/* Attach a flow counter to the STE via its counter_id field */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
381
/* Request packet reparse after this STE's actions modified the packet */
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
386
/* Build an L2 encap action: insert-by-pointer with ENCAP attribute */
static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
				u32 reformat_id, int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
399
/* Build an insert-header action: push a header referenced by reformat_id
 * at the given anchor/offset (a regular push, not an encap).
 */
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
				     u32 reformat_id,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
419
/* Build a remove-by-size action: strip 'size' bytes starting at
 * anchor + offset.
 */
static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
434
/* Build a push-vlan action: insert the vlan header inline, right after
 * the L2 MAC addresses.
 */
static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
				    u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);

	dr_ste_v1_set_reparse(hw_ste_p);
}
448
/* Build a pop-vlan action: remove vlans_num VLAN headers starting at
 * the first VLAN anchor.
 */
static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
461
/* Build an L3 encap as two actions: first strip the L2 headers up to
 * the IP header, then insert the reformat header by pointer.
 */
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
				   u8 *frst_s_action,
				   u8 *scnd_d_action,
				   u32 reformat_id,
				   int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
485
/* Build an RX L2 decap action: remove everything up to the inner MAC */
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);

	dr_ste_v1_set_reparse(hw_ste_p);
}
497
/* Build an accelerated modify-header action. When action_data is given,
 * the single modify action is copied inline into the STE; otherwise an
 * ACCELERATED_LIST action referencing the pattern/args is built.
 */
static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p,
						      u8 *d_action,
						      u16 num_of_actions,
						      u32 rewrite_pattern,
						      u32 rewrite_args,
						      u8 *action_data)
{
	if (!action_data) {
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST);
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 modify_actions_pattern_pointer, rewrite_pattern);
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 modify_actions_argument_pointer, rewrite_args);
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 number_of_modify_actions, num_of_actions);
	} else {
		memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE);
	}

	dr_ste_v1_set_reparse(hw_ste_p);
}
520
/* Build a basic (non-accelerated) modify-header action referencing a
 * list of num_of_actions modify actions at rewrite_index.
 */
static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p,
						u8 *s_action,
						u16 num_of_actions,
						u32 rewrite_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 rewrite_index);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);

	dr_ste_v1_set_reparse(hw_ste_p);
}
535
/* Dispatch a modify-header action to the accelerated variant when a
 * pattern index was allocated, or to the basic variant otherwise.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *action,
					  u16 num_of_actions,
					  u32 rewrite_pattern,
					  u32 rewrite_args,
					  u8 *action_data)
{
	/* No pattern index - fall back to the code that doesn't support
	 * accelerated modify header.
	 */
	if (rewrite_pattern == MLX5DR_INVALID_PATTERN_INDEX) {
		dr_ste_v1_set_basic_rewrite_actions(hw_ste_p,
						    action,
						    num_of_actions,
						    rewrite_args);
		return;
	}

	dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p,
						  action,
						  num_of_actions,
						  rewrite_pattern,
						  rewrite_args,
						  action_data);
}
557
/* Build an ASO flow-meter action. The flat meter 'offset' is split into
 * an ASO object (context number) and a line within that object.
 */
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
					 u32 object_id,
					 u32 offset,
					 u8 dest_reg_id,
					 u8 init_color)
{
	u32 obj = object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
	u32 line = offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ;

	MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_ASO);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number, obj);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
		 DR_STE_V1_ASO_CTX_TYPE_POLICERS);
	/* Convert reg_c index to HW 64bit index */
	MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
		 (dest_reg_id - 1) / 2);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id, line);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
		 init_color);
}
578
/* Program a packet-length range match on a MATCH_RANGES STE */
static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
					      u32 min, u32 max)
{
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);

	/* When the STE is sent, dr_ste_v1_prepare_for_postsend() swaps its
	 * mask and tag. A match-range STE has no mask and must not be
	 * swapped, but since it goes through the common send path we
	 * pre-compensate: the values logically belonging to min/max_value_2
	 * (match_dw_0 in the definer) are written to min/max_value_0 so the
	 * swap lands them in the right place in ICM.
	 *
	 * Pkt len is 2 bytes, stored in the higher section of the DW.
	 */
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
}
599
/* Append a fresh MATCH STE to the STE array when the current one has no
 * room left for actions; advances *last_ste and bumps *added_stes.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action_area;

	*last_ste += DR_STE_SIZE;
	(*added_stes)++;

	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	/* Start with a clean action area */
	action_area = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action_area, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
614
/* Same as dr_ste_v1_arr_init_next_match() but the new STE is a
 * MATCH_RANGES entry.
 */
static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
						u32 *added_stes,
						u16 gvmi)
{
	dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
622
/* Build the TX action chain for a rule into one or more STEs.
 *
 * Actions are emitted in a fixed order into the action area of the last
 * STE. action_sz tracks the remaining byte budget of the current STE;
 * whenever an action doesn't fit (or must not share an STE with a prior
 * action - tracked by allow_modify_hdr / allow_encap), a fresh MATCH STE
 * is appended via dr_ste_v1_arr_init_next_match() and *added_stes is
 * incremented accordingly.
 */
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;	/* budget of the first STE */
	bool allow_modify_hdr = true;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_pat_idx,
					      attr->modify_index,
					      attr->single_modify_action);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		/* encap must not share an STE with a preceding modify_hdr */
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		/* one insert-inline action per VLAN header */
		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			dr_ste_v1_set_push_vlan(last_ste, action,
						attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	/* The reformat action types are mutually exclusive */
	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* encap_l3 is two actions: remove header + insert pointer */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_remove_hdr(last_ste, action,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_aso_flow_meter(action,
					     attr->aso_flow_meter.obj_id,
					     attr->aso_flow_meter.offset,
					     attr->aso_flow_meter.dest_reg_id,
					     attr->aso_flow_meter.init_color);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_RANGE]) {
		/* match ranges requires a new STE of its own type */
		dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
		dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);

		/* we do not support setting any action on the match ranges STE */
		action_sz = 0;

		dr_ste_v1_set_match_range_pkt_len(last_ste,
						  attr->range.definer_id,
						  attr->range.min,
						  attr->range.max);
	}

	/* set counter ID on the last STE to adhere to DMFS behavior */
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
778
/* Build the RX (receive-side) chain of STE actions selected by
 * @action_type_set into @last_ste, spilling into freshly initialized
 * match STEs whenever the current STE runs out of action slots.
 *
 * @dmn:             steering domain (not referenced here; presumably kept
 *                   to match the ste_ctx set_actions callback signature)
 * @action_type_set: boolean array indexed by DR_ACTION_TYP_*
 * @actions_caps:    DR_STE_CTX_ACTION_CAP_* capability bits
 * @last_ste:        STE to start writing into; advanced to the final STE
 *                   of the chain by dr_ste_v1_arr_init_next_match*()
 * @attr:            per-action-type parameters
 * @added_stes:      incremented for every extra STE appended
 *
 * The ordering of the if-blocks below is fixed: it is the order in which
 * HW must apply the actions (decap first, counter after decap but before
 * insert_hdr, match-range STE last). allow_modify_hdr/allow_ctr track
 * combinations that must not share a single STE.
 */
void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	/* first STE is BWC format: room for one double action only */
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_ctr = true;

	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->decap_actions,
					      attr->decap_pat_idx,
					      attr->decap_index,
					      NULL);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
		allow_ctr = false;
	} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
		dr_ste_v1_set_rx_decap(last_ste, action);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
		allow_modify_hdr = false;
		allow_ctr = false;
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			/* a fresh match STE has three action slots */
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
		    !allow_modify_hdr) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}

		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
		allow_ctr = false;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		/* Modify header and decapsulation must use different STEs */
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_pat_idx,
					      attr->modify_index,
					      attr->single_modify_action);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		/* each pushed VLAN header consumes one double action */
		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
			    !allow_modify_hdr) {
				dr_ste_v1_arr_init_next_match(&last_ste,
							      added_stes,
							      attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1,
						      last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
			}
			dr_ste_v1_set_push_vlan(last_ste, action,
						attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	if (action_type_set[DR_ACTION_TYP_CTR]) {
		/* Counter action set after decap and before insert_hdr
		 * to exclude decaped / encaped header respectively.
		 */
		if (!allow_ctr) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
		}
		/* counter lives in the STE header, not in an action slot */
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
		allow_ctr = false;
	}

	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* encap_l3 needs a single + a double action back to back */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}

		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		/* Modify header, decap, and encap must use different STEs */
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_remove_hdr(last_ste, action,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_aso_flow_meter(action,
					     attr->aso_flow_meter.obj_id,
					     attr->aso_flow_meter.offset,
					     attr->aso_flow_meter.dest_reg_id,
					     attr->aso_flow_meter.init_color);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_RANGE]) {
		/* match ranges requires a new STE of its own type */
		dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
		dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);

		/* we do not support setting any action on the match ranges STE */
		action_sz = 0;

		dr_ste_v1_set_match_range_pkt_len(last_ste,
						  attr->range.definer_id,
						  attr->range.min,
						  attr->range.max);
	}

	/* hit address/gvmi always go on the final STE of the chain */
	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
984
/* Encode a double "set" modify-header action at @d_action: write the
 * inline @data value into @length bits of destination dword @hw_field,
 * left-shifted by @shifter (biased by the v1 QW offset so callers can
 * pass v0-style shifters — TODO confirm that is the intent of the bias).
 */
void dr_ste_v1_set_action_set(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
998
/* Encode a double "add" modify-header action at @d_action: add @data to
 * the @length-bit field of destination dword @hw_field at bit offset
 * @shifter (same v1 QW-offset bias as dr_ste_v1_set_action_set()).
 */
void dr_ste_v1_set_action_add(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
1012
/* Encode a double "copy" modify-header action at @d_action: copy
 * @dst_len bits from source dword @src_hw_field (right-shifted by
 * @src_shifter) into destination dword @dst_hw_field at @dst_shifter.
 * Both shifters carry the v1 QW-offset bias.
 */
void dr_ste_v1_set_action_copy(u8 *d_action,
			       u8 dst_hw_field,
			       u8 dst_shifter,
			       u8 dst_len,
			       u8 src_hw_field,
			       u8 src_shifter)
{
	dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
1029
/* Worst-case number of double actions emitted below (1 remove + up to
 * header-rebuild inserts + 1 trailing remove-by-size).
 */
#define DR_STE_DECAP_L3_ACTION_NUM	8
/* Max rebuilt L2 header: 14B (no VLAN) or 18B (VLAN), padded up to 20 */
#define DR_STE_L2_HDR_MAX_SZ		20

/* Build the HW action list that strips the outer L2/L3 tunnel headers
 * and re-inserts the inner L2 header.
 *
 * @data:               the L2 header to re-insert (expected 14B or 18B
 *                      per the loop comment below — TODO confirm callers
 *                      never pass other sizes)
 * @data_sz:            size of @data in bytes
 * @hw_action:          output buffer for the action list
 * @hw_action_sz:       size of @hw_action in bytes
 * @used_hw_action_num: out: number of actions written
 *
 * Return: 0 on success, -EINVAL if @hw_action cannot hold the worst case.
 */
int dr_ste_v1_set_action_decap_l3_list(void *data,
				       u32 data_sz,
				       u8 *hw_action,
				       u32 hw_action_sz,
				       u16 *used_hw_action_num)
{
	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
	void *data_ptr = padded_data;
	u16 used_actions = 0;
	u32 inline_data_sz;
	u32 i;

	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
		return -EINVAL;

	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);

	/* Add an alignment padding: place the header so that it ends on an
	 * inline_data_sz boundary (e.g. a 14B header gets 2 pad bytes in
	 * front); the pad is stripped again by the final remove-by-size.
	 */
	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);

	/* Remove L2L3 outer headers */
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
	hw_action += DR_STE_ACTION_DOUBLE_SZ;
	used_actions++; /* Remove and NOP are a single double action */

	/* Point to the last dword of the header */
	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;

	/* Add the new header using inline action 4Byte at a time, the header
	 * is added in reversed order to the beginning of the packet to avoid
	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
	 * two bytes are padded and later removed.
	 */
	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
		void *addr_inline;

		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
		/* The hardware expects here offset to words (2 bytes) */
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);

		/* Copy the raw header bytes into the inline field to avoid
		 * endianness conversions
		 */
		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
					   hw_action, inline_data);
		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
		hw_action += DR_STE_ACTION_DOUBLE_SZ;
		used_actions++;
	}

	/* Remove first 2 extra bytes */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
	/* The hardware expects here size in words (2 bytes) */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
	used_actions++;

	*used_hw_action_num = used_actions;

	return 0;
}
1100
/* Populate @bit_mask for the ETHL2_SRC_DST lookup from the inner/outer
 * match spec. Consumed mask fields (cvlan_tag/svlan_tag) are cleared so
 * later builders do not re-match them.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* any ip_version mask is matched through the STE l3_type field */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
1125
/* Build the ETHL2_SRC_DST match tag from @value, translating ip_version
 * to the HW l3_type encoding and the vlan tag flags to the vlan
 * qualifier. Consumed spec fields are zeroed.
 *
 * Return: 0 on success, -EINVAL for an unsupported ip_version value.
 */
static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* only IPv4/IPv6 can be expressed in l3_type */
		return -EINVAL;
	}

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
1161
/* Initialize @sb for ETHL2_SRC_DST matching: consume @mask into
 * sb->bit_mask and install the lookup type and tag-builder callback.
 */
void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
}
1171
/* Build the IPv6 destination-address match tag (all 128 bits, copied as
 * four 32-bit chunks). Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1185
/* Initialize @sb for IPv6 destination matching; the tag builder doubles
 * as the bit-mask builder (run once here directly on sb->bit_mask).
 */
void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
}
1195
/* Build the IPv6 source-address match tag (all 128 bits, copied as four
 * 32-bit chunks). Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1209
/* Initialize @sb for IPv6 source matching; the tag builder doubles as
 * the bit-mask builder (run once here directly on sb->bit_mask).
 */
void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
}
1219
/* Build the IPv4 5-tuple match tag: addresses, L4 ports (TCP and UDP
 * map onto the same STE port fields), protocol, frag/dscp/ecn and TCP
 * flags. Consumed tcp_flags are cleared. Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1244
/* Initialize @sb for IPv4 5-tuple matching; the tag builder doubles as
 * the bit-mask builder (run once here directly on sb->bit_mask).
 */
void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
					      struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
}
1254
/* Common bit-mask builder for the ETHL2 SRC and DST lookups: VLAN (first
 * and second tag, taken from misc for inner/outer respectively),
 * fragment flag, ethertype and l3_type. Consumed vlan-tag mask flags are
 * cleared.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1304
/* Common tag builder for the ETHL2 SRC and DST lookups: mirrors
 * dr_ste_v1_build_eth_l2_src_or_dst_bit_mask() but writes concrete
 * values and distinguishes CVLAN from SVLAN. Consumed spec/misc fields
 * are cleared.
 *
 * Return: 0 on success, -EINVAL for an unsupported ip_version value.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* only IPv4/IPv6 can be expressed in l3_type */
		return -EINVAL;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1362
/* Bit-mask builder for ETHL2_SRC: source MAC plus the shared L2 fields
 * handled by dr_ste_v1_build_eth_l2_src_or_dst_bit_mask().
 */
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1373
/* Tag builder for ETHL2_SRC: source MAC plus the shared L2 fields.
 * Return value is propagated from the shared builder (-EINVAL on bad
 * ip_version).
 */
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1385
/* Initialize @sb for ETHL2_SRC matching: consume @mask into
 * sb->bit_mask and install the lookup type and tag-builder callback.
 */
void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
}
1395
/* Bit-mask builder for ETHL2 (dst): destination MAC plus the shared L2
 * fields handled by dr_ste_v1_build_eth_l2_src_or_dst_bit_mask().
 */
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1406
/* Tag builder for ETHL2 (dst): destination MAC plus the shared L2
 * fields. Return value is propagated from the shared builder (-EINVAL
 * on bad ip_version).
 */
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1418
/* Initialize @sb for ETHL2 (dst) matching: consume @mask into
 * sb->bit_mask and install the lookup type and tag-builder callback.
 */
void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
}
1428
/* Bit-mask builder for the ETHL2_TNL lookup: L2 fields plus the VXLAN
 * VNI (shifted into the upper bits of l2_tunneling_network_id — layout
 * dictated by the STE format). Consumed mask fields are cleared.
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1456
/* Tag builder for the ETHL2_TNL lookup: mirrors the bit-mask builder
 * with concrete values, distinguishing CVLAN/SVLAN and translating
 * ip_version to l3_type. Consumed fields are cleared.
 *
 * Return: 0 on success, -EINVAL for an unsupported ip_version value.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* same <<8 placement as the bit-mask builder */
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* only IPv4/IPv6 can be expressed in l3_type */
		return -EINVAL;
	}

	return 0;
}
1498
/* Initialize @sb for ETHL2_TNL matching. Note: this lookup type has no
 * inner/outer variants, so the lu_type is a fixed constant rather than
 * DR_STE_CALC_DFNR_TYPE().
 */
void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
}
1508
/* Build the IPv4 misc match tag: TTL and IHL only. Always succeeds. */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);

	return 0;
}
1520
/* Initialize @sb for IPv4 misc matching; the tag builder doubles as the
 * bit-mask builder (run once here directly on sb->bit_mask).
 */
void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
					   struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
}
1530
/* Build the STE tag for IPv6 L3/L4 matching: L4 ports, IP protocol,
 * fragmentation, DSCP/ECN, hop limit, flow label and TCP flags.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	/* TCP and UDP ports are written to the same dst_port/src_port
	 * tag fields.
	 */
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* Flow label lives in misc, with separate inner/outer fields */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		/* Clear the consumed field so leftovers can be detected */
		spec->tcp_flags = 0;
	}

	return 0;
}
1560
dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1561 void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1562 struct mlx5dr_match_param *mask)
1563 {
1564 dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1565
1566 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
1567 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1568 sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
1569 }
1570
/* Build the STE tag for the first MPLS label, using the inner or outer
 * misc2 fields according to sb->inner.
 */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);

	return 0;
}
1584
dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1585 void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
1586 struct mlx5dr_match_param *mask)
1587 {
1588 dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
1589
1590 sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
1591 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1592 sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
1593 }
1594
/* Build the STE tag for GRE tunnel matching: protocol, key halves and
 * the C/K/S presence bits.
 */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1611
dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1612 void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1613 struct mlx5dr_match_param *mask)
1614 {
1615 dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1616
1617 sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
1618 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1619 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
1620 }
1621
/* Build the STE tag for the first MPLS label of a tunneled packet.
 * The label may come from MPLS-over-GRE or MPLS-over-UDP misc2 fields;
 * both map onto the same mpls0_* tag fields.
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1656
dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1657 void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1658 struct mlx5dr_match_param *mask)
1659 {
1660 dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1661
1662 sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
1663 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1664 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
1665 }
1666
/* Build the STE tag for MPLS-over-UDP via a flex parser: assemble the
 * 32-bit MPLS label-stack entry word from the misc2 fields and write it
 * big-endian at the flex parser's offset within the tag.
 */
static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	/* Each field is cleared once consumed so the caller can detect
	 * unsupported leftover match fields.
	 */
	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_udp_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_udp_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1691
dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1692 void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
1693 struct mlx5dr_match_param *mask)
1694 {
1695 dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
1696
1697 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1698 * flex parsers_{0-3}/{4-7} respectively.
1699 */
1700 sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
1701 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1702 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1703
1704 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1705 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
1706 }
1707
/* Build the STE tag for MPLS-over-GRE via a flex parser: assemble the
 * 32-bit MPLS label-stack entry word from the misc2 fields and write it
 * big-endian at the flex parser's offset within the tag.
 */
static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	/* Each field is cleared once consumed so the caller can detect
	 * unsupported leftover match fields.
	 */
	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_gre_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_gre_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1732
dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1733 void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
1734 struct mlx5dr_match_param *mask)
1735 {
1736 dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
1737
1738 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1739 * flex parsers_{0-3}/{4-7} respectively.
1740 */
1741 sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
1742 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1743 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1744
1745 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1746 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
1747 }
1748
/* Build the STE tag for ICMP matching. ICMPv4 and ICMPv6 use distinct
 * misc3 source fields but share the same tag layout, so select the
 * relevant pointers up front and write through them.
 */
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
	u32 *icmp_header_data;
	u8 *icmp_type;
	u8 *icmp_code;

	/* Pick the v4 or v6 flavor of the ICMP match fields */
	icmp_header_data = is_ipv4 ? &misc3->icmpv4_header_data :
				     &misc3->icmpv6_header_data;
	icmp_type = is_ipv4 ? &misc3->icmpv4_type : &misc3->icmpv6_type;
	icmp_code = is_ipv4 ? &misc3->icmpv4_code : &misc3->icmpv6_code;

	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
	MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
	MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);

	/* Clear the consumed fields so leftovers can be detected */
	*icmp_header_data = 0;
	*icmp_type = 0;
	*icmp_code = 0;

	return 0;
}
1779
dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1780 void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
1781 struct mlx5dr_match_param *mask)
1782 {
1783 dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
1784
1785 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1786 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1787 sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
1788 }
1789
/* Build the STE tag for the general-purpose lookup from metadata
 * register A.
 */
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc2, metadata_reg_a);

	return 0;
}
1801
dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1802 void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1803 struct mlx5dr_match_param *mask)
1804 {
1805 dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
1806
1807 sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
1808 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1809 sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
1810 }
1811
/* Build the STE tag for TCP sequence/ack numbers, using the inner or
 * outer misc3 fields according to sb->inner.
 */
static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
	}

	return 0;
}
1828
dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1829 void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1830 struct mlx5dr_match_param *mask)
1831 {
1832 dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1833
1834 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1835 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1836 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
1837 }
1838
/* Build the STE tag for VXLAN-GPE tunnel matching: flags, next protocol
 * and VNI from the outer header misc3 fields.
 */
static int
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_flags, misc3,
		       outer_vxlan_gpe_flags);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_next_protocol, misc3,
		       outer_vxlan_gpe_next_protocol);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_vni, misc3,
		       outer_vxlan_gpe_vni);

	return 0;
}
1858
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1859 void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1860 struct mlx5dr_match_param *mask)
1861 {
1862 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1863
1864 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1865 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1866 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
1867 }
1868
/* Build the STE tag for GENEVE tunnel matching: protocol type, OAM bit,
 * option length and VNI.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_protocol_type, misc, geneve_protocol_type);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_oam, misc, geneve_oam);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_opt_len, misc, geneve_opt_len);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_vni, misc, geneve_vni);

	return 0;
}
1887
dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1888 void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1889 struct mlx5dr_match_param *mask)
1890 {
1891 dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1892
1893 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1894 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1895 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
1896 }
1897
/* Build the STE tag for the generic tunnel header words 0 and 1 taken
 * from misc5.
 */
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc5 *misc5 = &value->misc5;

	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);

	return 0;
}
1909
dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1910 void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
1911 struct mlx5dr_match_param *mask)
1912 {
1913 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1914 dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
1915 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1916 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
1917 }
1918
/* Build the STE tag for steering registers 0-1 from metadata registers
 * C0-C3; each 64-bit register is split into high/low 32-bit halves.
 */
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}
1932
dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1933 void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
1934 struct mlx5dr_match_param *mask)
1935 {
1936 dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
1937
1938 sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
1939 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1940 sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
1941 }
1942
/* Build the STE tag for steering registers 2-3 from metadata registers
 * C4-C7; each 64-bit register is split into high/low 32-bit halves.
 */
static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}
1956
dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1957 void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
1958 struct mlx5dr_match_param *mask)
1959 {
1960 dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
1961
1962 sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
1963 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1964 sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
1965 }
1966
/* Build the bit mask for source GVMI/QP matching. The eswitch owner
 * vhca_id mask is consumed here (cleared) even though its value is only
 * used later by the tag builder to select the vport domain.
 */
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
						  u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
1976
/* Build the STE tag for source QP/GVMI matching.
 *
 * The source_port field is translated into the HW GVMI of the vport:
 * when a valid eswitch-owner vhca_id is supplied, the owning domain
 * (this one or a registered peer) is resolved first, and the vport
 * capabilities of that domain provide the GVMI.
 *
 * Returns 0 on success, -EINVAL if the vhca_id matches no known domain
 * or the vport is disabled/invalid.
 */
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	int id = misc->source_eswitch_owner_vhca_id;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;
	struct mlx5dr_domain *peer;

	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		peer = xa_load(&dmn->peer_dmn_xa, id);
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (peer && (id == peer->info.caps.gvmi))
			vport_dmn = peer;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* Nothing more to do if source_port is not part of the mask */
	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
		return 0;

	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
	if (!vport_cap) {
		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
			   misc->source_port);
		return -EINVAL;
	}

	if (vport_cap->vport_gvmi)
		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);

	/* Clear the consumed field so leftovers can be detected */
	misc->source_port = 0;
	return 0;
}
2022
dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2023 void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
2024 struct mlx5dr_match_param *mask)
2025 {
2026 dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
2027
2028 sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
2029 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2030 sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
2031 }
2032
/* Write one programmable flex parser sample into the tag.
 * Skips out-of-range parser ids and ids already written for this tag
 * (tracked in @parser_is_used); on success the value is stored
 * big-endian at the parser's offset and the consumed id/value fields
 * are cleared.
 */
static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
				      u32 *misc4_field_value,
				      bool *parser_is_used,
				      u8 *tag)
{
	u32 id = *misc4_field_id;
	u8 *parser_ptr;

	if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
		return;

	parser_is_used[id] = true;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);

	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
	*misc4_field_id = 0;
	*misc4_field_value = 0;
}
2051
/* Build the STE tag from the four misc4 programmable flex parser
 * id/value pairs. Duplicate parser ids within the same tag are written
 * only once (first pair wins).
 * Note: "felx" is a historical misspelling kept because the name is
 * referenced by the flex_parser_0/1 init functions.
 */
static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
				  &misc_4_mask->prog_sample_field_value_0,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
				  &misc_4_mask->prog_sample_field_value_1,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
				  &misc_4_mask->prog_sample_field_value_2,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
				  &misc_4_mask->prog_sample_field_value_3,
				  parser_is_used, tag);

	return 0;
}
2077
dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2078 void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
2079 struct mlx5dr_match_param *mask)
2080 {
2081 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2082 dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
2083 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2084 sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
2085 }
2086
dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2087 void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
2088 struct mlx5dr_match_param *mask)
2089 {
2090 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
2091 dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
2092 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2093 sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
2094 }
2095
/* Build the STE tag for GENEVE TLV option 0 data, written at the offset
 * of the flex parser assigned by the device capabilities.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);
	/* Clear the consumed field so leftovers can be detected */
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}
2111
2112 void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2113 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
2114 struct mlx5dr_match_param *mask)
2115 {
2116 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
2117
2118 /* STEs with lookup type FLEX_PARSER_{0/1} includes
2119 * flex parsers_{0-3}/{4-7} respectively.
2120 */
2121 sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
2122 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
2123 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2124
2125 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2126 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
2127 }
2128
/* Build the STE tag matching on the existence of GENEVE TLV option 0:
 * set the "parser ok" bit corresponding to the assigned flex parser.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
							 struct mlx5dr_ste_build *sb,
							 u8 *tag)
{
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	struct mlx5dr_match_misc *misc = &value->misc;

	if (misc->geneve_tlv_option_0_exist) {
		MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
		/* Clear the consumed field so leftovers can be detected */
		misc->geneve_tlv_option_0_exist = 0;
	}

	return 0;
}
2144
2145 void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2146 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
2147 struct mlx5dr_match_param *mask)
2148 {
2149 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
2150 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
2151 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2152 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
2153 }
2154
/* Build the STE tag for GTP-U tunnel matching: message flags, message
 * type and TEID.
 */
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
						    struct mlx5dr_ste_build *sb,
						    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);

	return 0;
}
2167
dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2168 void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
2169 struct mlx5dr_match_param *mask)
2170 {
2171 dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
2172
2173 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2174 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2175 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
2176 }
2177
/* Build the STE tag for GTP-U fields whose flex parsers landed in bank
 * 0 (parsers 0-3); fields assigned to bank 1 are skipped here.
 */
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
2193
2194 void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2195 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
2196 struct mlx5dr_match_param *mask)
2197 {
2198 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
2199
2200 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2201 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2202 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
2203 }
2204
/* Build the STE tag for GTP-U fields whose flex parsers landed in bank
 * 1 (parsers 4-7); fields assigned to bank 0 are skipped here.
 */
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
2220
2221 void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2222 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
2223 struct mlx5dr_match_param *mask)
2224 {
2225 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
2226
2227 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
2228 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2229 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
2230 }
2231
dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action * action)2232 int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
2233 {
2234 struct mlx5dr_ptrn_mgr *ptrn_mgr;
2235 int ret;
2236
2237 ptrn_mgr = action->rewrite->dmn->ptrn_mgr;
2238 if (!ptrn_mgr)
2239 return -EOPNOTSUPP;
2240
2241 action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr,
2242 action->rewrite->num_of_actions,
2243 action->rewrite->data);
2244 if (!action->rewrite->arg) {
2245 mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n");
2246 return -EAGAIN;
2247 }
2248
2249 action->rewrite->ptrn =
2250 mlx5dr_ptrn_cache_get_pattern(ptrn_mgr,
2251 action->rewrite->num_of_actions,
2252 action->rewrite->data);
2253 if (!action->rewrite->ptrn) {
2254 mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n");
2255 ret = -EAGAIN;
2256 goto put_arg;
2257 }
2258
2259 return 0;
2260
2261 put_arg:
2262 mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
2263 action->rewrite->arg);
2264 return ret;
2265 }
2266
/* Release the pattern and argument objects acquired by
 * dr_ste_v1_alloc_modify_hdr_ptrn_arg(), in the same pattern-then-arg
 * order.
 */
void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
{
	mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr,
				      action->rewrite->ptrn);
	mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
			   action->rewrite->arg);
}
2274
2275 static struct mlx5dr_ste_ctx ste_ctx_v1 = {
2276 /* Builders */
2277 .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
2278 .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
2279 .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
2280 .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
2281 .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
2282 .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
2283 .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
2284 .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
2285 .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
2286 .build_mpls_init = &dr_ste_v1_build_mpls_init,
2287 .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
2288 .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
2289 .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
2290 .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
2291 .build_icmp_init = &dr_ste_v1_build_icmp_init,
2292 .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
2293 .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
2294 .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
2295 .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
2296 .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
2297 .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
2298 .build_register_0_init = &dr_ste_v1_build_register_0_init,
2299 .build_register_1_init = &dr_ste_v1_build_register_1_init,
2300 .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
2301 .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
2302 .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
2303 .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
2304 .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
2305 .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
2306 .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
2307
2308 /* Getters and Setters */
2309 .ste_init = &dr_ste_v1_init,
2310 .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
2311 .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
2312 .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
2313 .set_miss_addr = &dr_ste_v1_set_miss_addr,
2314 .get_miss_addr = &dr_ste_v1_get_miss_addr,
2315 .set_hit_addr = &dr_ste_v1_set_hit_addr,
2316 .set_byte_mask = &dr_ste_v1_set_byte_mask,
2317 .get_byte_mask = &dr_ste_v1_get_byte_mask,
2318 /* Actions */
2319 .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
2320 DR_STE_CTX_ACTION_CAP_RX_PUSH |
2321 DR_STE_CTX_ACTION_CAP_RX_ENCAP |
2322 DR_STE_CTX_ACTION_CAP_POP_MDFY,
2323 .set_actions_rx = &dr_ste_v1_set_actions_rx,
2324 .set_actions_tx = &dr_ste_v1_set_actions_tx,
2325 .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
2326 .modify_field_arr = dr_ste_v1_action_modify_field_arr,
2327 .set_action_set = &dr_ste_v1_set_action_set,
2328 .set_action_add = &dr_ste_v1_set_action_add,
2329 .set_action_copy = &dr_ste_v1_set_action_copy,
2330 .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
2331 .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
2332 .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
2333
2334 /* Send */
2335 .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
2336 };
2337
mlx5dr_ste_get_ctx_v1(void)2338 struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
2339 {
2340 return &ste_ctx_v1;
2341 }
2342