1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019, Mellanox Technologies */
3
4 #ifndef _DR_TYPES_
5 #define _DR_TYPES_
6
7 #include <linux/mlx5/vport.h>
8 #include <linux/refcount.h>
9 #include "fs_core.h"
10 #include "wq.h"
11 #include "lib/mlx5.h"
12 #include "mlx5_ifc_dr.h"
13 #include "mlx5dr.h"
14 #include "dr_dbg.h"
15
16 #define DR_RULE_MAX_STES 18
17 #define DR_ACTION_MAX_STES 5
18 #define DR_STE_SVLAN 0x1
19 #define DR_STE_CVLAN 0x2
20 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
21 #define DR_NUM_OF_FLEX_PARSERS 8
22 #define DR_STE_MAX_FLEX_0_ID 3
23 #define DR_STE_MAX_FLEX_1_ID 7
24 #define DR_ACTION_CACHE_LINE_SIZE 64
25
26 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
27 #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
28 #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
29
30 struct mlx5dr_ptrn_mgr;
31 struct mlx5dr_arg_mgr;
32 struct mlx5dr_arg_obj;
33
dr_is_flex_parser_0_id(u8 parser_id)34 static inline bool dr_is_flex_parser_0_id(u8 parser_id)
35 {
36 return parser_id <= DR_STE_MAX_FLEX_0_ID;
37 }
38
dr_is_flex_parser_1_id(u8 parser_id)39 static inline bool dr_is_flex_parser_1_id(u8 parser_id)
40 {
41 return parser_id > DR_STE_MAX_FLEX_0_ID;
42 }
43
/* Supported ICM chunk sizes; each successive value doubles the number of
 * entries in the chunk (1, 2, 4, ... 2048K).
 */
enum mlx5dr_icm_chunk_size {
	DR_CHUNK_SIZE_1,
	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
	DR_CHUNK_SIZE_2,
	DR_CHUNK_SIZE_4,
	DR_CHUNK_SIZE_8,
	DR_CHUNK_SIZE_16,
	DR_CHUNK_SIZE_32,
	DR_CHUNK_SIZE_64,
	DR_CHUNK_SIZE_128,
	DR_CHUNK_SIZE_256,
	DR_CHUNK_SIZE_512,
	DR_CHUNK_SIZE_1K,
	DR_CHUNK_SIZE_2K,
	DR_CHUNK_SIZE_4K,
	DR_CHUNK_SIZE_8K,
	DR_CHUNK_SIZE_16K,
	DR_CHUNK_SIZE_32K,
	DR_CHUNK_SIZE_64K,
	DR_CHUNK_SIZE_128K,
	DR_CHUNK_SIZE_256K,
	DR_CHUNK_SIZE_512K,
	DR_CHUNK_SIZE_1024K,
	DR_CHUNK_SIZE_2048K,
	DR_CHUNK_SIZE_MAX,
};
70
/* Kinds of ICM memory managed by the driver (STEs, modify-header actions,
 * modify-header patterns); DR_ICM_TYPE_MAX also sizes per-type arrays
 * (see mlx5dr_domain.num_buddies).
 */
enum mlx5dr_icm_type {
	DR_ICM_TYPE_STE,
	DR_ICM_TYPE_MODIFY_ACTION,
	DR_ICM_TYPE_MODIFY_HDR_PTRN,
	DR_ICM_TYPE_MAX,
};
77
78 static inline enum mlx5dr_icm_chunk_size
mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)79 mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
80 {
81 chunk += 2;
82 if (chunk < DR_CHUNK_SIZE_MAX)
83 return chunk;
84
85 return DR_CHUNK_SIZE_MAX;
86 }
87
/* STE layout sizes, in bytes */
enum {
	DR_STE_SIZE = 64,
	DR_STE_SIZE_CTRL = 32,
	DR_STE_SIZE_MATCH_TAG = 32,
	DR_STE_SIZE_TAG = 16,
	DR_STE_SIZE_MASK = 16,
	/* STE without its trailing mask portion */
	DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
96
/* Per-STE-context capability flags describing which action combinations
 * the STE format supports.
 */
enum mlx5dr_ste_ctx_action_cap {
	DR_STE_CTX_ACTION_CAP_NONE = 0,
	DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0,
	DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1,
	DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
	DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
};
104
/* Size in bytes of a single modify-header action entry */
enum {
	DR_MODIFY_ACTION_SIZE = 8,
};
108
/* Bitmask of which match-parameter sections (see struct mlx5dr_match_param)
 * a matcher uses.
 */
enum mlx5dr_matcher_criteria {
	DR_MATCHER_CRITERIA_EMPTY = 0,
	DR_MATCHER_CRITERIA_OUTER = 1 << 0,
	DR_MATCHER_CRITERIA_MISC = 1 << 1,
	DR_MATCHER_CRITERIA_INNER = 1 << 2,
	DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
	DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
	DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
	DR_MATCHER_CRITERIA_MISC5 = 1 << 6,
	DR_MATCHER_CRITERIA_MAX = 1 << 7,
};
120
/* All SW-steering action kinds; DR_ACTION_TYP_MAX sizes per-action arrays */
enum mlx5dr_action_type {
	DR_ACTION_TYP_TNL_L2_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L2,
	DR_ACTION_TYP_TNL_L3_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L3,
	DR_ACTION_TYP_DROP,
	DR_ACTION_TYP_QP,
	DR_ACTION_TYP_FT,
	DR_ACTION_TYP_CTR,
	DR_ACTION_TYP_TAG,
	DR_ACTION_TYP_MODIFY_HDR,
	DR_ACTION_TYP_VPORT,
	DR_ACTION_TYP_POP_VLAN,
	DR_ACTION_TYP_PUSH_VLAN,
	DR_ACTION_TYP_INSERT_HDR,
	DR_ACTION_TYP_REMOVE_HDR,
	DR_ACTION_TYP_SAMPLER,
	DR_ACTION_TYP_ASO_FLOW_METER,
	DR_ACTION_TYP_RANGE,
	DR_ACTION_TYP_MAX,
};
142
/* IP version index, used to select per-IPv4/IPv6 STE builder arrays
 * (see struct mlx5dr_matcher_rx_tx)
 */
enum mlx5dr_ipv {
	DR_RULE_IPV4,
	DR_RULE_IPV6,
	DR_RULE_IPV_MAX,
};
148
149 struct mlx5dr_icm_pool;
150 struct mlx5dr_icm_chunk;
151 struct mlx5dr_icm_buddy_mem;
152 struct mlx5dr_ste_htbl;
153 struct mlx5dr_match_param;
154 struct mlx5dr_cmd_caps;
155 struct mlx5dr_rule_rx_tx;
156 struct mlx5dr_matcher_rx_tx;
157 struct mlx5dr_ste_ctx;
158 struct mlx5dr_send_info_pool;
159 struct mlx5dr_icm_hot_chunk;
160
/* Software state tracked for a single STE */
struct mlx5dr_ste {
	/* refcount: indicates the num of rules that using this ste */
	u32 refcount;

	/* this ste is part of a rule, located in ste's chain */
	u8 ste_chain_location;

	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;

	/* this ste is member of htbl */
	struct mlx5dr_ste_htbl *htbl;

	/* hash table this STE points to, if any */
	struct mlx5dr_ste_htbl *next_htbl;

	/* The rule this STE belongs to */
	struct mlx5dr_rule_rx_tx *rule_rx_tx;
};
179
/* Occupancy statistics for an STE hash table */
struct mlx5dr_ste_htbl_ctrl {
	/* total number of valid entries belonging to this hash table. This
	 * includes the non collision and collision entries
	 */
	unsigned int num_of_valid_entries;

	/* total number of collisions entries attached to this table */
	unsigned int num_of_collisions;
};
189
/* An STE hash table; refcount is managed via mlx5dr_htbl_get/put and the
 * table is freed when it drops to zero.
 */
struct mlx5dr_ste_htbl {
	u16 lu_type;
	u16 byte_mask;
	u32 refcount;
	/* ICM memory backing this table's entries */
	struct mlx5dr_icm_chunk *chunk;
	/* the STE that points at this table */
	struct mlx5dr_ste *pointing_ste;
	struct mlx5dr_ste_htbl_ctrl ctrl;
};
198
/* A pending STE write, queued on a send list before being posted to HW */
struct mlx5dr_ste_send_info {
	struct mlx5dr_ste *ste;
	struct list_head send_list;
	u16 size;
	u16 offset;
	/* inline copy of the data; used when the caller requests copy_data
	 * in mlx5dr_send_fill_and_append_ste_send_info()
	 */
	u8 data_cont[DR_STE_SIZE];
	u8 *data;
};
207
208 void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
209 u16 offset, u8 *data,
210 struct mlx5dr_ste_send_info *ste_info,
211 struct list_head *send_list,
212 bool copy_data);
213
/* One step of a matcher's STE-building pipeline: holds the lookup type,
 * byte/bit masks and the callback that writes the match tag.
 */
struct mlx5dr_ste_build {
	u8 inner:1;		/* build against inner headers */
	u8 rx:1;		/* RX (vs TX) direction */
	u8 vhca_id_valid:1;
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_caps *caps;
	u16 lu_type;
	u16 byte_mask;
	u8 bit_mask[DR_STE_SIZE_MASK];
	/* writes the STE tag for @spec; returns 0 on success */
	int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
				  struct mlx5dr_ste_build *sb,
				  u8 *tag);
};
227
228 struct mlx5dr_ste_htbl *
229 mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
230 enum mlx5dr_icm_chunk_size chunk_size,
231 u16 lu_type, u16 byte_mask);
232
233 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
234
mlx5dr_htbl_put(struct mlx5dr_ste_htbl * htbl)235 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
236 {
237 htbl->refcount--;
238 if (!htbl->refcount)
239 mlx5dr_ste_htbl_free(htbl);
240 }
241
mlx5dr_htbl_get(struct mlx5dr_ste_htbl * htbl)242 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
243 {
244 htbl->refcount++;
245 }
246
247 /* STE utils */
248 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
249 bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_ste_p);
250 void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
251 u8 *hw_ste, u64 miss_addr);
252 void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
253 u8 *hw_ste, u64 icm_addr, u32 ht_size);
254 void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
255 u8 *hw_ste,
256 struct mlx5dr_ste_htbl *next_htbl);
257 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
258 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
259 u8 ste_location);
260 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
261 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
262 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
263
264 #define MLX5DR_MAX_VLANS 2
265 #define MLX5DR_INVALID_PATTERN_INDEX 0xffffffff
266
/* Aggregated attributes consumed by mlx5dr_ste_set_actions_rx()/_tx()
 * when encoding a rule's actions into STEs.
 */
struct mlx5dr_ste_actions_attr {
	u32 modify_index;
	u32 modify_pat_idx;
	u16 modify_actions;
	u8 *single_modify_action;
	u32 decap_index;
	u32 decap_pat_idx;
	u16 decap_actions;
	u8 decap_with_vlan:1;
	u64 final_icm_addr;
	u32 flow_tag;
	u32 ctr_id;
	u16 gvmi;
	u16 hit_gvmi;
	/* packet reformat (encap/decap/insert/remove header) parameters */
	struct {
		u32 id;
		u32 size;
		u8 param_0;
		u8 param_1;
	} reformat;
	/* VLAN headers to push (up to MLX5DR_MAX_VLANS) */
	struct {
		int count;
		u32 headers[MLX5DR_MAX_VLANS];
	} vlans;

	/* ASO flow meter action parameters */
	struct {
		u32 obj_id;
		u32 offset;
		u8 dest_reg_id;
		u8 init_color;
	} aso_flow_meter;

	/* range-match action parameters */
	struct {
		u64 miss_icm_addr;
		u32 definer_id;
		u32 min;
		u32 max;
	} range;
};
306
307 void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
308 struct mlx5dr_domain *dmn,
309 u8 *action_type_set,
310 u8 *last_ste,
311 struct mlx5dr_ste_actions_attr *attr,
312 u32 *added_stes);
313 void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
314 struct mlx5dr_domain *dmn,
315 u8 *action_type_set,
316 u8 *last_ste,
317 struct mlx5dr_ste_actions_attr *attr,
318 u32 *added_stes);
319
320 void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
321 __be64 *hw_action,
322 u8 hw_field,
323 u8 shifter,
324 u8 length,
325 u32 data);
326 void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
327 __be64 *hw_action,
328 u8 hw_field,
329 u8 shifter,
330 u8 length,
331 u32 data);
332 void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
333 __be64 *hw_action,
334 u8 dst_hw_field,
335 u8 dst_shifter,
336 u8 dst_len,
337 u8 src_hw_field,
338 u8 src_shifter);
339 int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
340 void *data,
341 u32 data_sz,
342 u8 *hw_action,
343 u32 hw_action_sz,
344 u16 *used_hw_action_num);
345 int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action);
346 void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action);
347
348 const struct mlx5dr_ste_action_modify_field *
349 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
350
351 struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
352 void mlx5dr_ste_free(struct mlx5dr_ste *ste,
353 struct mlx5dr_matcher *matcher,
354 struct mlx5dr_matcher_rx_tx *nic_matcher);
mlx5dr_ste_put(struct mlx5dr_ste * ste,struct mlx5dr_matcher * matcher,struct mlx5dr_matcher_rx_tx * nic_matcher)355 static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
356 struct mlx5dr_matcher *matcher,
357 struct mlx5dr_matcher_rx_tx *nic_matcher)
358 {
359 ste->refcount--;
360 if (!ste->refcount)
361 mlx5dr_ste_free(ste, matcher, nic_matcher);
362 }
363
364 /* initial as 0, increased only when ste appears in a new rule */
mlx5dr_ste_get(struct mlx5dr_ste * ste)365 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
366 {
367 ste->refcount++;
368 }
369
mlx5dr_ste_is_not_used(struct mlx5dr_ste * ste)370 static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
371 {
372 return !ste->refcount;
373 }
374
375 bool mlx5dr_ste_equal_tag(void *src, void *dst);
376 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
377 struct mlx5dr_matcher_rx_tx *nic_matcher,
378 struct mlx5dr_ste *ste,
379 u8 *cur_hw_ste,
380 enum mlx5dr_icm_chunk_size log_table_size);
381
382 /* STE build functions */
383 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
384 u8 match_criteria,
385 struct mlx5dr_match_param *mask,
386 struct mlx5dr_match_param *value);
387 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
388 struct mlx5dr_matcher_rx_tx *nic_matcher,
389 struct mlx5dr_match_param *value,
390 u8 *ste_arr);
391 void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
392 struct mlx5dr_ste_build *builder,
393 struct mlx5dr_match_param *mask,
394 bool inner, bool rx);
395 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
396 struct mlx5dr_ste_build *sb,
397 struct mlx5dr_match_param *mask,
398 bool inner, bool rx);
399 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
400 struct mlx5dr_ste_build *sb,
401 struct mlx5dr_match_param *mask,
402 bool inner, bool rx);
403 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
404 struct mlx5dr_ste_build *sb,
405 struct mlx5dr_match_param *mask,
406 bool inner, bool rx);
407 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
408 struct mlx5dr_ste_build *sb,
409 struct mlx5dr_match_param *mask,
410 bool inner, bool rx);
411 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
412 struct mlx5dr_ste_build *sb,
413 struct mlx5dr_match_param *mask,
414 bool inner, bool rx);
415 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
416 struct mlx5dr_ste_build *sb,
417 struct mlx5dr_match_param *mask,
418 bool inner, bool rx);
419 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
420 struct mlx5dr_ste_build *sb,
421 struct mlx5dr_match_param *mask,
422 bool inner, bool rx);
423 void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
424 struct mlx5dr_ste_build *sb,
425 struct mlx5dr_match_param *mask,
426 bool inner, bool rx);
427 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
428 struct mlx5dr_ste_build *sb,
429 struct mlx5dr_match_param *mask,
430 bool inner, bool rx);
431 void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
432 struct mlx5dr_ste_build *sb,
433 struct mlx5dr_match_param *mask,
434 bool inner, bool rx);
435 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
436 struct mlx5dr_ste_build *sb,
437 struct mlx5dr_match_param *mask,
438 bool inner, bool rx);
439 void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
440 struct mlx5dr_ste_build *sb,
441 struct mlx5dr_match_param *mask,
442 struct mlx5dr_cmd_caps *caps,
443 bool inner, bool rx);
444 void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
445 struct mlx5dr_ste_build *sb,
446 struct mlx5dr_match_param *mask,
447 struct mlx5dr_cmd_caps *caps,
448 bool inner, bool rx);
449 void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
450 struct mlx5dr_ste_build *sb,
451 struct mlx5dr_match_param *mask,
452 struct mlx5dr_cmd_caps *caps,
453 bool inner, bool rx);
454 void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
455 struct mlx5dr_ste_build *sb,
456 struct mlx5dr_match_param *mask,
457 bool inner, bool rx);
458 void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
459 struct mlx5dr_ste_build *sb,
460 struct mlx5dr_match_param *mask,
461 bool inner, bool rx);
462 void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
463 struct mlx5dr_ste_build *sb,
464 struct mlx5dr_match_param *mask,
465 struct mlx5dr_cmd_caps *caps,
466 bool inner, bool rx);
467 void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
468 struct mlx5dr_ste_build *sb,
469 struct mlx5dr_match_param *mask,
470 struct mlx5dr_cmd_caps *caps,
471 bool inner, bool rx);
472 void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
473 struct mlx5dr_ste_build *sb,
474 struct mlx5dr_match_param *mask,
475 bool inner, bool rx);
476 void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
477 struct mlx5dr_ste_build *sb,
478 struct mlx5dr_match_param *mask,
479 struct mlx5dr_cmd_caps *caps,
480 bool inner, bool rx);
481 void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
482 struct mlx5dr_ste_build *sb,
483 struct mlx5dr_match_param *mask,
484 struct mlx5dr_cmd_caps *caps,
485 bool inner, bool rx);
486 void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
487 struct mlx5dr_ste_build *sb,
488 struct mlx5dr_match_param *mask,
489 bool inner, bool rx);
490 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
491 struct mlx5dr_ste_build *sb,
492 struct mlx5dr_match_param *mask,
493 bool inner, bool rx);
494 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
495 struct mlx5dr_ste_build *sb,
496 struct mlx5dr_match_param *mask,
497 bool inner, bool rx);
498 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
499 struct mlx5dr_ste_build *sb,
500 struct mlx5dr_match_param *mask,
501 bool inner, bool rx);
502 void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
503 struct mlx5dr_ste_build *sb,
504 struct mlx5dr_match_param *mask,
505 struct mlx5dr_domain *dmn,
506 bool inner, bool rx);
507 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
508 struct mlx5dr_ste_build *sb,
509 struct mlx5dr_match_param *mask,
510 bool inner, bool rx);
511 void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
512 struct mlx5dr_ste_build *sb,
513 struct mlx5dr_match_param *mask,
514 bool inner, bool rx);
515 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
516
517 /* Actions utils */
518 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
519 struct mlx5dr_matcher_rx_tx *nic_matcher,
520 struct mlx5dr_action *actions[],
521 u32 num_actions,
522 u8 *ste_arr,
523 u32 *new_hw_ste_arr_sz);
524
/* L2-L4 header match fields (used for both the outer and inner headers,
 * see struct mlx5dr_match_param)
 */
struct mlx5dr_match_spec {
	u32 smac_47_16; /* Source MAC address of incoming packet */
	/* Incoming packet Ethertype - this is the Ethertype
	 * following the last VLAN tag of the packet
	 */
	u32 smac_15_0:16; /* Source MAC address of incoming packet */
	u32 ethertype:16;

	u32 dmac_47_16; /* Destination MAC address of incoming packet */

	u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
	/* Priority of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_prio:3;
	/* CFI bit of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_cfi:1;
	/* VLAN ID of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_vid:12;

	u32 ip_protocol:8; /* IP protocol */
	/* Differentiated Services Code Point derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_dscp:6;
	/* Explicit Congestion Notification derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_ecn:2;
	/* The first vlan in the packet is c-vlan (0x8100).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 cvlan_tag:1;
	/* The first vlan in the packet is s-vlan (0x8a88).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 svlan_tag:1;
	u32 frag:1; /* Packet is an IP fragment */
	u32 ip_version:4; /* IP version */
	/* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
	 * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
	 */
	u32 tcp_flags:9;

	/* TCP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 tcp_sport:16;
	/* TCP destination port.
	 * tcp and udp sport/dport are mutually exclusive
	 */
	u32 tcp_dport:16;

	u32 reserved_auto1:16;
	u32 ipv4_ihl:4;
	u32 reserved_auto2:4;
	u32 ttl_hoplimit:8;

	/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_sport:16;
	/* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_dport:16;

	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_127_96;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_95_64;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_63_32;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_31_0;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_127_96;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_95_64;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_63_32;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_31_0;
};
631
/* Miscellaneous match fields: source port/SQN, second VLAN tags, and
 * GRE/VXLAN/GENEVE/BTH tunnel headers.
 * NOTE(review): the second-VLAN c-vlan/s-vlan comments below were shifted
 * off their fields in the original text; they have been realigned to the
 * fields they describe.
 */
struct mlx5dr_match_misc {
	/* used with GRE, checksum exist when gre_c_present == 1 */
	u32 gre_c_present:1;
	u32 reserved_auto1:1;
	/* used with GRE, key exist when gre_k_present == 1 */
	u32 gre_k_present:1;
	/* used with GRE, sequence number exist when gre_s_present == 1 */
	u32 gre_s_present:1;
	u32 source_vhca_port:4;
	u32 source_sqn:24; /* Source SQN */

	u32 source_eswitch_owner_vhca_id:16;
	/* Source port.;0xffff determines wire port */
	u32 source_port:16;

	/* Priority of second VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_prio:3;
	/* CFI bit of first VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_cfi:1;
	/* VLAN ID of first VLAN tag the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_vid:12;
	/* Priority of second VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_prio:3;
	/* CFI bit of first VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_cfi:1;
	/* VLAN ID of first VLAN tag the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_vid:12;

	/* The second vlan in the outer header of the packet is c-vlan (0x8100).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_cvlan_tag:1;
	/* The second vlan in the inner header of the packet is c-vlan (0x8100).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_cvlan_tag:1;
	/* The second vlan in the outer header of the packet is s-vlan (0x8a88).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_svlan_tag:1;
	/* The second vlan in the inner header of the packet is s-vlan (0x8a88).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_svlan_tag:1;
	u32 reserved_auto2:12;
	u32 gre_protocol:16; /* GRE Protocol (outer) */

	u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
	u32 gre_key_l:8; /* GRE Key [7:0] (outer) */

	u32 vxlan_vni:24; /* VXLAN VNI (outer) */
	u32 reserved_auto3:8;

	u32 geneve_vni:24; /* GENEVE VNI field (outer) */
	u32 reserved_auto4:6;
	u32 geneve_tlv_option_0_exist:1;
	u32 geneve_oam:1; /* GENEVE OAM field (outer) */

	u32 reserved_auto5:12;
	u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */

	u32 reserved_auto6:12;
	u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */

	u32 reserved_auto7:10;
	u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
	u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */

	u32 reserved_auto8:8;
	u32 bth_dst_qp:24; /* Destination QP in BTH header */

	u32 reserved_auto9;
	u32 outer_esp_spi;
	u32 reserved_auto10[3];
};
719
/* MPLS header fields and metadata registers */
struct mlx5dr_match_misc2 {
	u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
	u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
	u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
	u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */

	u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
	u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
	u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
	u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */

	u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
	u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */

	u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
	u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */

	u32 metadata_reg_c_7; /* metadata_reg_c_7 */
	u32 metadata_reg_c_6; /* metadata_reg_c_6 */
	u32 metadata_reg_c_5; /* metadata_reg_c_5 */
	u32 metadata_reg_c_4; /* metadata_reg_c_4 */
	u32 metadata_reg_c_3; /* metadata_reg_c_3 */
	u32 metadata_reg_c_2; /* metadata_reg_c_2 */
	u32 metadata_reg_c_1; /* metadata_reg_c_1 */
	u32 metadata_reg_c_0; /* metadata_reg_c_0 */
	u32 metadata_reg_a; /* metadata_reg_a */
	u32 reserved_auto1[3];
};
752
/* TCP sequence/ack numbers, VXLAN-GPE, ICMP, GENEVE TLV and GTP-U fields */
struct mlx5dr_match_misc3 {
	u32 inner_tcp_seq_num;
	u32 outer_tcp_seq_num;
	u32 inner_tcp_ack_num;
	u32 outer_tcp_ack_num;

	u32 reserved_auto1:8;
	u32 outer_vxlan_gpe_vni:24;

	u32 outer_vxlan_gpe_next_protocol:8;
	u32 outer_vxlan_gpe_flags:8;
	u32 reserved_auto2:16;

	u32 icmpv4_header_data;
	u32 icmpv6_header_data;

	u8 icmpv4_type;
	u8 icmpv4_code;
	u8 icmpv6_type;
	u8 icmpv6_code;

	u32 geneve_tlv_option_0_data;

	u32 gtpu_teid;

	u8 gtpu_msg_type;
	u8 gtpu_msg_flags;
	u32 reserved_auto3:16;

	u32 gtpu_dw_2;
	u32 gtpu_first_ext_dw_0;
	u32 gtpu_dw_0;
	u32 reserved_auto4;
};
787
/* Programmable (flex parser) sample fields: value/id pairs 0..3 */
struct mlx5dr_match_misc4 {
	u32 prog_sample_field_value_0;
	u32 prog_sample_field_id_0;
	u32 prog_sample_field_value_1;
	u32 prog_sample_field_id_1;
	u32 prog_sample_field_value_2;
	u32 prog_sample_field_id_2;
	u32 prog_sample_field_value_3;
	u32 prog_sample_field_id_3;
	u32 reserved_auto1[8];
};
799
/* MACsec tag and generic tunnel header dwords */
struct mlx5dr_match_misc5 {
	u32 macsec_tag_0;
	u32 macsec_tag_1;
	u32 macsec_tag_2;
	u32 macsec_tag_3;
	u32 tunnel_header_0;
	u32 tunnel_header_1;
	u32 tunnel_header_2;
	u32 tunnel_header_3;
};
810
/* Full match parameter set; each member corresponds to one
 * DR_MATCHER_CRITERIA_* section.
 */
struct mlx5dr_match_param {
	struct mlx5dr_match_spec outer;
	struct mlx5dr_match_misc misc;
	struct mlx5dr_match_spec inner;
	struct mlx5dr_match_misc2 misc2;
	struct mlx5dr_match_misc3 misc3;
	struct mlx5dr_match_misc4 misc4;
	struct mlx5dr_match_misc5 misc5;
};
820
/* Helpers that test whether a mask selects any ICMPv4 / source-IP /
 * destination-IP field, respectively.
 */
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
				       (_misc3)->icmpv4_code || \
				       (_misc3)->icmpv4_header_data)

#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
				      (_spec)->src_ip_95_64 || \
				      (_spec)->src_ip_63_32 || \
				      (_spec)->src_ip_31_0)

#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
				      (_spec)->dst_ip_95_64 || \
				      (_spec)->dst_ip_63_32 || \
				      (_spec)->dst_ip_31_0)
834
/* Eswitch drop/uplink ICM addresses and SW-steering ownership flags */
struct mlx5dr_esw_caps {
	u64 drop_icm_address_rx;
	u64 drop_icm_address_tx;
	u64 uplink_icm_address_rx;
	u64 uplink_icm_address_tx;
	u8 sw_owner:1;
	u8 sw_owner_v2:1;
};
843
/* Per-vport capabilities: GVMIs, vport number and RX/TX ICM addresses */
struct mlx5dr_cmd_vport_cap {
	u16 vport_gvmi;
	u16 vhca_gvmi;
	u16 num;		/* vport number */
	u64 icm_address_rx;
	u64 icm_address_tx;
};
851
/* RoCE-related capability bits */
struct mlx5dr_roce_cap {
	u8 roce_en:1;
	u8 fl_rc_qp_when_roce_disabled:1;	/* force-loopback RC QP allowed */
	u8 fl_rc_qp_when_roce_enabled:1;
};
857
/* Capabilities of all vports: eswitch manager, uplink, and an xarray of
 * the remaining vports' caps.
 */
struct mlx5dr_vports {
	struct mlx5dr_cmd_vport_cap esw_manager_caps;
	struct mlx5dr_cmd_vport_cap uplink_caps;
	struct xarray vports_caps_xa;
};
863
/* Device capabilities relevant to SW steering: ICM addresses/sizes, flex
 * parser ids, ownership flags and modify-header argument support.
 */
struct mlx5dr_cmd_caps {
	u16 gvmi;
	u64 nic_rx_drop_address;
	u64 nic_tx_drop_address;
	u64 nic_tx_allow_address;
	u64 esw_rx_drop_address;
	u64 esw_tx_drop_address;
	u32 log_icm_size;
	u64 hdr_modify_icm_addr;
	u32 log_modify_pattern_icm_size;
	u64 hdr_modify_pattern_icm_addr;
	u32 flex_protocols;
	/* flex parser ids for the protocols the device sampled */
	u8 flex_parser_id_icmp_dw0;
	u8 flex_parser_id_icmp_dw1;
	u8 flex_parser_id_icmpv6_dw0;
	u8 flex_parser_id_icmpv6_dw1;
	u8 flex_parser_id_geneve_tlv_option_0;
	u8 flex_parser_id_mpls_over_gre;
	u8 flex_parser_id_mpls_over_udp;
	u8 flex_parser_id_gtpu_dw_0;
	u8 flex_parser_id_gtpu_teid;
	u8 flex_parser_id_gtpu_dw_2;
	u8 flex_parser_id_gtpu_first_ext_dw_0;
	u8 flex_parser_ok_bits_supp;
	u8 max_ft_level;
	u16 roce_min_src_udp;
	u8 sw_format_ver;
	bool eswitch_manager;
	/* SW-steering ownership per table type (v1 and v2 STE formats) */
	bool rx_sw_owner;
	bool tx_sw_owner;
	bool fdb_sw_owner;
	u8 rx_sw_owner_v2:1;
	u8 tx_sw_owner_v2:1;
	u8 fdb_sw_owner_v2:1;
	struct mlx5dr_esw_caps esw_caps;
	struct mlx5dr_vports vports;
	bool prio_tag_required;
	struct mlx5dr_roce_cap roce_caps;
	u16 log_header_modify_argument_granularity;
	u16 log_header_modify_argument_max_alloc;
	bool support_modify_argument;
	u8 is_ecpf:1;
	u8 isolate_vl_tc:1;
};
908
/* Direction of a NIC domain: receive or transmit */
enum mlx5dr_domain_nic_type {
	DR_DOMAIN_NIC_TYPE_RX,
	DR_DOMAIN_NIC_TYPE_TX,
};
913
/* Per-direction (RX or TX) domain state */
struct mlx5dr_domain_rx_tx {
	u64 drop_icm_addr;	/* miss/drop destination */
	u64 default_icm_addr;
	enum mlx5dr_domain_nic_type type;
	struct mutex mutex; /* protect rx/tx domain */
};
920
/* Domain-wide limits, caps and the RX/TX sub-domains */
struct mlx5dr_domain_info {
	bool supp_sw_steering;	/* device supports SW steering */
	u32 max_inline_size;
	u32 max_send_wr;
	u32 max_log_sw_icm_sz;
	u32 max_log_action_icm_sz;
	u32 max_log_modify_hdr_pattern_icm_sz;
	struct mlx5dr_domain_rx_tx rx;
	struct mlx5dr_domain_rx_tx tx;
	struct mlx5dr_cmd_caps caps;
};
932
/* Top-level SW steering domain: owns the ICM pools, send ring, pattern/
 * argument managers and all per-domain lookup structures.
 */
struct mlx5dr_domain {
	struct mlx5_core_dev *mdev;
	u32 pdn;			/* protection domain number */
	struct mlx5_uars_page *uar;
	enum mlx5dr_domain_type type;
	refcount_t refcount;
	/* ICM pools for STEs and for modify-header actions */
	struct mlx5dr_icm_pool *ste_icm_pool;
	struct mlx5dr_icm_pool *action_icm_pool;
	struct mlx5dr_send_info_pool *send_info_pool_rx;
	struct mlx5dr_send_info_pool *send_info_pool_tx;
	struct kmem_cache *chunks_kmem_cache;
	struct kmem_cache *htbls_kmem_cache;
	struct mlx5dr_ptrn_mgr *ptrn_mgr;	/* modify-header patterns */
	struct mlx5dr_arg_mgr *arg_mgr;		/* modify-header arguments */
	struct mlx5dr_send_ring *send_ring;
	struct mlx5dr_domain_info info;
	struct xarray csum_fts_xa;
	struct mlx5dr_ste_ctx *ste_ctx;
	struct list_head dbg_tbl_list;
	struct mlx5dr_dbg_dump_info dump_info;
	struct xarray definers_xa;
	struct xarray peer_dmn_xa;
	/* memory management statistics */
	u32 num_buddies[DR_ICM_TYPE_MAX];
};
958
/* Per-direction table state: anchor hash table and its matchers */
struct mlx5dr_table_rx_tx {
	struct mlx5dr_ste_htbl *s_anchor;	/* table entry point */
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u64 default_icm_addr;
	struct list_head nic_matcher_list;
};
965
/* SW steering flow table with its RX and TX halves */
struct mlx5dr_table {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_table_rx_tx rx;
	struct mlx5dr_table_rx_tx tx;
	u32 level;
	u32 table_type;
	u32 table_id;
	u32 flags;
	struct list_head matcher_list;
	struct mlx5dr_action *miss_action;	/* executed on table miss */
	refcount_t refcount;
	struct list_head dbg_node;
};
979
/* Per-direction matcher state: start/end anchor tables and the STE
 * builder pipelines.
 */
struct mlx5dr_matcher_rx_tx {
	struct mlx5dr_ste_htbl *s_htbl;		/* matcher entry hash table */
	struct mlx5dr_ste_htbl *e_anchor;	/* end anchor table */
	/* currently selected builder array out of ste_builder_arr */
	struct mlx5dr_ste_build *ste_builder;
	/* builders per IP version pair — presumably [outer][inner]; verify
	 * against the matcher build code
	 */
	struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
					       [DR_RULE_IPV_MAX]
					       [DR_RULE_MAX_STES];
	u8 num_of_builders;
	u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
	u64 default_icm_addr;
	struct mlx5dr_table_rx_tx *nic_tbl;
	u32 prio;
	struct list_head list_node;
	u32 rules;		/* number of rules on this matcher */
};
995
996 struct mlx5dr_matcher {
997 struct mlx5dr_table *tbl;
998 struct mlx5dr_matcher_rx_tx rx;
999 struct mlx5dr_matcher_rx_tx tx;
1000 struct list_head list_node; /* Used for both matchers and dbg managing */
1001 u32 prio;
1002 struct mlx5dr_match_param mask;
1003 u8 match_criteria;
1004 refcount_t refcount;
1005 struct list_head dbg_rule_list;
1006 };
1007
/* Describes where a modify-header field lives in the HW action layout. */
struct mlx5dr_ste_action_modify_field {
	u16 hw_field;
	u8 start;	/* first bit of the field */
	u8 end;		/* last bit of the field */
	u8 l3_type;
	u8 l4_type;
};

/* A cached modify-header pattern, refcounted and kept on the pattern
 * manager's list (see mlx5dr_ptrn_cache_get/put_pattern).
 */
struct mlx5dr_ptrn_obj {
	struct mlx5dr_icm_chunk *chunk;
	u8 *data;
	u16 num_of_actions;
	u32 index;
	refcount_t refcount;
	struct list_head list;
};

/* A modify-header argument object allocated from the argument manager. */
struct mlx5dr_arg_obj {
	u32 obj_id;
	u32 obj_offset;
	struct list_head list_node;
	u32 log_chunk_size;
};
1031
/* State of a modify-header (rewrite) action. When pattern/argument objects
 * are supported, ptrn/arg are used instead of a private ICM chunk.
 */
struct mlx5dr_action_rewrite {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_icm_chunk *chunk;
	u8 *data;			/* the raw modify actions */
	u16 num_of_actions;
	u32 index;
	u8 single_action_opt:1;		/* single action optimization in use */
	u8 allow_rx:1;
	u8 allow_tx:1;
	u8 modify_ttl:1;
	struct mlx5dr_ptrn_obj *ptrn;
	struct mlx5dr_arg_obj *arg;
};

/* State of a packet reformat (encap/decap) action. */
struct mlx5dr_action_reformat {
	struct mlx5dr_domain *dmn;
	u32 id;				/* FW reformat context id */
	u32 size;
	u8 param_0;
	u8 param_1;
};

/* State of a flow-sampler action (ICM addresses queried from FW). */
struct mlx5dr_action_sampler {
	struct mlx5dr_domain *dmn;
	u64 rx_icm_addr;
	u64 tx_icm_addr;
	u32 sampler_id;
};
1060
/* Destination-table action: either an SW steering table or a FW table
 * (the union discriminated by is_fw_tbl).
 */
struct mlx5dr_action_dest_tbl {
	u8 is_fw_tbl:1;
	u8 is_wire_ft:1;
	union {
		struct mlx5dr_table *tbl;	/* when !is_fw_tbl */
		struct {
			struct mlx5dr_domain *dmn;
			u32 id;
			u32 group_id;
			enum fs_flow_table_type type;
			u64 rx_icm_addr;
			u64 tx_icm_addr;
			/* actions referenced by this FW table's FTE */
			struct mlx5dr_action **ref_actions;
			u32 num_of_ref_actions;
		} fw_tbl;
	};
};

/* Match-range action: jump to hit/miss tables based on a definer match. */
struct mlx5dr_action_range {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_action *hit_tbl_action;
	struct mlx5dr_action *miss_tbl_action;
	u32 definer_id;
	u32 min;
	u32 max;
};

/* Flow-counter action. */
struct mlx5dr_action_ctr {
	u32 ctr_id;
	u32 offset;	/* offset within a counter bulk */
};

/* Forward-to-vport action. */
struct mlx5dr_action_vport {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_vport_cap *caps;
};

/* Push-VLAN action. */
struct mlx5dr_action_push_vlan {
	u32 vlan_hdr; /* tpid_pcp_dei_vid */
};

/* Flow-tag action. */
struct mlx5dr_action_flow_tag {
	u32 flow_tag;
};

/* Links an action onto a rule's rule_actions_list. */
struct mlx5dr_rule_action_member {
	struct mlx5dr_action *action;
	struct list_head list;
};

/* ASO flow-meter action. */
struct mlx5dr_action_aso_flow_meter {
	struct mlx5dr_domain *dmn;
	u32 obj_id;
	u32 offset;
	u8 dest_reg_id;
	u8 init_color;
};
1118
/* Generic refcounted steering action; the union member in use is selected
 * by action_type (data gives untyped access to whichever is allocated).
 */
struct mlx5dr_action {
	enum mlx5dr_action_type action_type;
	refcount_t refcount;

	union {
		void *data;
		struct mlx5dr_action_rewrite *rewrite;
		struct mlx5dr_action_reformat *reformat;
		struct mlx5dr_action_sampler *sampler;
		struct mlx5dr_action_dest_tbl *dest_tbl;
		struct mlx5dr_action_ctr *ctr;
		struct mlx5dr_action_vport *vport;
		struct mlx5dr_action_push_vlan *push_vlan;
		struct mlx5dr_action_flow_tag *flow_tag;
		struct mlx5dr_action_aso_flow_meter *aso;
		struct mlx5dr_action_range *range;
	};
};
1137
/* How a hash table is connected to what follows it. */
enum mlx5dr_connect_type {
	CONNECT_HIT = 1,	/* connect to a next hash table */
	CONNECT_MISS = 2,	/* connect to a miss ICM address */
};

/* Connection target for a hash table; union selected by type. */
struct mlx5dr_htbl_connect_info {
	enum mlx5dr_connect_type type;
	union {
		struct mlx5dr_ste_htbl *hit_next_htbl;
		u64 miss_icm_addr;
	};
};

/* Per-direction part of a rule: the NIC matcher it was added to and the
 * last STE of its chain.
 */
struct mlx5dr_rule_rx_tx {
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste *last_rule_ste;
};

/* A rule: concrete match values plus actions, inserted through a matcher. */
struct mlx5dr_rule {
	struct mlx5dr_matcher *matcher;
	struct mlx5dr_rule_rx_tx rx;
	struct mlx5dr_rule_rx_tx tx;
	struct list_head rule_actions_list;	/* mlx5dr_rule_action_member */
	struct list_head dbg_node;
	u32 flow_source;
};
1164
/* Rule STE-chain helpers. */
void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force);
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes);
1171
/* A chunk of ICM memory carved out of a buddy allocator. */
struct mlx5dr_icm_chunk {
	struct mlx5dr_icm_buddy_mem *buddy_mem;

	/* indicates the index of this chunk in the whole memory,
	 * used for deleting the chunk from the buddy
	 */
	unsigned int seg;
	enum mlx5dr_icm_chunk_size size;	/* log2 number of entries */

	/* Memory optimisation */
	struct mlx5dr_ste *ste_arr;	/* shadow STE structs for this chunk */
	u8 *hw_ste_arr;			/* cached HW STE data */
	struct list_head *miss_list;
};
1186
/* Serialize modifications of one NIC direction (RX or TX) of a domain. */
static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
	mutex_lock(&nic_dmn->mutex);
}
1191
/* Release the per-NIC-direction lock taken by mlx5dr_domain_nic_lock(). */
static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
	mutex_unlock(&nic_dmn->mutex);
}
1196
/* Lock both directions of the domain. Lock order is RX then TX; the
 * matching unlock releases in reverse order.
 */
static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
{
	mlx5dr_domain_nic_lock(&dmn->info.rx);
	mlx5dr_domain_nic_lock(&dmn->info.tx);
}
1202
/* Unlock both directions of the domain, in reverse of the locking order. */
static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
{
	mlx5dr_domain_nic_unlock(&dmn->info.tx);
	mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
1208
/* Matcher <-> NIC table management. */
int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
				  struct mlx5dr_matcher_rx_tx *nic_matcher);
int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
				       struct mlx5dr_matcher_rx_tx *nic_matcher);

int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
				   struct mlx5dr_matcher_rx_tx *nic_matcher,
				   enum mlx5dr_ipv outer_ipv,
				   enum mlx5dr_ipv inner_ipv);

/* ICM chunk accessors. */
u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);

struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool);
void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl);
1228
1229 static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)1230 mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
1231 {
1232 if (icm_type == DR_ICM_TYPE_STE)
1233 return DR_STE_SIZE;
1234
1235 return DR_MODIFY_ACTION_SIZE;
1236 }
1237
/* Number of entries in a chunk; chunk_size is the log2 entry count. */
static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
{
	return 1 << chunk_size;
}
1243
1244 static inline int
mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,enum mlx5dr_icm_type icm_type)1245 mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
1246 enum mlx5dr_icm_type icm_type)
1247 {
1248 int num_of_entries;
1249 int entry_size;
1250
1251 entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
1252 num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
1253
1254 return entry_size * num_of_entries;
1255 }
1256
1257 static inline int
mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl * htbl)1258 mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
1259 {
1260 int num_of_entries =
1261 mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
1262
1263 /* Threshold is 50%, one is added to table of size 1 */
1264 return (num_of_entries + 1) / 2;
1265 }
1266
1267 static inline bool
mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl * htbl)1268 mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
1269 {
1270 if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
1271 return false;
1272
1273 return true;
1274 }
1275
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);

/* Output of a FW query_flow_table command. */
struct mlx5dr_cmd_query_flow_table_details {
	u8 status;
	u8 level;
	u64 sw_owner_icm_root_1;
	u64 sw_owner_icm_root_0;
};

/* Parameters for creating a flow table through FW. */
struct mlx5dr_cmd_create_flow_table_attr {
	u32 table_type;
	u16 uid;
	u64 icm_addr_rx;	/* used when sw_owner */
	u64 icm_addr_tx;	/* used when sw_owner */
	u8 level;
	bool sw_owner;		/* table is SW-steering owned */
	bool term_tbl;
	bool decap_en;
	bool reformat_en;
};
1297
/* internal API functions */

/* Capability / resource queries. */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps);
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport, u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx);
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
			  bool other_vport, u16 vport_number, u16 *gvmi);
int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
			      struct mlx5dr_esw_caps *caps);
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr);
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);

/* Flow table / group / FTE management via FW commands. */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id);
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id);
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id);
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id);
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type);
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output);

/* Reformat context and definer object management. */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id);
int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
			      u16 format_id,
			      u8 *dw_selectors,
			      u8 *byte_selectors,
			      u8 *match_mask,
			      u32 *definer_id);
void mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev,
				u32 definer_id);
1366
/* GID attributes returned by mlx5dr_cmd_query_gid(). */
struct mlx5dr_cmd_gid_attr {
	u8 gid[16];
	u8 mac[6];
	u32 roce_ver;
};

int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr);

/* Modify-header argument objects (FW general objects). */
int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
					u16 log_obj_range, u32 pd,
					u32 *obj_id);
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
					  u32 obj_id);

/* Refcounted definer get/put on the domain's definers_xa. */
int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
		       u8 *dw_selectors, u8 *byte_selectors,
		       u8 *match_mask, u32 *definer_id);
void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);

/* ICM pool lifetime and chunk alloc/free. */
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);

struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);

/* STE hash table construction and population. */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info);
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clear);
1413
/* QP used by the send ring to write STEs/actions into ICM via RDMA. */
struct mlx5dr_qp {
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_qp wq;
	struct mlx5_uars_page *uar;
	struct mlx5_wq_ctrl wq_ctrl;
	u32 qpn;
	struct {
		unsigned int head;
		unsigned int pc;	/* producer counter */
		unsigned int cc;	/* consumer counter */
		unsigned int size;
		unsigned int *wqe_head;
		unsigned int wqe_cnt;
	} sq;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int size;
		unsigned int wqe_cnt;
	} rq;
	int max_inline_data;
};

/* Completion queue serving the send ring's QP. */
struct mlx5dr_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	struct mlx5dr_qp *qp;
};

/* A registered memory region used as the send ring's data buffer. */
struct mlx5dr_mr {
	struct mlx5_core_dev *mdev;
	u32 mkey;
	dma_addr_t dma_addr;
	void *addr;
	size_t size;
};

/* The ring used to post writes of steering entries to the device. */
struct mlx5dr_send_ring {
	struct mlx5dr_cq *cq;
	struct mlx5dr_qp *qp;
	struct mlx5dr_mr *mr;
	/* Number of WQEs waiting for completion */
	u32 pending_wqe;
	/* Request a completion signal once per this threshold of WQEs */
	u16 signal_th;
	/* Each post_send_size less than max_post_send_size */
	u32 max_post_send_size;
	/* manage the send queue */
	u32 tx_head;
	void *buf;
	u32 buf_size;
	u8 *sync_buff;
	struct mlx5dr_mr *sync_mr;
	spinlock_t lock; /* Protect the data path of the send ring */
	bool err_state; /* send_ring is not usable in err state */
};
1472
/* Send ring lifetime and postsend operations. */
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
			   struct mlx5dr_send_ring *send_ring);
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
			     struct mlx5dr_ste *ste,
			     u8 *data,
			     u16 size,
			     u16 offset);
int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask);
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste);
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action);
int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
				 struct mlx5dr_icm_chunk *chunk,
				 u16 num_of_actions,
				 u8 *data);
int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
			      u16 num_of_actions, u8 *actions_data);

/* Per-direction pools of preallocated send-info objects. */
int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
struct mlx5dr_ste_send_info *mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
						    enum mlx5dr_domain_nic_type nic_type);
void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info);
1503
/* Identifies a flow table for FW set_fte commands. */
struct mlx5dr_cmd_ft_info {
	u32 id;
	u16 vport;
	enum fs_flow_table_type type;
};

/* One flow destination for a FW FTE; union selected by type. */
struct mlx5dr_cmd_flow_destination_hw_info {
	enum mlx5_flow_destination_type type;
	union {
		u32 tir_num;
		u32 ft_num;
		u32 ft_id;
		u32 counter_id;
		u32 sampler_id;
		struct {
			u16 num;
			u16 vhca_id;
			u32 reformat_id;
			u8 flags;
		} vport;
	};
};

/* Flow table entry description passed to mlx5dr_cmd_set_fte(). */
struct mlx5dr_cmd_fte_info {
	u32 dests_size;		/* number of entries in dest_arr */
	u32 index;
	struct mlx5_flow_context flow_context;
	u32 *val;		/* match values */
	struct mlx5_flow_act action;
	struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
	bool ignore_flow_level;
};
1536
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte);

bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps);

/* FW-created flow table used to recalculate packet checksums. */
struct mlx5dr_fw_recalc_cs_ft {
	u64 rx_icm_addr;
	u32 table_id;
	u32 group_id;
	u32 modify_hdr_id;
};
1551
/* Checksum-recalc flow table management (per vport, cached in csum_fts_xa). */
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
				    struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					u16 vport_num,
					u64 *rx_icm_addr);

/* FW multi-destination table helpers. */
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
			    struct mlx5dr_cmd_flow_destination_hw_info *dest,
			    int num_dest,
			    bool reformat_req,
			    u32 *tbl_id,
			    u32 *group_id,
			    bool ignore_flow_level,
			    u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
			      u32 group_id);
1569
mlx5dr_is_fw_table(struct mlx5_flow_table * ft)1570 static inline bool mlx5dr_is_fw_table(struct mlx5_flow_table *ft)
1571 {
1572 return !ft->fs_dr_table.dr_table;
1573 }
1574
mlx5dr_supp_match_ranges(struct mlx5_core_dev * dev)1575 static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
1576 {
1577 return (MLX5_CAP_GEN(dev, steering_format_version) >=
1578 MLX5_STEERING_FORMAT_CONNECTX_6DX) &&
1579 (MLX5_CAP_GEN_64(dev, match_definer_format_supported) &
1580 (1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
1581 }
1582
/* Modify-header pattern cache. */
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);
struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
						      u16 num_of_actions, u8 *data);
void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
				   struct mlx5dr_ptrn_obj *pattern);

/* Modify-header argument manager. */
struct mlx5dr_arg_mgr *mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn);
void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr);
struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
					  u16 num_of_actions,
					  u8 *data);
void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
			struct mlx5dr_arg_obj *arg_obj);
u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj);
1598
#endif /* _DR_TYPES_ */
1600