/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef MLX5HWS_BWC_H_
#define MLX5HWS_BWC_H_

#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255

#define MLX5HWS_BWC_MAX_ACTS 16
14
15 struct mlx5hws_bwc_matcher {
16 struct mlx5hws_matcher *matcher;
17 struct mlx5hws_match_template *mt;
18 struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
19 u8 num_of_at;
20 u16 priority;
21 u8 size_log;
22 u32 num_of_rules; /* atomically accessed */
23 struct list_head *rules;
24 };
25
26 struct mlx5hws_bwc_rule {
27 struct mlx5hws_bwc_matcher *bwc_matcher;
28 struct mlx5hws_rule *rule;
29 u16 bwc_queue_idx;
30 struct list_head list_node;
31 };
32
33 int
34 mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
35 struct mlx5hws_table *table,
36 u32 priority,
37 u8 match_criteria_enable,
38 struct mlx5hws_match_parameters *mask,
39 enum mlx5hws_action_type action_types[]);
40
41 int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
42
43 struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
44
45 void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
46
47 int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
48 u32 *match_param,
49 struct mlx5hws_rule_action rule_actions[],
50 u32 flow_source,
51 u16 bwc_queue_idx);
52
53 int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
54
55 void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
56 u16 bwc_queue_idx,
57 u32 flow_source,
58 struct mlx5hws_rule_attr *rule_attr);
59
mlx5hws_bwc_queues(struct mlx5hws_context * ctx)60 static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
61 {
62 /* Besides the control queue, half of the queues are
63 * reguler HWS queues, and the other half are BWC queues.
64 */
65 return (ctx->queues - 1) / 2;
66 }
67
mlx5hws_bwc_get_queue_id(struct mlx5hws_context * ctx,u16 idx)68 static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
69 {
70 return idx + mlx5hws_bwc_queues(ctx);
71 }
72
#endif /* MLX5HWS_BWC_H_ */