/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef HWS_BWC_H_
#define HWS_BWC_H_

#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32

/* Max number of AT attach operations for the same matcher.
 * When the limit is reached, a larger buffer is allocated for the ATs.
 */
#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
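/* For example, with the limit above set to 8, the 9th AT attach operation on
 * the same matcher is presumably the point at which its AT array has to be
 * reallocated into a larger buffer (see size_of_at_array in
 * struct mlx5hws_bwc_matcher below).
 */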

#define MLX5HWS_BWC_MAX_ACTS 16

#define MLX5HWS_BWC_POLLING_TIMEOUT 60

struct mlx5hws_bwc_matcher_complex_data;

struct mlx5hws_bwc_matcher_size {
	u8 size_log;
	atomic_t num_of_rules;
	atomic_t rehash_required;
};

struct mlx5hws_bwc_matcher {
	struct mlx5hws_matcher *matcher;
	struct mlx5hws_match_template *mt;
	struct mlx5hws_action_template **at;
	struct mlx5hws_bwc_matcher_complex_data *complex;
	struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
	u8 num_of_at;
	u8 size_of_at_array;
	u32 priority;
	struct mlx5hws_bwc_matcher_size rx_size;
	struct mlx5hws_bwc_matcher_size tx_size;
	struct list_head *rules;
};

struct mlx5hws_bwc_rule {
	struct mlx5hws_bwc_matcher *bwc_matcher;
	struct mlx5hws_rule *rule;
	struct mlx5hws_bwc_rule *isolated_bwc_rule;
	struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
	u32 flow_source;
	u16 bwc_queue_idx;
	bool skip_rx;
	bool skip_tx;
	struct list_head list_node;
};

int
mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
				  struct mlx5hws_table *table,
				  u32 priority,
				  u8 match_criteria_enable,
				  struct mlx5hws_match_parameters *mask,
				  enum mlx5hws_action_type action_types[]);

int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);

struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);

void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);

int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
				   u32 *match_param,
				   struct mlx5hws_rule_action rule_actions[],
				   u32 flow_source,
				   u16 bwc_queue_idx);

int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);

void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
				u16 bwc_queue_idx,
				u32 flow_source,
				struct mlx5hws_rule_attr *rule_attr);

int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
			   u16 queue_id,
			   u32 *pending_rules,
			   bool drain);
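
/* Informal usage sketch (not a contract defined by this header): the naming
 * suggests creating a matcher, then allocating and inserting rules on one of
 * the BWC queues, and tearing everything down in reverse order. All variables
 * below (table, mask, match_param, rule_actions, ...) are illustrative
 * placeholders.
 *
 *	ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher, table, priority,
 *						match_criteria_enable, mask,
 *						action_types);
 *	if (ret)
 *		goto err;
 *
 *	bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
 *	if (!bwc_rule)
 *		goto destroy_matcher;
 *
 *	ret = mlx5hws_bwc_rule_create_simple(bwc_rule, match_param,
 *					     rule_actions, flow_source,
 *					     bwc_queue_idx);
 *	...
 *	mlx5hws_bwc_rule_destroy_simple(bwc_rule);
 *	mlx5hws_bwc_rule_free(bwc_rule);
 *	mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
 */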

static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
	/* Besides the control queue, half of the queues are
	 * regular HWS queues, and the other half are BWC queues.
	 */
	if (mlx5hws_context_bwc_supported(ctx))
		return (ctx->queues - 1) / 2;
	return 0;
}
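
/* For example, a context opened with ctx->queues == 9 has one control queue
 * and (9 - 1) / 2 == 4 BWC queues (the other four being regular HWS queues),
 * so this helper returns 4.
 */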

static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
{
	return idx + mlx5hws_bwc_queues(ctx);
}
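
/* Continuing the example above (ctx->queues == 9, i.e. 4 BWC queues):
 * BWC queue index 0 maps to HWS queue id 4, and index 3 maps to queue id 7.
 */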

#endif /* HWS_BWC_H_ */