/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef MLX5HWS_SEND_H_
#define MLX5HWS_SEND_H_

/* A single operation requires at least two WQEBBs, which means at most
 * 16 such operations per rule.
 */
#define MAX_WQES_PER_RULE 32

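/* Opcode of the WQE used by HWS to access steering tables. */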
enum mlx5hws_wqe_opcode {
	MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
};

enum mlx5hws_wqe_opmod {
	MLX5HWS_WQE_OPMOD_GTA_STE = 0,
	MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
};

enum mlx5hws_wqe_gta_opcode {
	MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
	MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
};

enum mlx5hws_wqe_gta_opmod {
	MLX5HWS_WQE_GTA_OPMOD_STE = 0,
	MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
};

enum mlx5hws_wqe_gta_sz {
	MLX5HWS_WQE_SZ_GTA_CTRL = 48,
	MLX5HWS_WQE_SZ_GTA_DATA = 64,
};

/* WQE Control segment. */
struct mlx5hws_wqe_ctrl_seg {
	__be32 opmod_idx_opcode;
	__be32 qpn_ds;
	__be32 flags;
	__be32 imm;
};

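/* GTA WQE control segment: operation/direct index and STC indices. */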
struct mlx5hws_wqe_gta_ctrl_seg {
	__be32 op_dirix;
	__be32 stc_ix[5];
	__be32 rsvd0[6];
};

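/* GTA WQE data segment carrying an STE: actions and match tag, or a
 * single jumbo tag that occupies both areas.
 */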
struct mlx5hws_wqe_gta_data_seg_ste {
	__be32 rsvd0_ctr_id;
	__be32 rsvd1_definer;
	__be32 rsvd2[3];
	union {
		struct {
			__be32 action[3];
			__be32 tag[8];
		};
		__be32 jumbo[11];
	};
};

struct mlx5hws_wqe_gta_data_seg_arg {
	__be32 action_args[8];
};

struct mlx5hws_wqe_gta {
	struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
	union {
		struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
		struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
	};
};

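/* Completion queue of a send ring. */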
struct mlx5hws_send_ring_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	u16 poll_wqe;
};

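/* Per-posted-WQE context tracked by the send queue until its completion
 * is polled.
 */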
struct mlx5hws_send_ring_priv {
	struct mlx5hws_rule *rule;
	void *user_data;
	u32 num_wqebbs;
	u32 id;
	u32 retry_id;
	u32 *used_id;
};

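/* A dependent WQE buffered on the queue and only posted to HW once
 * mlx5hws_send_all_dep_wqe() flushes the pending dependencies.
 */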
struct mlx5hws_send_ring_dep_wqe {
	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
	struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
	struct mlx5hws_rule *rule;
	u32 rtc_0;
	u32 rtc_1;
	u32 retry_rtc_0;
	u32 retry_rtc_1;
	u32 direct_index;
	void *user_data;
};

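/* Send queue of a send ring, including the dependent-WQE ring and the
 * UAR (doorbell) mapping used to notify HW.
 */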
struct mlx5hws_send_ring_sq {
	struct mlx5_core_dev *mdev;
	u16 cur_post;
	u16 buf_mask;
	struct mlx5hws_send_ring_priv *wr_priv;
	unsigned int last_idx;
	struct mlx5hws_send_ring_dep_wqe *dep_wqe;
	unsigned int head_dep_idx;
	unsigned int tail_dep_idx;
	u32 sqn;
	struct mlx5_wq_cyc wq;
	struct mlx5_wq_ctrl wq_ctrl;
	void __iomem *uar_map;
};

struct mlx5hws_send_ring {
	struct mlx5hws_send_ring_cq send_cq;
	struct mlx5hws_send_ring_sq send_sq;
};

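/* Ring of software-generated completions returned to the caller when the
 * queue is polled.
 */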
struct mlx5hws_completed_poll_entry {
	void *user_data;
	enum mlx5hws_flow_op_status status;
};

struct mlx5hws_completed_poll {
	struct mlx5hws_completed_poll_entry *entries;
	u16 ci;
	u16 pi;
	u16 mask;
};

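/* A send engine (queue): its send ring, shared UAR, software-generated
 * completions and bookkeeping of in-flight entries.
 */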
struct mlx5hws_send_engine {
	struct mlx5hws_send_ring send_ring;
	struct mlx5_uars_page *uar; /* UAR is shared between rings of a queue */
	struct mlx5hws_completed_poll completed;
	u16 used_entries;
	u16 num_entries;
	bool err;
	struct mutex lock; /* Protects the send engine */
};

struct mlx5hws_send_engine_post_ctrl {
	struct mlx5hws_send_engine *queue;
	struct mlx5hws_send_ring *send_ring;
	size_t num_wqebbs;
};

struct mlx5hws_send_engine_post_attr {
	u8 opcode;
	u8 opmod;
	u8 notify_hw;
	u8 fence;
	u8 match_definer_id;
	u8 range_definer_id;
	size_t len;
	struct mlx5hws_rule *rule;
	u32 id;
	u32 retry_id;
	u32 *used_id;
	void *user_data;
};

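/* Attributes describing an STE write: target and retry RTC IDs, the
 * match tag and the WQE control/data segments to post.
 */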
struct mlx5hws_send_ste_attr {
	u32 rtc_0;
	u32 rtc_1;
	u32 retry_rtc_0;
	u32 retry_rtc_1;
	u32 *used_id_rtc_0;
	u32 *used_id_rtc_1;
	bool wqe_tag_is_jumbo;
	u8 gta_opcode;
	u32 direct_index;
	struct mlx5hws_send_engine_post_attr send_attr;
	struct mlx5hws_rule_match_tag *wqe_tag;
	struct mlx5hws_rule_match_tag *range_wqe_tag;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
};

struct mlx5hws_send_ring_dep_wqe *
mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);

int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
			    struct mlx5hws_send_engine *queue,
			    u16 queue_size);

void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);

int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
			     u16 queues,
			     u16 queue_size);

int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
			      u16 queue_id,
			      u32 actions);

int mlx5hws_send_test(struct mlx5hws_context *ctx,
		      u16 queues,
		      u16 queue_size);

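/*
 * Typical posting flow (sketch): reserve a batch with
 * mlx5hws_send_engine_post_start(), request WQE buffers with
 * mlx5hws_send_engine_post_req_wqe() and fill them, then complete the
 * batch with mlx5hws_send_engine_post_end(), which notifies HW
 * according to attr->notify_hw.
 */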
struct mlx5hws_send_engine_post_ctrl
mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);

void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
				      char **buf, size_t *len);

void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
				  struct mlx5hws_send_engine_post_attr *attr);

void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
		      struct mlx5hws_send_ste_attr *ste_attr);

void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
			  struct mlx5hws_send_engine *queue,
			  struct mlx5hws_send_ste_attr *ste_attr);

void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);

static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
{
	struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
	struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;

	return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
}

static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
{
	return queue->used_entries >= queue->num_entries;
}

static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
{
	queue->used_entries++;
}

static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
{
	queue->used_entries--;
}

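/* Generate a completion in software and queue it for the next poll. */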
static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
						void *user_data,
						int comp_status)
{
	struct mlx5hws_completed_poll *comp = &queue->completed;

	comp->entries[comp->pi].status = comp_status;
	comp->entries[comp->pi].user_data = user_data;

	comp->pi = (comp->pi + 1) & comp->mask;
}

static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
{
	return queue->err;
}

#endif /* MLX5HWS_SEND_H_ */