// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"
#include "lib/clock.h"

enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };

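/* Reserve the next dependent WQE slot on the send queue and clear its match
 * tag. The caller fills the slot; it is flushed to HW later by
 * mlx5hws_send_all_dep_wqe().
 */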
struct mlx5hws_send_ring_dep_wqe *
mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
{
	struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
	unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);

	memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);

	return &send_sq->dep_wqe[idx];
}

void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
{
	queue->send_ring.send_sq.head_dep_idx--;
}

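/* Post all pending dependent WQEs to the send queue. The first WQE is fenced
 * against previously posted WQEs and only the last one notifies HW.
 */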
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
{
	struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
	struct mlx5hws_send_ste_attr ste_attr = {0};
	struct mlx5hws_send_ring_dep_wqe *dep_wqe;

	ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;

	/* Fence first from previous depend WQEs */
	ste_attr.send_attr.fence = 1;

	while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
		dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];

		/* Notify HW on the last WQE */
		ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
		ste_attr.send_attr.user_data = dep_wqe->user_data;
		ste_attr.send_attr.rule = dep_wqe->rule;

		ste_attr.rtc_0 = dep_wqe->rtc_0;
		ste_attr.rtc_1 = dep_wqe->rtc_1;
		ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
		ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
		ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
		ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
		ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
		ste_attr.wqe_data = &dep_wqe->wqe_data;
		ste_attr.direct_index = dep_wqe->direct_index;

		mlx5hws_send_ste(queue, &ste_attr);

		/* Fencing is done only on the first WQE */
		ste_attr.send_attr.fence = 0;
	}
}

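/* Begin a multi-WQEBB post on the queue's send ring. The returned control
 * structure tracks the WQEBBs reserved until mlx5hws_send_engine_post_end().
 */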
struct mlx5hws_send_engine_post_ctrl
mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
{
	struct mlx5hws_send_engine_post_ctrl ctrl;

	ctrl.queue = queue;
	/* Currently only one send ring is supported */
	ctrl.send_ring = &queue->send_ring;
	ctrl.num_wqebbs = 0;

	return ctrl;
}

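/* Reserve the next WQEBB for the current post and return its buffer and
 * length. The first WQEBB skips the control segment, which is filled in by
 * mlx5hws_send_engine_post_end().
 */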
void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
				      char **buf, size_t *len)
{
	struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
	unsigned int idx;

	idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;

	/* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used
	 * as a buffer spanning more than one WQE_BB, since two consecutive
	 * MLX5_SEND_WQE_BBs may reside on different kernel memory pages.
	 */
	*buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
	*len = MLX5_SEND_WQE_BB;

	if (!ctrl->num_wqebbs) {
		*buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
		*len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
	}

	ctrl->num_wqebbs++;
}

static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
				      struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
{
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*sq->wq.db = cpu_to_be32(sq->cur_post);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);

	/* Ensure doorbell is written on uar_page before poll_cq */
	WRITE_ONCE(doorbell_cseg, NULL);
}

static void
hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
		     struct mlx5hws_rule_match_tag *tag,
		     bool is_jumbo)
{
	if (is_jumbo) {
		/* Clear previous possibly dirty control */
		memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
		memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
	} else {
		/* Clear previous possibly dirty control and actions */
		memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
		memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
	}
}

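/* Complete a post: fill the WQE control segment, save the per-WR private
 * data used on completion, advance the producer index and ring the doorbell
 * if HW notification was requested.
 */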
void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
				  struct mlx5hws_send_engine_post_attr *attr)
{
	struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
	struct mlx5hws_send_ring_sq *sq;
	unsigned int idx;
	u32 flags = 0;

	sq = &ctrl->send_ring->send_sq;
	idx = sq->cur_post & sq->buf_mask;
	sq->last_idx = idx;

	wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);

	wqe_ctrl->opmod_idx_opcode =
		cpu_to_be32((attr->opmod << 24) |
			    ((sq->cur_post & 0xffff) << 8) |
			    attr->opcode);
	wqe_ctrl->qpn_ds =
		cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
			    sq->sqn << 8);
	wqe_ctrl->imm = cpu_to_be32(attr->id);

	flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
	wqe_ctrl->flags = cpu_to_be32(flags);

	sq->wr_priv[idx].id = attr->id;
	sq->wr_priv[idx].retry_id = attr->retry_id;

	sq->wr_priv[idx].rule = attr->rule;
	sq->wr_priv[idx].user_data = attr->user_data;
	sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;

	if (attr->rule) {
		sq->wr_priv[idx].rule->pending_wqes++;
		sq->wr_priv[idx].used_id = attr->used_id;
	}

	sq->cur_post += ctrl->num_wqebbs;

	if (attr->notify_hw)
		hws_send_engine_post_ring(sq, wqe_ctrl);
}

static void hws_send_wqe(struct mlx5hws_send_engine *queue,
			 struct mlx5hws_send_engine_post_attr *send_attr,
			 struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
			 void *send_wqe_data,
			 void *send_wqe_tag,
			 bool is_jumbo,
			 u8 gta_opcode,
			 u32 direct_index)
{
	struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5hws_send_engine_post_ctrl ctrl;
	size_t wqe_len;

	ctrl = mlx5hws_send_engine_post_start(queue);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);

	wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
	memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
	       sizeof(send_wqe_ctrl->stc_ix));

	if (send_wqe_data)
		memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
	else
		hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);

	mlx5hws_send_engine_post_end(&ctrl, send_attr);
}

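/* Post STE WQE(s) for a rule, writing to RTC 1 and/or RTC 0. Fencing is
 * applied only to the first WQE posted and HW is notified only on the last
 * one; the original attributes are restored before returning.
 */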
void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
		      struct mlx5hws_send_ste_attr *ste_attr)
{
	struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
	u8 notify_hw = send_attr->notify_hw;
	u8 fence = send_attr->fence;

	if (ste_attr->rtc_1) {
		send_attr->id = ste_attr->rtc_1;
		send_attr->used_id = ste_attr->used_id_rtc_1;
		send_attr->retry_id = ste_attr->retry_rtc_1;
		send_attr->fence = fence;
		send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
		hws_send_wqe(queue, send_attr,
			     ste_attr->wqe_ctrl,
			     ste_attr->wqe_data,
			     ste_attr->wqe_tag,
			     ste_attr->wqe_tag_is_jumbo,
			     ste_attr->gta_opcode,
			     ste_attr->direct_index);
	}

	if (ste_attr->rtc_0) {
		send_attr->id = ste_attr->rtc_0;
		send_attr->used_id = ste_attr->used_id_rtc_0;
		send_attr->retry_id = ste_attr->retry_rtc_0;
		send_attr->fence = fence && !ste_attr->rtc_1;
		send_attr->notify_hw = notify_hw;
		hws_send_wqe(queue, send_attr,
			     ste_attr->wqe_ctrl,
			     ste_attr->wqe_data,
			     ste_attr->wqe_tag,
			     ste_attr->wqe_tag_is_jumbo,
			     ste_attr->gta_opcode,
			     ste_attr->direct_index);
	}

	/* Restore to original requested values */
	send_attr->notify_hw = notify_hw;
	send_attr->fence = fence;
}

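/* Re-post a failed rule WQE using its retry RTC id. The GTA control and data
 * segments are copied from the original WQE still present in the send queue
 * buffer at wqe_cnt.
 */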
static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
					    struct mlx5hws_send_ring_priv *priv,
					    u16 wqe_cnt)
{
	struct mlx5hws_send_engine_post_attr send_attr = {0};
	struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5hws_send_engine_post_ctrl ctrl;
	struct mlx5hws_send_ring_sq *send_sq;
	unsigned int idx;
	size_t wqe_len;
	char *p;

	send_attr.rule = priv->rule;
	send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
	send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
	send_attr.notify_hw = 1;
	send_attr.fence = 0;
	send_attr.user_data = priv->user_data;
	send_attr.id = priv->retry_id;
	send_attr.used_id = priv->used_id;

	ctrl = mlx5hws_send_engine_post_start(queue);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);

	send_sq = &ctrl.send_ring->send_sq;
	idx = wqe_cnt & send_sq->buf_mask;
	p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);

	/* Copy old gta ctrl */
	memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
	       MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));

	idx = (wqe_cnt + 1) & send_sq->buf_mask;
	p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);

	/* Copy old gta data */
	memcpy(wqe_data, p, MLX5_SEND_WQE_BB);

	mlx5hws_send_engine_post_end(&ctrl, &send_attr);
}

void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
{
	struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
	struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;

	wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
	wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);

	hws_send_engine_post_ring(sq, wqe_ctrl);
}

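/* Handle completion of a rule move (matcher resize): clean up leftovers of
 * the write phase and, once the delete phase completes, report the final
 * operation status and return the rule to the created state.
 */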
static void
hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
				   struct mlx5hws_send_ring_priv *priv,
				   enum mlx5hws_flow_op_status *status)
{
	switch (priv->rule->resize_info->state) {
	case MLX5HWS_RULE_RESIZE_STATE_WRITING:
		if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
			/* Backup original RTCs */
			u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
			u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;

			/* Delete partially failed move rule using resize_info */
			priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
			priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;

			/* Move rule to original RTC for future delete */
			priv->rule->rtc_0 = orig_rtc_0;
			priv->rule->rtc_1 = orig_rtc_1;
		}
		/* Clean leftovers */
		mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
		break;

	case MLX5HWS_RULE_RESIZE_STATE_DELETING:
		if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
			*status = MLX5HWS_FLOW_OP_ERROR;
		} else {
			*status = MLX5HWS_FLOW_OP_SUCCESS;
			priv->rule->matcher = priv->rule->matcher->resize_dst;
		}
		priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
		priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
		break;

	default:
		break;
	}
}

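/* Process a completion for a rule WQE: retry on error if a retry id is
 * available, track pending WQEs, and update the rule status once the last
 * outstanding WQE has completed.
 */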
static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
					struct mlx5hws_send_ring_priv *priv,
					u16 wqe_cnt,
					enum mlx5hws_flow_op_status *status)
{
	priv->rule->pending_wqes--;

	if (*status == MLX5HWS_FLOW_OP_ERROR) {
		if (priv->retry_id) {
			hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
			return;
		}
		/* Some part of the rule failed */
		priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
		*priv->used_id = 0;
	} else {
		*priv->used_id = priv->id;
	}

	/* Update rule status for the last completion */
	if (!priv->rule->pending_wqes) {
		if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
			hws_send_engine_update_rule_resize(queue, priv, status);
			return;
		}

		if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
			/* Rule completely failed and doesn't require cleanup */
			if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
				priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;

			*status = MLX5HWS_FLOW_OP_ERROR;
		} else {
			/* Advance the status; this only works on the good-flow
			 * path, as the enum is ordered
			 * creating -> created -> deleting -> deleted.
			 */
			priv->rule->status++;
			*status = MLX5HWS_FLOW_OP_SUCCESS;
			/* Rule was deleted, now we can safely release action
			 * STEs and clear resize info
			 */
			if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
				mlx5hws_rule_free_action_ste(priv->rule);
				mlx5hws_rule_clear_resize_info(priv->rule);
			}
		}
	}
}

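/* Translate a CQE (or an implicitly completed WQE when cqe is NULL) into a
 * flow operation result, updating the rule state and either filling the
 * user's result array or queuing the completion for a later poll.
 */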
static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
				   struct mlx5_cqe64 *cqe,
				   struct mlx5hws_send_ring_priv *priv,
				   struct mlx5hws_flow_op_result res[],
				   s64 *i,
				   u32 res_nb,
				   u16 wqe_cnt)
{
	enum mlx5hws_flow_op_status status;

	if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
		     likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
		status = MLX5HWS_FLOW_OP_SUCCESS;
	} else {
		status = MLX5HWS_FLOW_OP_ERROR;
	}

	if (priv->user_data) {
		if (priv->rule) {
			hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
			/* Completion is provided on the last rule WQE */
			if (priv->rule->pending_wqes)
				return;
		}

		if (*i < res_nb) {
			res[*i].user_data = priv->user_data;
			res[*i].status = status;
			(*i)++;
			mlx5hws_send_engine_dec_rule(queue);
		} else {
			mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
		}
	}
}

static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
			     struct mlx5_cqe64 *cqe64)
{
	if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
		struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;

		mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
		mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
			       16, 1, err_cqe,
			       sizeof(*err_cqe), false);
		return CQ_POLL_ERR;
	}

	return CQ_OK;
}

static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
{
	struct mlx5_cqe64 *cqe64;
	int err;

	cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe64) {
		if (unlikely(cq->mdev->state ==
			     MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
			mlx5_core_dbg_once(cq->mdev,
					   "Polling CQ while device is shutting down\n");
			return CQ_POLL_ERR;
		}
		return CQ_EMPTY;
	}

	mlx5_cqwq_pop(&cq->wq);
	err = mlx5hws_parse_cqe(cq, cqe64);
	mlx5_cqwq_update_db_record(&cq->wq);

	return err;
}

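/* Poll the send CQ: WQEs older than the reported wqe_counter are treated as
 * implicitly completed, then the WQE indicated by the CQE itself is processed
 * and the CQ consumer index is advanced.
 */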
static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
				    struct mlx5hws_flow_op_result res[],
				    s64 *polled,
				    u32 res_nb)
{
	struct mlx5hws_send_ring *send_ring = &queue->send_ring;
	struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
	struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
	struct mlx5hws_send_ring_priv *priv;
	struct mlx5_cqe64 *cqe;
	u8 cqe_opcode;
	u16 wqe_cnt;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return;

	cqe_opcode = get_cqe_opcode(cqe);
	if (cqe_opcode == MLX5_CQE_INVALID)
		return;

	if (unlikely(cqe_opcode != MLX5_CQE_REQ))
		queue->err = true;

	wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;

	while (cq->poll_wqe != wqe_cnt) {
		priv = &sq->wr_priv[cq->poll_wqe];
		hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
		cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
	}

	priv = &sq->wr_priv[wqe_cnt];
	cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
	hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
	mlx5hws_cq_poll_one(cq);
}

static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
				      struct mlx5hws_flow_op_result res[],
				      s64 *polled,
				      u32 res_nb)
{
	struct mlx5hws_completed_poll *comp = &queue->completed;

	while (comp->ci != comp->pi) {
		if (*polled < res_nb) {
			res[*polled].status =
				comp->entries[comp->ci].status;
			res[*polled].user_data =
				comp->entries[comp->ci].user_data;
			(*polled)++;
			comp->ci = (comp->ci + 1) & comp->mask;
			mlx5hws_send_engine_dec_rule(queue);
		} else {
			return;
		}
	}
}

static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
				struct mlx5hws_flow_op_result res[],
				u32 res_nb)
{
	s64 polled = 0;

	hws_send_engine_poll_list(queue, res, &polled, res_nb);

	if (polled >= res_nb)
		return polled;

	hws_send_engine_poll_cq(queue, res, &polled, res_nb);

	return polled;
}

int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
			    u16 queue_id,
			    struct mlx5hws_flow_op_result res[],
			    u32 res_nb)
{
	return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
}

static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
				  int numa_node,
				  struct mlx5hws_send_engine *queue,
				  struct mlx5hws_send_ring_sq *sq,
				  void *sqc_data)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wq_param param;
	size_t buf_sz;
	int err;

	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->mdev = mdev;

	param.db_numa_node = numa_node;
	param.buf_numa_node = numa_node;
	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
	sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
	if (!sq->dep_wqe) {
		err = -ENOMEM;
		goto destroy_wq_cyc;
	}

	sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL);
	if (!sq->wr_priv) {
		err = -ENOMEM;
		goto free_dep_wqe;
	}

	sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;

	return 0;

free_dep_wqe:
	kfree(sq->dep_wqe);
destroy_wq_cyc:
	mlx5_wq_destroy(&sq->wq_ctrl);
	return err;
}

static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
{
	if (!sq)
		return;
	kfree(sq->wr_priv);
	kfree(sq->dep_wqe);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
				   void *sqc_data,
				   struct mlx5hws_send_engine *queue,
				   struct mlx5hws_send_ring_sq *sq,
				   struct mlx5hws_send_ring_cq *cq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
						 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
				     struct mlx5hws_send_ring_sq *sq)
{
	mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	void *in, *sqc;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sqn, in);

	kvfree(in);

	return err;
}

static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
{
	mlx5_core_destroy_sq(sq->mdev, sq->sqn);
	mlx5_wq_destroy(&sq->wq_ctrl);
	kfree(sq->wr_priv);
	kfree(sq->dep_wqe);
}

static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				       void *sqc_data,
				       struct mlx5hws_send_engine *queue,
				       struct mlx5hws_send_ring_sq *sq,
				       struct mlx5hws_send_ring_cq *cq)
{
	int err;

	err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
	if (err)
		return err;

	err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
	if (err)
		hws_send_ring_destroy_sq(mdev, sq);

	return err;
}

static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
				 int numa_node,
				 struct mlx5hws_send_engine *queue,
				 struct mlx5hws_send_ring_sq *sq,
				 struct mlx5hws_send_ring_cq *cq)
{
	size_t buf_sz, sq_log_buf_sz;
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
	sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, ctx->pd_num);
	MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);

	err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
	if (err)
		goto err_free_sqc;

	err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
					  queue, sq, cq);
	if (err)
		goto err_free_sq;

	kvfree(sqc_data);

	return 0;
err_free_sq:
	hws_send_ring_free_sq(sq);
err_free_sqc:
	kvfree(sqc_data);
	return err;
}

static void hws_cq_complete(struct mlx5_core_cq *mcq,
			    struct mlx5_eqe *eqe)
{
	pr_err("CQ completion CQ: #%u\n", mcq->cqn);
}

static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
				  int numa_node,
				  struct mlx5hws_send_engine *queue,
				  void *cqc_data,
				  struct mlx5hws_send_ring_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param;
	struct mlx5_cqe64 *cqe;
	int err;
	u32 i;

	param.buf_numa_node = numa_node;
	param.db_numa_node = numa_node;

	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	mcq->comp = hws_cq_complete;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
				   struct mlx5hws_send_engine *queue,
				   void *cqc_data,
				   struct mlx5hws_send_ring_cq *cq)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in, *cqc;
	int inlen, eqn;
	int err;

	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
				 struct mlx5hws_send_engine *queue,
				 int numa_node,
				 struct mlx5hws_send_ring_cq *cq)
{
	void *cqc_data;
	int err;

	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc_data)
		return -ENOMEM;

	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
	MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));

	err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
	if (err)
		goto err_out;

	err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
	if (err)
		goto err_free_cq;

	kvfree(cqc_data);

	return 0;

err_free_cq:
	mlx5_wq_destroy(&cq->wq_ctrl);
err_out:
	kvfree(cqc_data);
	return err;
}

static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
{
	hws_send_ring_close_sq(&queue->send_ring.send_sq);
	hws_send_ring_close_cq(&queue->send_ring.send_cq);
}

static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
				  struct mlx5hws_send_engine *queue)
{
	int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
	struct mlx5hws_send_ring *ring = &queue->send_ring;
	int err;

	err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
	if (err)
		return err;

	err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
				    &ring->send_cq);
	if (err)
		goto close_cq;

	return err;

close_cq:
	hws_send_ring_close_cq(&ring->send_cq);
	return err;
}

void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
	hws_send_ring_close(queue);
	kfree(queue->completed.entries);
}

int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
			    struct mlx5hws_send_engine *queue,
			    u16 queue_size)
{
	int err;

	mutex_init(&queue->lock);

	queue->num_entries = roundup_pow_of_two(queue_size);
	queue->used_entries = 0;

	queue->completed.entries = kcalloc(queue->num_entries,
					   sizeof(queue->completed.entries[0]),
					   GFP_KERNEL);
	if (!queue->completed.entries)
		return -ENOMEM;

	queue->completed.pi = 0;
	queue->completed.ci = 0;
	queue->completed.mask = queue->num_entries - 1;
	err = mlx5hws_send_ring_open(ctx, queue);
	if (err)
		goto free_completed_entries;

	return 0;

free_completed_entries:
	kfree(queue->completed.entries);
	return err;
}

static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
{
	while (queues--)
		mlx5hws_send_queue_close(&ctx->send_queue[queues]);
}

static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
{
	int bwc_queues = mlx5hws_bwc_queues(ctx);
	int i;

	if (!mlx5hws_context_bwc_supported(ctx))
		return;

	for (i = 0; i < bwc_queues; i++) {
		mutex_destroy(&ctx->bwc_send_queue_locks[i]);
		lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
	}

	kfree(ctx->bwc_lock_class_keys);
	kfree(ctx->bwc_send_queue_locks);
}

void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
{
	hws_send_queues_bwc_locks_destroy(ctx);
	__hws_send_queues_close(ctx, ctx->queues);
	kfree(ctx->send_queue);
}

static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
{
	/* Number of BWC queues is equal to number of the usual HWS queues */
	int bwc_queues = ctx->queues - 1;
	int i;

	if (!mlx5hws_context_bwc_supported(ctx))
		return 0;

	ctx->queues += bwc_queues;

	ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
					    sizeof(*ctx->bwc_send_queue_locks),
					    GFP_KERNEL);

	if (!ctx->bwc_send_queue_locks)
		return -ENOMEM;

	ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
					   sizeof(*ctx->bwc_lock_class_keys),
					   GFP_KERNEL);
	if (!ctx->bwc_lock_class_keys)
		goto err_lock_class_keys;

	for (i = 0; i < bwc_queues; i++) {
		mutex_init(&ctx->bwc_send_queue_locks[i]);
		lockdep_register_key(ctx->bwc_lock_class_keys + i);
		lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
	}

	return 0;

err_lock_class_keys:
	kfree(ctx->bwc_send_queue_locks);
	return -ENOMEM;
}

int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
			     u16 queues,
			     u16 queue_size)
{
	int err = 0;
	u32 i;

	/* Open one extra queue for control path */
	ctx->queues = queues + 1;

	/* open a separate set of queues and locks for bwc API */
	err = hws_bwc_send_queues_init(ctx);
	if (err)
		return err;

	ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
	if (!ctx->send_queue) {
		err = -ENOMEM;
		goto free_bwc_locks;
	}

	for (i = 0; i < ctx->queues; i++) {
		err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
		if (err)
			goto close_send_queues;
	}

	return 0;

close_send_queues:
	__hws_send_queues_close(ctx, i);

	kfree(ctx->send_queue);

free_bwc_locks:
	hws_send_queues_bwc_locks_destroy(ctx);

	return err;
}

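/* Drain a send queue: flush any pending dependent WQEs (or force a completion
 * on the last posted WQE) and, for the synchronous action, poll the CQ until
 * the queue is empty.
 */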
int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
			      u16 queue_id,
			      u32 actions)
{
	struct mlx5hws_send_ring_sq *send_sq;
	struct mlx5hws_send_engine *queue;
	bool wait_comp = false;
	s64 polled = 0;

	queue = &ctx->send_queue[queue_id];
	send_sq = &queue->send_ring.send_sq;

	switch (actions) {
	case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
		wait_comp = true;
		fallthrough;
	case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
		if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
			/* Send dependent WQEs to drain the queue */
			mlx5hws_send_all_dep_wqe(queue);
		else
			/* Signal on the last posted WQE */
			mlx5hws_send_engine_flush_queue(queue);

		/* Poll queue until empty */
		while (wait_comp && !mlx5hws_send_engine_empty(queue))
			hws_send_engine_poll_cq(queue, NULL, &polled, 0);

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

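/* Build an STE WQE and execute it through the generate_wqe command instead of
 * posting it to the SQ; on failure, retry once using the retry id.
 */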
static int
hws_send_wqe_fw(struct mlx5_core_dev *mdev,
		u32 pd_num,
		struct mlx5hws_send_engine_post_attr *send_attr,
		struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
		void *send_wqe_match_data,
		void *send_wqe_match_tag,
		void *send_wqe_range_data,
		void *send_wqe_range_tag,
		bool is_jumbo,
		u8 gta_opcode)
{
	bool has_range = send_wqe_range_data || send_wqe_range_tag;
	bool has_match = send_wqe_match_data || send_wqe_match_tag;
	struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
	struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
	struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
	struct mlx5hws_cmd_generate_wqe_attr attr = {0};
	struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
	struct mlx5_cqe64 cqe;
	u32 flags = 0;
	int ret;

	/* Set WQE control */
	wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
	wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
	flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	wqe_ctrl.flags = cpu_to_be32(flags);
	wqe_ctrl.imm = cpu_to_be32(send_attr->id);

	/* Set GTA WQE CTRL */
	memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
	gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);

	/* Set GTA match WQE DATA */
	if (has_match) {
		if (send_wqe_match_data)
			memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
		else
			hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);

		gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
		attr.gta_data_0 = (u8 *)&gta_wqe_data0;
	}

	/* Set GTA range WQE DATA */
	if (has_range) {
		if (send_wqe_range_data)
			memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
		else
			hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);

		gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
		attr.gta_data_1 = (u8 *)&gta_wqe_data1;
	}

	attr.pdn = pd_num;
	attr.wqe_ctrl = (u8 *)&wqe_ctrl;
	attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;

send_wqe:
	ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
	if (ret) {
		mlx5_core_err(mdev, "Failed to write WQE using command");
		return ret;
	}

	if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
	    (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
		*send_attr->used_id = send_attr->id;
		return 0;
	}

	/* Retry if rule failed */
	if (send_attr->retry_id) {
		wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
		send_attr->id = send_attr->retry_id;
		send_attr->retry_id = 0;
		goto send_wqe;
	}

	return -1;
}

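/* Write STEs through FW rather than through the send queue. Since FW writes
 * cannot be HW fenced, the queue is drained first when fencing is requested,
 * and a completion is generated directly for the caller.
 */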
void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
			  struct mlx5hws_send_engine *queue,
			  struct mlx5hws_send_ste_attr *ste_attr)
{
	struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
	struct mlx5hws_rule *rule = send_attr->rule;
	struct mlx5_core_dev *mdev;
	u16 queue_id;
	u32 pdn;
	int ret;

	queue_id = queue - ctx->send_queue;
	mdev = ctx->mdev;
	pdn = ctx->pd_num;

	/* Writing through FW can't be HW fenced, therefore we drain the queue */
	if (send_attr->fence)
		mlx5hws_send_queue_action(ctx,
					  queue_id,
					  MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);

	if (ste_attr->rtc_1) {
		send_attr->id = ste_attr->rtc_1;
		send_attr->used_id = ste_attr->used_id_rtc_1;
		send_attr->retry_id = ste_attr->retry_rtc_1;
		ret = hws_send_wqe_fw(mdev, pdn, send_attr,
				      ste_attr->wqe_ctrl,
				      ste_attr->wqe_data,
				      ste_attr->wqe_tag,
				      ste_attr->range_wqe_data,
				      ste_attr->range_wqe_tag,
				      ste_attr->wqe_tag_is_jumbo,
				      ste_attr->gta_opcode);
		if (ret)
			goto fail_rule;
	}

	if (ste_attr->rtc_0) {
		send_attr->id = ste_attr->rtc_0;
		send_attr->used_id = ste_attr->used_id_rtc_0;
		send_attr->retry_id = ste_attr->retry_rtc_0;
		ret = hws_send_wqe_fw(mdev, pdn, send_attr,
				      ste_attr->wqe_ctrl,
				      ste_attr->wqe_data,
				      ste_attr->wqe_tag,
				      ste_attr->range_wqe_data,
				      ste_attr->range_wqe_tag,
				      ste_attr->wqe_tag_is_jumbo,
				      ste_attr->gta_opcode);
		if (ret)
			goto fail_rule;
	}

	/* Advance the status; this only works on the good-flow path, as the
	 * enum is ordered creating -> created -> deleting -> deleted.
	 */
	if (likely(rule))
		rule->status++;

	mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);

	return;

fail_rule:
	if (likely(rule))
		rule->status = !rule->rtc_0 && !rule->rtc_1 ?
			       MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;

	mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
}