1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3
4 #include <linux/bitfield.h>
5 #include <linux/delay.h>
6 #include <linux/dma-mapping.h>
7
8 #include "hinic3_cmdq.h"
9 #include "hinic3_hwdev.h"
10 #include "hinic3_hwif.h"
11 #include "hinic3_mbox.h"
12
13 #define CMDQ_BUF_SIZE 2048
14 #define CMDQ_WQEBB_SIZE 64
15
16 #define CMDQ_CMD_TIMEOUT 5000
17 #define CMDQ_ENABLE_WAIT_TIMEOUT 300
18
19 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK GENMASK_ULL(51, 0)
20 #define CMDQ_CTXT_EQ_ID_MASK GENMASK_ULL(60, 53)
21 #define CMDQ_CTXT_CEQ_ARM_MASK BIT_ULL(61)
22 #define CMDQ_CTXT_CEQ_EN_MASK BIT_ULL(62)
23 #define CMDQ_CTXT_HW_BUSY_BIT_MASK BIT_ULL(63)
24
25 #define CMDQ_CTXT_WQ_BLOCK_PFN_MASK GENMASK_ULL(51, 0)
26 #define CMDQ_CTXT_CI_MASK GENMASK_ULL(63, 52)
27 #define CMDQ_CTXT_SET(val, member) \
28 FIELD_PREP(CMDQ_CTXT_##member##_MASK, val)
29
30 #define CMDQ_WQE_HDR_BUFDESC_LEN_MASK GENMASK(7, 0)
31 #define CMDQ_WQE_HDR_COMPLETE_FMT_MASK BIT(15)
32 #define CMDQ_WQE_HDR_DATA_FMT_MASK BIT(22)
33 #define CMDQ_WQE_HDR_COMPLETE_REQ_MASK BIT(23)
34 #define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK GENMASK(28, 27)
35 #define CMDQ_WQE_HDR_CTRL_LEN_MASK GENMASK(30, 29)
36 #define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK BIT(31)
37 #define CMDQ_WQE_HDR_SET(val, member) \
38 FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val)
39 #define CMDQ_WQE_HDR_GET(val, member) \
40 FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val))
41
42 #define CMDQ_CTRL_PI_MASK GENMASK(15, 0)
43 #define CMDQ_CTRL_CMD_MASK GENMASK(23, 16)
44 #define CMDQ_CTRL_MOD_MASK GENMASK(28, 24)
45 #define CMDQ_CTRL_HW_BUSY_BIT_MASK BIT(31)
46 #define CMDQ_CTRL_SET(val, member) \
47 FIELD_PREP(CMDQ_CTRL_##member##_MASK, val)
48 #define CMDQ_CTRL_GET(val, member) \
49 FIELD_GET(CMDQ_CTRL_##member##_MASK, val)
50
51 #define CMDQ_WQE_ERRCODE_VAL_MASK GENMASK(30, 0)
52 #define CMDQ_WQE_ERRCODE_GET(val, member) \
53 FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val))
54
55 #define CMDQ_DB_INFO_HI_PROD_IDX_MASK GENMASK(7, 0)
56 #define CMDQ_DB_INFO_SET(val, member) \
57 FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val)
58
59 #define CMDQ_DB_HEAD_QUEUE_TYPE_MASK BIT(23)
60 #define CMDQ_DB_HEAD_CMDQ_TYPE_MASK GENMASK(26, 24)
61 #define CMDQ_DB_HEAD_SET(val, member) \
62 FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val)
63
64 #define CMDQ_CEQE_TYPE_MASK GENMASK(2, 0)
65 #define CMDQ_CEQE_GET(val, member) \
66 FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val))
67
68 #define CMDQ_WQE_HEADER(wqe) ((struct cmdq_header *)(wqe))
69 #define CMDQ_WQE_COMPLETED(ctrl_info) \
70 CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT)
71
72 #define CMDQ_PFN(addr) ((addr) >> 12)
73
74 /* cmdq work queue's chip logical address table is up to 512B */
75 #define CMDQ_WQ_CLA_SIZE 512
76
77 /* Completion codes: send, direct sync, force stop */
78 #define CMDQ_SEND_CMPT_CODE 10
79 #define CMDQ_DIRECT_SYNC_CMPT_CODE 11
80 #define CMDQ_FORCE_STOP_CMPT_CODE 12
81
82 enum cmdq_data_format {
83 CMDQ_DATA_SGE = 0,
84 CMDQ_DATA_DIRECT = 1,
85 };
86
87 enum cmdq_ctrl_sect_len {
88 CMDQ_CTRL_SECT_LEN = 1,
89 CMDQ_CTRL_DIRECT_SECT_LEN = 2,
90 };
91
92 enum cmdq_bufdesc_len {
93 CMDQ_BUFDESC_LCMD_LEN = 2,
94 CMDQ_BUFDESC_SCMD_LEN = 3,
95 };
96
97 enum cmdq_completion_format {
98 CMDQ_COMPLETE_DIRECT = 0,
99 CMDQ_COMPLETE_SGE = 1,
100 };
101
102 enum cmdq_cmd_type {
103 CMDQ_CMD_DIRECT_RESP,
104 CMDQ_CMD_SGE_RESP,
105 };
106
107 #define CMDQ_WQE_NUM_WQEBBS 1
108
/* Peek the WQE at the current consumer position without consuming it.
 * Returns NULL when the queue holds no outstanding WQEs; otherwise stores
 * the masked consumer index in @ci and returns the WQE address.
 */
static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
{
	if (!hinic3_wq_get_used(wq))
		return NULL;

	*ci = wq->cons_idx & wq->idx_mask;

	return get_q_element(&wq->qpages, wq->cons_idx, NULL);
}
118
hinic3_alloc_cmd_buf(struct hinic3_hwdev * hwdev)119 struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev)
120 {
121 struct hinic3_cmd_buf *cmd_buf;
122 struct hinic3_cmdqs *cmdqs;
123
124 cmdqs = hwdev->cmdqs;
125
126 cmd_buf = kmalloc_obj(*cmd_buf, GFP_ATOMIC);
127 if (!cmd_buf)
128 return NULL;
129
130 cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
131 &cmd_buf->dma_addr);
132 if (!cmd_buf->buf) {
133 dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n");
134 goto err_free_cmd_buf;
135 }
136
137 cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE);
138 refcount_set(&cmd_buf->ref_cnt, 1);
139
140 return cmd_buf;
141
142 err_free_cmd_buf:
143 kfree(cmd_buf);
144
145 return NULL;
146 }
147
hinic3_free_cmd_buf(struct hinic3_hwdev * hwdev,struct hinic3_cmd_buf * cmd_buf)148 void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
149 struct hinic3_cmd_buf *cmd_buf)
150 {
151 struct hinic3_cmdqs *cmdqs;
152
153 if (!refcount_dec_and_test(&cmd_buf->ref_cnt))
154 return;
155
156 cmdqs = hwdev->cmdqs;
157
158 dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
159 kfree(cmd_buf);
160 }
161
cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info * cmd_info,struct hinic3_hwdev * hwdev)162 static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info,
163 struct hinic3_hwdev *hwdev)
164 {
165 if (cmd_info->buf_in) {
166 hinic3_free_cmd_buf(hwdev, cmd_info->buf_in);
167 cmd_info->buf_in = NULL;
168 }
169 }
170
/* Return a consumed WQE slot to the hardware: clear the HW busy bit in the
 * WQE's ctrl section, retire the per-slot command type, and only then advance
 * the consumer index so the slot can be reused.
 */
static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq,
				   struct cmdq_wqe *wqe, u16 ci)
{
	struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
	__le32 header_info = hdr->header_info;
	enum cmdq_data_format df;
	struct cmdq_ctrl *ctrl;

	/* The ctrl section lives at a different offset depending on the WQE
	 * layout: SGE data format uses the long-command (lcmd) layout, inline
	 * data the short-command (scmd) layout.
	 */
	df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT);
	if (df == CMDQ_DATA_SGE)
		ctrl = &wqe->wqe_lcmd.ctrl;
	else
		ctrl = &wqe->wqe_scmd.ctrl;

	/* clear HW busy bit */
	ctrl->ctrl_info = 0;
	cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
	wmb(); /* verify wqe is clear before updating ci */
	hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
}
191
/* Publish the completed WQE's error code and direct response value into the
 * locations the submitter registered in the command slot, if it asked for
 * either. Assumes an lcmd-layout WQE (all response-carrying commands use it).
 */
static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx,
				   struct cmdq_wqe *wqe)
{
	struct hinic3_cmdq_cmd_info *info = &cmdq->cmd_infos[prod_idx];
	struct cmdq_wqe_lcmd *lcmd = &wqe->wqe_lcmd;

	if (info->errcode) {
		__le32 status_info = lcmd->status.status_info;

		*info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL);
	}

	if (info->direct_resp)
		*info->direct_resp = lcmd->completion.resp.direct.val;
}
209
/* Completion path for a synchronous command at consumer index @ci: publish
 * status and completion code to the waiter under the cmdq lock, wake it, then
 * release the command buffer and the WQE slot.
 */
static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq,
				  struct cmdq_wqe *wqe, u16 ci)
{
	spin_lock(&cmdq->cmdq_lock);
	cmdq_update_cmd_status(cmdq, ci, wqe);
	if (cmdq->cmd_infos[ci].cmpt_code) {
		*cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE;
		cmdq->cmd_infos[ci].cmpt_code = NULL;
	}

	/* Ensure that completion code has been updated before updating done */
	smp_wmb();
	if (cmdq->cmd_infos[ci].done) {
		complete(cmdq->cmd_infos[ci].done);
		cmdq->cmd_infos[ci].done = NULL;
	}
	spin_unlock(&cmdq->cmdq_lock);

	/* Slot cleanup happens outside the lock; the waiter no longer holds
	 * pointers into this slot once done/cmpt_code are NULLed above.
	 */
	cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev);
	clear_wqe_complete_bit(cmdq, wqe, ci);
}
231
/* Completion event queue (CEQ) handler for cmdq events: drain all WQEs that
 * hardware has finished on the cmdq identified by @ceqe_data. Draining stops
 * at the first slot that is unused (TYPE_NONE) or still owned by hardware
 * (busy bit not yet flipped).
 */
void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data)
{
	enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
	struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
	struct hinic3_cmdq_cmd_info *cmd_info;
	struct cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic3_cmdq *cmdq;
	struct cmdq_wqe *wqe;
	__le32 ctrl_info;
	u16 ci;

	/* Guard against a malformed event selecting a nonexistent queue. */
	if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq)))
		return;

	cmdq = &cmdqs->cmdq[cmdq_type];
	while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) {
		cmd_info = &cmdq->cmd_infos[ci];
		switch (cmd_info->cmd_type) {
		case HINIC3_CMD_TYPE_NONE:
			return;
		case HINIC3_CMD_TYPE_TIMEOUT:
			dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
				 cmdq_type, ci);
			fallthrough;
		case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
			/* Waiter already gave up; just reclaim the slot. */
			cmdq_clear_cmd_buf(cmd_info, hwdev);
			clear_wqe_complete_bit(cmdq, wqe, ci);
			break;
		default:
			/* only arm bit is using scmd wqe,
			 * the other wqe is lcmd
			 */
			wqe_lcmd = &wqe->wqe_lcmd;
			ctrl_info = wqe_lcmd->ctrl.ctrl_info;
			if (!CMDQ_WQE_COMPLETED(ctrl_info))
				return;

			/* Order the busy-bit read before reading the rest of
			 * the DMA-written WQE contents.
			 */
			dma_rmb();
			/* For FORCE_STOP cmd_type, we also need to wait for
			 * the firmware processing to complete to prevent the
			 * firmware from accessing the released cmd_buf
			 */
			if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) {
				cmdq_clear_cmd_buf(cmd_info, hwdev);
				clear_wqe_complete_bit(cmdq, wqe, ci);
			} else {
				cmdq_sync_cmd_handler(cmdq, wqe, ci);
			}

			break;
		}
	}
}
285
wait_cmdqs_enable(struct hinic3_cmdqs * cmdqs)286 static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs)
287 {
288 unsigned long end;
289
290 end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT);
291 do {
292 if (cmdqs->status & HINIC3_CMDQ_ENABLE)
293 return 0;
294 usleep_range(1000, 2000);
295 } while (time_before(jiffies, end) && !cmdqs->disable_flag);
296
297 cmdqs->disable_flag = 1;
298
299 return -EBUSY;
300 }
301
cmdq_set_completion(struct cmdq_completion * complete,struct hinic3_cmd_buf * buf_out)302 static void cmdq_set_completion(struct cmdq_completion *complete,
303 struct hinic3_cmd_buf *buf_out)
304 {
305 struct hinic3_sge *sge = &complete->resp.sge;
306
307 hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE));
308 }
309
/* Claim one WQEBB for a new command; NULL when the ring is full. On success
 * the producer index of the claimed slot is stored in @pi.
 */
static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi)
{
	return hinic3_wq_free_wqebbs(wq) ? hinic3_wq_get_one_wqebb(wq, pi) :
					   NULL;
}
317
/* Fill the lcmd WQE's buffer descriptor SGE with the input buffer's DMA
 * address and size. buf_in->size is already little-endian (__le16), hence the
 * __force widening cast rather than a cpu_to_le32() conversion.
 */
static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe,
				  struct hinic3_cmd_buf *buf_in)
{
	hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr,
		       (__force __le32)buf_in->size);
}
324
/* Ring the cmdq doorbell to hand the new producer index to hardware. The low
 * 8 bits of the PI select the doorbell offset within the page (8 bytes per
 * slot); the high PI bits travel in db_info.
 */
static void cmdq_set_db(struct hinic3_cmdq *cmdq,
			enum hinic3_cmdq_type cmdq_type, u16 prod_idx)
{
	u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base;
	u16 db_ofs = (prod_idx & 0xFF) << 3;
	struct cmdq_db db;

	db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX));
	db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) |
				 CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE));
	/* Both 32-bit halves are posted as a single 64-bit MMIO store so the
	 * device sees info and head atomically.
	 */
	writeq(*(u64 *)&db, db_base + db_ofs);
}
337
/* Copy a fully prepared shadow WQE into the hardware-visible ring slot. The
 * body is copied first and the header — which carries the HW busy/owner bit —
 * is published last, so hardware can never observe a half-written WQE.
 */
static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe,
			  const struct cmdq_wqe *shadow_wqe)
{
	const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe;
	struct cmdq_header *dst = (struct cmdq_header *)hw_wqe;
	size_t len;

	/* Everything after the header. */
	len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header);
	memcpy(dst + 1, src + 1, len);
	/* Ensure buffer len before updating header */
	wmb();
	WRITE_ONCE(*dst, *src);
}
351
/* Populate the ctrl and header sections of a WQE. The ctrl section location
 * and length depend on the data format: SGE data uses the long-command
 * layout, inline data the short-command layout. The HW busy bit in the
 * header carries the ring's current wrap (owner) value.
 */
static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped,
				  u8 mod, u8 cmd, u16 prod_idx,
				  enum cmdq_completion_format complete_format,
				  enum cmdq_data_format data_format,
				  enum cmdq_bufdesc_len buf_len)
{
	struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
	enum cmdq_ctrl_sect_len sect_len;
	struct cmdq_ctrl *ctrl;

	if (data_format != CMDQ_DATA_SGE) {
		wqe->wqe_scmd.status.status_info = 0;
		ctrl = &wqe->wqe_scmd.ctrl;
		sect_len = CMDQ_CTRL_DIRECT_SECT_LEN;
	} else {
		wqe->wqe_lcmd.status.status_info = 0;
		ctrl = &wqe->wqe_lcmd.ctrl;
		sect_len = CMDQ_CTRL_SECT_LEN;
	}

	ctrl->ctrl_info = cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) |
				      CMDQ_CTRL_SET(cmd, CMD) |
				      CMDQ_CTRL_SET(mod, MOD));

	hdr->header_info =
		cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) |
			    CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) |
			    CMDQ_WQE_HDR_SET(data_format, DATA_FMT) |
			    CMDQ_WQE_HDR_SET(1, COMPLETE_REQ) |
			    CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) |
			    CMDQ_WQE_HDR_SET(sect_len, CTRL_LEN) |
			    CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT));
}
390
/* Build a complete long-command WQE: completion section per the response
 * style, then ctrl/header, then the buffer descriptor pointing at @buf_in.
 */
static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe,
			      enum cmdq_cmd_type cmd_type,
			      struct hinic3_cmd_buf *buf_in,
			      struct hinic3_cmd_buf *buf_out,
			      u8 wrapped, u8 mod, u8 cmd, u16 prod_idx)
{
	enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT;
	struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;

	if (cmd_type == CMDQ_CMD_SGE_RESP) {
		/* Response lands in buf_out; without one, keep direct fmt. */
		if (buf_out) {
			complete_format = CMDQ_COMPLETE_SGE;
			cmdq_set_completion(&wqe_lcmd->completion, buf_out);
		}
	} else {
		/* CMDQ_CMD_DIRECT_RESP: response is inline in the WQE. */
		wqe_lcmd->completion.resp.direct.val = 0;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
			      CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN);
	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}
416
/* After a wait timeout, inspect the WQE directly: if hardware did in fact
 * complete it (busy bit flipped), harvest the status and return 0; otherwise
 * return -EFAULT to signal a genuine timeout.
 */
static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq,
					  struct cmdq_wqe *wqe, u16 pi)
{
	__le32 ctrl_info = wqe->wqe_lcmd.ctrl.ctrl_info;

	if (!CMDQ_WQE_COMPLETED(ctrl_info)) {
		dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n");
		return -EFAULT;
	}

	cmdq_update_cmd_status(cmdq, pi, wqe);

	return 0;
}
435
clear_cmd_info(struct hinic3_cmdq_cmd_info * cmd_info,const struct hinic3_cmdq_cmd_info * saved_cmd_info)436 static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info,
437 const struct hinic3_cmdq_cmd_info *saved_cmd_info)
438 {
439 if (cmd_info->errcode == saved_cmd_info->errcode)
440 cmd_info->errcode = NULL;
441
442 if (cmd_info->done == saved_cmd_info->done)
443 cmd_info->done = NULL;
444
445 if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
446 cmd_info->direct_resp = NULL;
447 }
448
/* Wait for a submitted sync command to complete. On the fast path the CEQ
 * handler signals @saved_cmd_info->done. On timeout, re-check under the cmdq
 * lock whether the command actually completed (lost race with the handler),
 * and otherwise mark the slot TIMEOUT/FAKE_TIMEOUT so the handler can reclaim
 * it later. Returns 0 on completion, negative errno on real timeout.
 */
static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq,
					 struct hinic3_cmdq_cmd_info *cmd_info,
					 struct hinic3_cmdq_cmd_info *saved_cmd_info,
					 u64 curr_msg_id, u16 curr_prod_idx,
					 struct cmdq_wqe *curr_wqe,
					 u32 timeout)
{
	ulong timeo = msecs_to_jiffies(timeout);
	int err;

	if (wait_for_completion_timeout(saved_cmd_info->done, timeo))
		return 0;

	spin_lock_bh(&cmdq->cmdq_lock);
	/* Stop the handler from writing through our on-stack cmpt_code. */
	if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
		cmd_info->cmpt_code = NULL;

	/* The handler may have completed the command between the wait timing
	 * out and us taking the lock.
	 */
	if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) {
		dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n");
		spin_unlock_bh(&cmdq->cmdq_lock);
		return 0;
	}

	/* A matching msg id proves the slot still holds our command and was
	 * not recycled for a newer one.
	 */
	if (curr_msg_id == cmd_info->cmdq_msg_id) {
		err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe,
						     curr_prod_idx);
		if (err)
			cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT;
		else
			cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT;
	} else {
		err = -ETIMEDOUT;
		dev_err(cmdq->hwdev->dev,
			"Cmdq sync command current msg id mismatch cmd_info msg id\n");
	}

	clear_cmd_info(cmd_info, saved_cmd_info);
	spin_unlock_bh(&cmdq->cmdq_lock);

	return err;
}
490
/* Submit a command on @cmdq and synchronously wait for its direct (inline)
 * response, stored via @out_param. Returns 0 on success, the firmware error
 * code on command failure, -EBUSY when no WQE slot is free, -ETIMEDOUT on
 * timeout or -EAGAIN when the command was force-stopped.
 */
static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
				     struct hinic3_cmd_buf *buf_in,
				     __le64 *out_param)
{
	struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info;
	int cmpt_code = CMDQ_SEND_CMPT_CODE;
	struct cmdq_wqe *curr_wqe, wqe = {};
	struct hinic3_wq *wq = &cmdq->wq;
	u16 curr_prod_idx, next_prod_idx;
	struct completion done;
	u64 curr_msg_id;
	int errcode;
	u8 wrapped;
	int err;

	spin_lock_bh(&cmdq->cmdq_lock);
	curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx);
	if (!curr_wqe) {
		spin_unlock_bh(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	/* Toggle the wrap (owner) value each time the PI passes the end of
	 * the ring; the current value is stamped into the WQE busy bit.
	 */
	wrapped = cmdq->wrapped;
	next_prod_idx = curr_prod_idx + CMDQ_WQE_NUM_WQEBBS;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped ^= 1;
		next_prod_idx -= wq->q_depth;
	}

	/* Register on-stack result locations in the slot; saved_cmd_info is
	 * kept so the waiter can detect slot recycling after a timeout.
	 */
	cmd_info = &cmdq->cmd_infos[curr_prod_idx];
	init_completion(&done);
	refcount_inc(&buf_in->ref_cnt);
	cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP;
	cmd_info->done = &done;
	cmd_info->errcode = &errcode;
	cmd_info->direct_resp = out_param;
	cmd_info->cmpt_code = &cmpt_code;
	cmd_info->buf_in = buf_in;
	saved_cmd_info = *cmd_info;
	cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL,
			  wrapped, mod, cmd, curr_prod_idx);

	cmdq_wqe_fill(curr_wqe, &wqe);
	(cmd_info->cmdq_msg_id)++;
	curr_msg_id = cmd_info->cmdq_msg_id;
	cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
	spin_unlock_bh(&cmdq->cmdq_lock);

	err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info,
					    curr_msg_id, curr_prod_idx,
					    curr_wqe, CMDQ_CMD_TIMEOUT);
	if (err) {
		dev_err(cmdq->hwdev->dev,
			"Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n",
			mod, cmd, curr_prod_idx);
		err = -ETIMEDOUT;
	}

	if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) {
		dev_dbg(cmdq->hwdev->dev,
			"Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd);
		err = -EAGAIN;
	}

	smp_rmb(); /* read error code after completion */

	return err ? err : errcode;
}
559
/* Public entry point for a synchronous direct-response command: wait until
 * the cmdqs are enabled, then submit on the sync cmdq. Return semantics are
 * those of cmdq_sync_cmd_direct_resp(), or the enable-wait error.
 */
int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
			    struct hinic3_cmd_buf *buf_in, __le64 *out_param)
{
	struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
	int err;

	err = wait_cmdqs_enable(cmdqs);
	if (err) {
		dev_err(hwdev->dev, "Cmdq is disabled\n");
		return err;
	}

	return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC],
					 mod, cmd, buf_in, out_param);
}
578
/* Fill the firmware context for cmdq @cmdq_id: the current WQE page PFN with
 * enable/arm bits, and the WQ block PFN with the initial consumer index.
 */
static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id,
				 struct comm_cmdq_ctxt_info *ctxt_info)
{
	const struct hinic3_cmdqs *cmdqs;
	u64 cmdq_first_block_paddr, pfn;
	const struct hinic3_wq *wq;

	cmdqs = hwdev->cmdqs;
	wq = &cmdqs->cmdq[cmdq_id].wq;
	pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));

	ctxt_info->curr_wqe_page_pfn =
		cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) |
			    CMDQ_CTXT_SET(1, CEQ_EN) |
			    CMDQ_CTXT_SET(1, CEQ_ARM) |
			    CMDQ_CTXT_SET(0, EQ_ID) |
			    CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));

	/* With a 1-level CLA the WQ block PFN points at the shared block;
	 * for a 0-level CLA the first-WQE-page PFN computed above is reused
	 * as the block PFN (pfn is deliberately left unchanged).
	 */
	if (!hinic3_wq_is_0_level_cla(wq)) {
		cmdq_first_block_paddr = cmdqs->wq_block_paddr;
		pfn = CMDQ_PFN(cmdq_first_block_paddr);
	}

	ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) |
					      CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
}
605
init_cmdq(struct hinic3_cmdq * cmdq,struct hinic3_hwdev * hwdev,enum hinic3_cmdq_type q_type)606 static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev,
607 enum hinic3_cmdq_type q_type)
608 {
609 int err;
610
611 cmdq->cmdq_type = q_type;
612 cmdq->wrapped = 1;
613 cmdq->hwdev = hwdev;
614
615 spin_lock_init(&cmdq->cmdq_lock);
616
617 cmdq->cmd_infos = kzalloc_objs(*cmdq->cmd_infos, cmdq->wq.q_depth);
618 if (!cmdq->cmd_infos) {
619 err = -ENOMEM;
620 return err;
621 }
622
623 return 0;
624 }
625
/* Push the context of cmdq @cmdq_id to management firmware over the mailbox.
 * Returns 0 on success, -EFAULT on send failure or a non-zero fw status.
 */
static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id)
{
	struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {};
	struct mgmt_msg_params msg_params = {};
	int err;

	cmdq_ctxt.func_id = hinic3_global_func_id(hwdev);
	cmdq_ctxt.cmdq_id = cmdq_id;
	cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt);

	mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt,
				     sizeof(cmdq_ctxt));

	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
				       COMM_CMD_SET_CMDQ_CTXT, &msg_params);
	if (err || cmdq_ctxt.head.status) {
		dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n",
			err, cmdq_ctxt.head.status);
		return -EFAULT;
	}

	return 0;
}
649
hinic3_set_cmdq_ctxts(struct hinic3_hwdev * hwdev)650 static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev)
651 {
652 struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
653 u8 cmdq_type;
654 int err;
655
656 for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
657 err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type);
658 if (err)
659 return err;
660 }
661
662 cmdqs->status |= HINIC3_CMDQ_ENABLE;
663 cmdqs->disable_flag = 0;
664
665 return 0;
666 }
667
/* Create the work queue of every cmdq and, when the WQ uses a 1-level CLA,
 * allocate one shared DMA block holding all cmdqs' page address tables.
 * On failure, destroys exactly the WQs created so far.
 */
static int create_cmdq_wq(struct hinic3_hwdev *hwdev,
			  struct hinic3_cmdqs *cmdqs)
{
	u8 cmdq_type;
	int err;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq,
				       CMDQ_DEPTH, CMDQ_WQEBB_SIZE);
		if (err) {
			dev_err(hwdev->dev, "Failed to create cmdq wq\n");
			goto err_destroy_wq;
		}
	}

	/* 1-level Chip Logical Address (CLA) must put all
	 * cmdq's wq page addr in one wq block
	 */
	if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) {
		/* Each cmdq gets a CMDQ_WQ_CLA_SIZE slice of the block. */
		if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
		    CMDQ_WQ_CLA_SIZE / sizeof(u64)) {
			err = -EINVAL;
			dev_err(hwdev->dev,
				"Cmdq number of wq pages exceeds limit: %lu\n",
				CMDQ_WQ_CLA_SIZE / sizeof(u64));
			goto err_destroy_wq;
		}

		cmdqs->wq_block_vaddr =
			dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
					   &cmdqs->wq_block_paddr, GFP_KERNEL);
		if (!cmdqs->wq_block_vaddr) {
			err = -ENOMEM;
			goto err_destroy_wq;
		}

		for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
			memcpy((u8 *)cmdqs->wq_block_vaddr +
			       CMDQ_WQ_CLA_SIZE * cmdq_type,
			       cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr,
			       cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *
			       sizeof(__be64));
	}

	return 0;

err_destroy_wq:
	/* cmdq_type holds the count of successfully created WQs: it equals
	 * the failing index on the first loop, or cmdq_num when the failure
	 * happened after all WQs were created.
	 */
	while (cmdq_type > 0) {
		cmdq_type--;
		hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
	}

	return err;
}
722
destroy_cmdq_wq(struct hinic3_hwdev * hwdev,struct hinic3_cmdqs * cmdqs)723 static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev,
724 struct hinic3_cmdqs *cmdqs)
725 {
726 u8 cmdq_type;
727
728 if (cmdqs->wq_block_vaddr)
729 dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
730 cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr);
731
732 for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
733 hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
734 }
735
init_cmdqs(struct hinic3_hwdev * hwdev)736 static int init_cmdqs(struct hinic3_hwdev *hwdev)
737 {
738 struct hinic3_cmdqs *cmdqs;
739
740 cmdqs = kzalloc_obj(*cmdqs);
741 if (!cmdqs)
742 return -ENOMEM;
743
744 hwdev->cmdqs = cmdqs;
745 cmdqs->hwdev = hwdev;
746 cmdqs->cmdq_num = hwdev->max_cmdq;
747
748 cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev,
749 CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0);
750 if (!cmdqs->cmd_buf_pool) {
751 dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n");
752 kfree(cmdqs);
753 return -ENOMEM;
754 }
755
756 return 0;
757 }
758
cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info * cmd_info)759 static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info)
760 {
761 if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP)
762 return;
763
764 cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP;
765
766 if (cmd_info->cmpt_code &&
767 *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
768 *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;
769
770 if (cmd_info->done) {
771 complete(cmd_info->done);
772 cmd_info->done = NULL;
773 cmd_info->cmpt_code = NULL;
774 cmd_info->direct_resp = NULL;
775 cmd_info->errcode = NULL;
776 }
777 }
778
hinic3_cmdq_flush_cmd(struct hinic3_cmdq * cmdq)779 static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq)
780 {
781 struct hinic3_cmdq_cmd_info *cmd_info;
782 u16 ci;
783
784 spin_lock_bh(&cmdq->cmdq_lock);
785 while (cmdq_read_wqe(&cmdq->wq, &ci)) {
786 hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
787 cmd_info = &cmdq->cmd_infos[ci];
788 if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP)
789 cmdq_flush_sync_cmd(cmd_info);
790 }
791 spin_unlock_bh(&cmdq->cmdq_lock);
792 }
793
hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev * hwdev)794 void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev)
795 {
796 struct hinic3_cmdq *cmdq;
797 u16 wqe_cnt, wqe_idx, i;
798 struct hinic3_wq *wq;
799
800 cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC];
801 spin_lock_bh(&cmdq->cmdq_lock);
802 wq = &cmdq->wq;
803 wqe_cnt = hinic3_wq_get_used(wq);
804 for (i = 0; i < wqe_cnt; i++) {
805 wqe_idx = (wq->cons_idx + i) & wq->idx_mask;
806 cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx);
807 }
808 spin_unlock_bh(&cmdq->cmdq_lock);
809 }
810
hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq * cmdq)811 static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq)
812 {
813 u16 i;
814
815 for (i = 0; i < cmdq->wq.q_depth; i++)
816 cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
817 }
818
hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev * hwdev)819 int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev)
820 {
821 struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
822 u8 cmdq_type;
823
824 for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
825 hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
826 hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
827 cmdqs->cmdq[cmdq_type].wrapped = 1;
828 hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq);
829 }
830
831 return hinic3_set_cmdq_ctxts(hwdev);
832 }
833
/* Bring up the cmdq subsystem: container + buffer pool, per-cmdq work queues,
 * doorbell mapping, per-cmdq software state and firmware contexts. Unwinds
 * in reverse order on failure.
 */
int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdqs *cmdqs;
	void __iomem *db_base;
	u8 cmdq_type;
	int err;

	err = init_cmdqs(hwdev);
	if (err)
		goto err_out;

	cmdqs = hwdev->cmdqs;
	err = create_cmdq_wq(hwdev, cmdqs);
	if (err)
		goto err_free_cmdqs;

	err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
	if (err) {
		dev_err(hwdev->dev, "Failed to allocate doorbell address\n");
		goto err_destroy_cmdq_wq;
	}
	cmdqs->cmdqs_db_base = db_base;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type);
		if (err) {
			dev_err(hwdev->dev,
				"Failed to initialize cmdq type : %d\n",
				cmdq_type);
			goto err_free_cmd_infos;
		}
	}

	err = hinic3_set_cmdq_ctxts(hwdev);
	if (err)
		goto err_free_cmd_infos;

	return 0;

err_free_cmd_infos:
	/* cmdq_type counts the cmdqs whose cmd_infos were allocated: the
	 * failing index on init_cmdq() failure, or cmdq_num when set_ctxts
	 * failed after the loop completed.
	 */
	while (cmdq_type > 0) {
		cmdq_type--;
		kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
	}

	hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
err_destroy_cmdq_wq:
	destroy_cmdq_wq(hwdev, cmdqs);
err_free_cmdqs:
	dma_pool_destroy(cmdqs->cmd_buf_pool);
	kfree(cmdqs);
err_out:
	return err;
}
888
hinic3_cmdqs_free(struct hinic3_hwdev * hwdev)889 void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev)
890 {
891 struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
892 u8 cmdq_type;
893
894 cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
895
896 for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
897 hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
898 hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
899 kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
900 }
901
902 hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
903 destroy_cmdq_wq(hwdev, cmdqs);
904 dma_pool_destroy(cmdqs->cmd_buf_pool);
905 kfree(cmdqs);
906 }
907
hinic3_cmdq_idle(struct hinic3_cmdq * cmdq)908 bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq)
909 {
910 return hinic3_wq_get_used(&cmdq->wq) == 0;
911 }
912