// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include "hinic3_cmdq.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"

#define CMDQ_BUF_SIZE             2048
#define CMDQ_WQEBB_SIZE           64

#define CMDQ_CMD_TIMEOUT          5000
#define CMDQ_ENABLE_WAIT_TIMEOUT  300

#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK  GENMASK_ULL(51, 0)
#define CMDQ_CTXT_EQ_ID_MASK              GENMASK_ULL(60, 53)
#define CMDQ_CTXT_CEQ_ARM_MASK            BIT_ULL(61)
#define CMDQ_CTXT_CEQ_EN_MASK             BIT_ULL(62)
#define CMDQ_CTXT_HW_BUSY_BIT_MASK        BIT_ULL(63)

#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK       GENMASK_ULL(51, 0)
#define CMDQ_CTXT_CI_MASK                 GENMASK_ULL(63, 52)
#define CMDQ_CTXT_SET(val, member)  \
	FIELD_PREP(CMDQ_CTXT_##member##_MASK, val)

#define CMDQ_WQE_HDR_BUFDESC_LEN_MASK        GENMASK(7, 0)
#define CMDQ_WQE_HDR_COMPLETE_FMT_MASK       BIT(15)
#define CMDQ_WQE_HDR_DATA_FMT_MASK           BIT(22)
#define CMDQ_WQE_HDR_COMPLETE_REQ_MASK       BIT(23)
#define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK  GENMASK(28, 27)
#define CMDQ_WQE_HDR_CTRL_LEN_MASK           GENMASK(30, 29)
#define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK        BIT(31)
#define CMDQ_WQE_HDR_SET(val, member)  \
	FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val)
#define CMDQ_WQE_HDR_GET(val, member)  \
	FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val))

#define CMDQ_CTRL_PI_MASK              GENMASK(15, 0)
#define CMDQ_CTRL_CMD_MASK             GENMASK(23, 16)
#define CMDQ_CTRL_MOD_MASK             GENMASK(28, 24)
#define CMDQ_CTRL_HW_BUSY_BIT_MASK     BIT(31)
#define CMDQ_CTRL_SET(val, member)  \
	FIELD_PREP(CMDQ_CTRL_##member##_MASK, val)
#define CMDQ_CTRL_GET(val, member)  \
	FIELD_GET(CMDQ_CTRL_##member##_MASK, val)

#define CMDQ_WQE_ERRCODE_VAL_MASK      GENMASK(30, 0)
#define CMDQ_WQE_ERRCODE_GET(val, member)  \
	FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val))

#define CMDQ_DB_INFO_HI_PROD_IDX_MASK  GENMASK(7, 0)
#define CMDQ_DB_INFO_SET(val, member)  \
	FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val)

#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK   BIT(23)
#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK    GENMASK(26, 24)
#define CMDQ_DB_HEAD_SET(val, member)  \
	FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val)

#define CMDQ_CEQE_TYPE_MASK            GENMASK(2, 0)
#define CMDQ_CEQE_GET(val, member)  \
	FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val))

#define CMDQ_WQE_HEADER(wqe)           ((struct cmdq_header *)(wqe))
#define CMDQ_WQE_COMPLETED(ctrl_info)  \
	CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT)

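/* Convert a DMA address to a 4 KiB page frame number for queue contexts */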
#define CMDQ_PFN(addr)  ((addr) >> 12)

/* The cmdq work queue's Chip Logical Address (CLA) table is at most 512 bytes */
#define CMDQ_WQ_CLA_SIZE  512

/* Completion codes: send, direct sync, force stop */
#define CMDQ_SEND_CMPT_CODE         10
#define CMDQ_DIRECT_SYNC_CMPT_CODE  11
#define CMDQ_FORCE_STOP_CMPT_CODE   12

enum cmdq_data_format {
	CMDQ_DATA_SGE    = 0,
	CMDQ_DATA_DIRECT = 1,
};

enum cmdq_ctrl_sect_len {
	CMDQ_CTRL_SECT_LEN        = 1,
	CMDQ_CTRL_DIRECT_SECT_LEN = 2,
};

enum cmdq_bufdesc_len {
	CMDQ_BUFDESC_LCMD_LEN = 2,
	CMDQ_BUFDESC_SCMD_LEN = 3,
};

enum cmdq_completion_format {
	CMDQ_COMPLETE_DIRECT = 0,
	CMDQ_COMPLETE_SGE    = 1,
};

enum cmdq_cmd_type {
	CMDQ_CMD_DIRECT_RESP,
	CMDQ_CMD_SGE_RESP,
};

#define CMDQ_WQE_NUM_WQEBBS  1

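/*
 * Return the WQE at the current consumer index without advancing CI, or
 * NULL if no WQEBBs are in use. @ci receives the masked consumer index.
 */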
static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
{
	if (hinic3_wq_get_used(wq) == 0)
		return NULL;

	*ci = wq->cons_idx & wq->idx_mask;

	return get_q_element(&wq->qpages, wq->cons_idx, NULL);
}

struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmd_buf *cmd_buf;
	struct hinic3_cmdqs *cmdqs;

	cmdqs = hwdev->cmdqs;

	cmd_buf = kmalloc(sizeof(*cmd_buf), GFP_ATOMIC);
	if (!cmd_buf)
		return NULL;

	cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
				      &cmd_buf->dma_addr);
	if (!cmd_buf->buf) {
		dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n");
		goto err_free_cmd_buf;
	}

	cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE);
	refcount_set(&cmd_buf->ref_cnt, 1);

	return cmd_buf;

err_free_cmd_buf:
	kfree(cmd_buf);

	return NULL;
}

void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
			 struct hinic3_cmd_buf *cmd_buf)
{
	struct hinic3_cmdqs *cmdqs;

	if (!refcount_dec_and_test(&cmd_buf->ref_cnt))
		return;

	cmdqs = hwdev->cmdqs;

	dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
	kfree(cmd_buf);
}

static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info,
			       struct hinic3_hwdev *hwdev)
{
	if (cmd_info->buf_in) {
		hinic3_free_cmd_buf(hwdev, cmd_info->buf_in);
		cmd_info->buf_in = NULL;
	}
}

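/*
 * Hand a consumed WQE back to the work queue: clear the HW busy (owner)
 * bit in its control section, mark the slot free and advance the CI.
 */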
static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq,
				   struct cmdq_wqe *wqe, u16 ci)
{
	struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
	__le32 header_info = hdr->header_info;
	enum cmdq_data_format df;
	struct cmdq_ctrl *ctrl;

	df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT);
	if (df == CMDQ_DATA_SGE)
		ctrl = &wqe->wqe_lcmd.ctrl;
	else
		ctrl = &wqe->wqe_scmd.ctrl;

	/* clear HW busy bit */
	ctrl->ctrl_info = 0;
	cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
	wmb(); /* ensure the wqe is cleared before updating ci */
	hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
}

static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx,
				   struct cmdq_wqe *wqe)
{
	struct hinic3_cmdq_cmd_info *cmd_info;
	struct cmdq_wqe_lcmd *wqe_lcmd;
	__le32 status_info;

	wqe_lcmd = &wqe->wqe_lcmd;
	cmd_info = &cmdq->cmd_infos[prod_idx];
	if (cmd_info->errcode) {
		status_info = wqe_lcmd->status.status_info;
		*cmd_info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL);
	}

	if (cmd_info->direct_resp)
		*cmd_info->direct_resp = wqe_lcmd->completion.resp.direct.val;
}

static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq,
				  struct cmdq_wqe *wqe, u16 ci)
{
	spin_lock(&cmdq->cmdq_lock);
	cmdq_update_cmd_status(cmdq, ci, wqe);
	if (cmdq->cmd_infos[ci].cmpt_code) {
		*cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE;
		cmdq->cmd_infos[ci].cmpt_code = NULL;
	}

	/* Ensure that completion code has been updated before updating done */
	smp_wmb();
	if (cmdq->cmd_infos[ci].done) {
		complete(cmdq->cmd_infos[ci].done);
		cmdq->cmd_infos[ci].done = NULL;
	}
	spin_unlock(&cmdq->cmdq_lock);

	cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev);
	clear_wqe_complete_bit(cmdq, wqe, ci);
}

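/*
 * Completion event handler for a cmdq: walk the queue from the consumer
 * index and retire every WQE the hardware has finished with, stopping at
 * the first one that is still pending.
 */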
void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data)
{
	enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
	struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
	struct hinic3_cmdq_cmd_info *cmd_info;
	struct cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic3_cmdq *cmdq;
	struct cmdq_wqe *wqe;
	__le32 ctrl_info;
	u16 ci;

	if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq)))
		return;

	cmdq = &cmdqs->cmdq[cmdq_type];
	while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) {
		cmd_info = &cmdq->cmd_infos[ci];
		switch (cmd_info->cmd_type) {
		case HINIC3_CMD_TYPE_NONE:
			return;
		case HINIC3_CMD_TYPE_TIMEOUT:
			dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
				 cmdq_type, ci);
			fallthrough;
		case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
			cmdq_clear_cmd_buf(cmd_info, hwdev);
			clear_wqe_complete_bit(cmdq, wqe, ci);
			break;
		default:
			/* Only the arm bit command uses an scmd wqe; all
			 * other commands use lcmd wqes.
			 */
			wqe_lcmd = &wqe->wqe_lcmd;
			ctrl_info = wqe_lcmd->ctrl.ctrl_info;
			if (!CMDQ_WQE_COMPLETED(ctrl_info))
				return;

			dma_rmb();
			/* For FORCE_STOP cmd_type, we also need to wait for
			 * the firmware processing to complete to prevent the
			 * firmware from accessing the released cmd_buf
			 */
			if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) {
				cmdq_clear_cmd_buf(cmd_info, hwdev);
				clear_wqe_complete_bit(cmdq, wqe, ci);
			} else {
				cmdq_sync_cmd_handler(cmdq, wqe, ci);
			}

			break;
		}
	}
}

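/*
 * Wait until hinic3_set_cmdq_ctxts() has marked the cmdqs enabled. Gives
 * up after CMDQ_ENABLE_WAIT_TIMEOUT ms and latches the disable flag so
 * later callers fail fast.
 */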
static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs)
{
	unsigned long end;

	end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT);
	do {
		if (cmdqs->status & HINIC3_CMDQ_ENABLE)
			return 0;
		usleep_range(1000, 2000);
	} while (time_before(jiffies, end) && !cmdqs->disable_flag);

	cmdqs->disable_flag = 1;

	return -EBUSY;
}

static void cmdq_set_completion(struct cmdq_completion *complete,
				struct hinic3_cmd_buf *buf_out)
{
	struct hinic3_sge *sge = &complete->resp.sge;

	hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE));
}

static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi)
{
	if (!hinic3_wq_free_wqebbs(wq))
		return NULL;

	return hinic3_wq_get_one_wqebb(wq, pi);
}

static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe,
				  struct hinic3_cmd_buf *buf_in)
{
	hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr,
		       (__force __le32)buf_in->size);
}

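/*
 * Ring the doorbell for a cmdq. The low 8 bits of the producer index
 * select the doorbell offset within the page; the upper bits are carried
 * in db_info.
 */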
static void cmdq_set_db(struct hinic3_cmdq *cmdq,
			enum hinic3_cmdq_type cmdq_type, u16 prod_idx)
{
	u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base;
	u16 db_ofs = (prod_idx & 0xFF) << 3;
	struct cmdq_db db;

	db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX));
	db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) |
				 CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE));
	writeq(*(u64 *)&db, db_base + db_ofs);
}

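/*
 * Copy a shadow WQE into the hardware-visible ring. The body is copied
 * first; the header, which holds the HW busy (owner) bit, is written last
 * so the hardware cannot observe a partially written WQE.
 */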
static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe,
			  const struct cmdq_wqe *shadow_wqe)
{
	const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe;
	struct cmdq_header *dst = (struct cmdq_header *)hw_wqe;
	size_t len;

	len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header);
	memcpy(dst + 1, src + 1, len);
	/* Ensure the wqe body is written before updating the header */
	wmb();
	WRITE_ONCE(*dst, *src);
}

static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped,
				  u8 mod, u8 cmd, u16 prod_idx,
				  enum cmdq_completion_format complete_format,
				  enum cmdq_data_format data_format,
				  enum cmdq_bufdesc_len buf_len)
{
	struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
	enum cmdq_ctrl_sect_len ctrl_len;
	struct cmdq_wqe_lcmd *wqe_lcmd;
	struct cmdq_wqe_scmd *wqe_scmd;
	struct cmdq_ctrl *ctrl;

	if (data_format == CMDQ_DATA_SGE) {
		wqe_lcmd = &wqe->wqe_lcmd;
		wqe_lcmd->status.status_info = 0;
		ctrl = &wqe_lcmd->ctrl;
		ctrl_len = CMDQ_CTRL_SECT_LEN;
	} else {
		wqe_scmd = &wqe->wqe_scmd;
		wqe_scmd->status.status_info = 0;
		ctrl = &wqe_scmd->ctrl;
		ctrl_len = CMDQ_CTRL_DIRECT_SECT_LEN;
	}

	ctrl->ctrl_info =
		cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) |
			    CMDQ_CTRL_SET(cmd, CMD) |
			    CMDQ_CTRL_SET(mod, MOD));

	hdr->header_info =
		cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) |
			    CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) |
			    CMDQ_WQE_HDR_SET(data_format, DATA_FMT) |
			    CMDQ_WQE_HDR_SET(1, COMPLETE_REQ) |
			    CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) |
			    CMDQ_WQE_HDR_SET(ctrl_len, CTRL_LEN) |
			    CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT));
}

static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe,
			      enum cmdq_cmd_type cmd_type,
			      struct hinic3_cmd_buf *buf_in,
			      struct hinic3_cmd_buf *buf_out,
			      u8 wrapped, u8 mod, u8 cmd, u16 prod_idx)
{
	enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT;
	struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;

	switch (cmd_type) {
	case CMDQ_CMD_DIRECT_RESP:
		wqe_lcmd->completion.resp.direct.val = 0;
		break;
	case CMDQ_CMD_SGE_RESP:
		if (buf_out) {
			complete_format = CMDQ_COMPLETE_SGE;
			cmdq_set_completion(&wqe_lcmd->completion, buf_out);
		}
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
			      CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN);
	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}

static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq,
					  struct cmdq_wqe *wqe, u16 pi)
{
	struct cmdq_wqe_lcmd *wqe_lcmd;
	struct cmdq_ctrl *ctrl;
	__le32 ctrl_info;

	wqe_lcmd = &wqe->wqe_lcmd;
	ctrl = &wqe_lcmd->ctrl;
	ctrl_info = ctrl->ctrl_info;
	if (!CMDQ_WQE_COMPLETED(ctrl_info)) {
		dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n");
		return -EFAULT;
	}
	cmdq_update_cmd_status(cmdq, pi, wqe);

	return 0;
}

static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info,
			   const struct hinic3_cmdq_cmd_info *saved_cmd_info)
{
	if (cmd_info->errcode == saved_cmd_info->errcode)
		cmd_info->errcode = NULL;

	if (cmd_info->done == saved_cmd_info->done)
		cmd_info->done = NULL;

	if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
		cmd_info->direct_resp = NULL;
}

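/*
 * Wait for a posted sync command. On timeout, three cases are told apart:
 * the event was merely missed and the WQE did complete (FAKE_TIMEOUT), the
 * hardware really did not finish (TIMEOUT), or the slot was already reused
 * for a newer command (msg id mismatch).
 */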
static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq,
					 struct hinic3_cmdq_cmd_info *cmd_info,
					 struct hinic3_cmdq_cmd_info *saved_cmd_info,
					 u64 curr_msg_id, u16 curr_prod_idx,
					 struct cmdq_wqe *curr_wqe,
					 u32 timeout)
{
	ulong timeo = msecs_to_jiffies(timeout);
	int err;

	if (wait_for_completion_timeout(saved_cmd_info->done, timeo))
		return 0;

	spin_lock_bh(&cmdq->cmdq_lock);
	if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
		cmd_info->cmpt_code = NULL;

	if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) {
		dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n");
		spin_unlock_bh(&cmdq->cmdq_lock);
		return 0;
	}

	if (curr_msg_id == cmd_info->cmdq_msg_id) {
		err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe,
						     curr_prod_idx);
		if (err)
			cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT;
		else
			cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT;
	} else {
		err = -ETIMEDOUT;
		dev_err(cmdq->hwdev->dev,
			"Cmdq sync command current msg id mismatch cmd_info msg id\n");
	}

	clear_cmd_info(cmd_info, saved_cmd_info);
	spin_unlock_bh(&cmdq->cmdq_lock);

	return err;
}

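/*
 * Build a direct-response WQE in a shadow copy, publish it under cmdq_lock
 * and sleep until the completion event arrives or the command times out.
 * The wrapped value is written to the HW busy bit and flips each time the
 * producer index wraps, which lets hardware tell new WQEs from stale ones.
 */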
static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
				     struct hinic3_cmd_buf *buf_in,
				     __le64 *out_param)
{
	struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info;
	int cmpt_code = CMDQ_SEND_CMPT_CODE;
	struct cmdq_wqe *curr_wqe, wqe = {};
	struct hinic3_wq *wq = &cmdq->wq;
	u16 curr_prod_idx, next_prod_idx;
	struct completion done;
	u64 curr_msg_id;
	int errcode;
	u8 wrapped;
	int err;

	spin_lock_bh(&cmdq->cmdq_lock);
	curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx);
	if (!curr_wqe) {
		spin_unlock_bh(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	wrapped = cmdq->wrapped;
	next_prod_idx = curr_prod_idx + CMDQ_WQE_NUM_WQEBBS;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped ^= 1;
		next_prod_idx -= wq->q_depth;
	}

	cmd_info = &cmdq->cmd_infos[curr_prod_idx];
	init_completion(&done);
	refcount_inc(&buf_in->ref_cnt);
	cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP;
	cmd_info->done = &done;
	cmd_info->errcode = &errcode;
	cmd_info->direct_resp = out_param;
	cmd_info->cmpt_code = &cmpt_code;
	cmd_info->buf_in = buf_in;
	saved_cmd_info = *cmd_info;
	cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL,
			  wrapped, mod, cmd, curr_prod_idx);

	cmdq_wqe_fill(curr_wqe, &wqe);
	(cmd_info->cmdq_msg_id)++;
	curr_msg_id = cmd_info->cmdq_msg_id;
	cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
	spin_unlock_bh(&cmdq->cmdq_lock);

	err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info,
					    curr_msg_id, curr_prod_idx,
					    curr_wqe, CMDQ_CMD_TIMEOUT);
	if (err) {
		dev_err(cmdq->hwdev->dev,
			"Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n",
			mod, cmd, curr_prod_idx);
		err = -ETIMEDOUT;
	}

	if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) {
		dev_dbg(cmdq->hwdev->dev,
			"Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd);
		err = -EAGAIN;
	}

	smp_rmb(); /* read error code after completion */

	return err ? err : errcode;
}

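/*
 * Issue a synchronous command on the SYNC cmdq and collect the direct
 * response in @out_param. Minimal usage sketch (illustrative only; mod and
 * cmd stand for a real module id and command opcode):
 *
 *	struct hinic3_cmd_buf *buf = hinic3_alloc_cmd_buf(hwdev);
 *	__le64 out = 0;
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...fill buf->buf with the command payload, set buf->size...
 *	err = hinic3_cmdq_direct_resp(hwdev, mod, cmd, buf, &out);
 *	hinic3_free_cmd_buf(hwdev, buf);
 */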
int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
			    struct hinic3_cmd_buf *buf_in, __le64 *out_param)
{
	struct hinic3_cmdqs *cmdqs;
	int err;

	cmdqs = hwdev->cmdqs;
	err = wait_cmdqs_enable(cmdqs);
	if (err) {
		dev_err(hwdev->dev, "Cmdq is disabled\n");
		return err;
	}

	err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC],
					mod, cmd, buf_in, out_param);

	return err;
}

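/*
 * Build the queue context pushed to the management firmware: the current
 * WQE page PFN plus CEQ settings in one qword, and the WQ block PFN with
 * the initial consumer index in the other.
 */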
static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id,
				 struct comm_cmdq_ctxt_info *ctxt_info)
{
	const struct hinic3_cmdqs *cmdqs;
	u64 cmdq_first_block_paddr, pfn;
	const struct hinic3_wq *wq;

	cmdqs = hwdev->cmdqs;
	wq = &cmdqs->cmdq[cmdq_id].wq;
	pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));

	ctxt_info->curr_wqe_page_pfn =
		cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) |
			    CMDQ_CTXT_SET(1, CEQ_EN) |
			    CMDQ_CTXT_SET(1, CEQ_ARM) |
			    CMDQ_CTXT_SET(0, EQ_ID) |
			    CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));

	if (!hinic3_wq_is_0_level_cla(wq)) {
		cmdq_first_block_paddr = cmdqs->wq_block_paddr;
		pfn = CMDQ_PFN(cmdq_first_block_paddr);
	}

	ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) |
					      CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
}

static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev,
		     enum hinic3_cmdq_type q_type)
{
	cmdq->cmdq_type = q_type;
	cmdq->wrapped = 1;
	cmdq->hwdev = hwdev;

	spin_lock_init(&cmdq->cmdq_lock);

	cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos),
				  GFP_KERNEL);
	if (!cmdq->cmd_infos)
		return -ENOMEM;

	return 0;
}

static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id)
{
	struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {};
	struct mgmt_msg_params msg_params = {};
	int err;

	cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt);
	cmdq_ctxt.func_id = hinic3_global_func_id(hwdev);
	cmdq_ctxt.cmdq_id = cmdq_id;

	mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt,
				     sizeof(cmdq_ctxt));

	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
				       COMM_CMD_SET_CMDQ_CTXT, &msg_params);
	if (err || cmdq_ctxt.head.status) {
		dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n",
			err, cmdq_ctxt.head.status);
		return -EFAULT;
	}

	return 0;
}

static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
	u8 cmdq_type;
	int err;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type);
		if (err)
			return err;
	}

	cmdqs->status |= HINIC3_CMDQ_ENABLE;
	cmdqs->disable_flag = 0;

	return 0;
}

static int create_cmdq_wq(struct hinic3_hwdev *hwdev,
			  struct hinic3_cmdqs *cmdqs)
{
	u8 cmdq_type;
	int err;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq,
				       CMDQ_DEPTH, CMDQ_WQEBB_SIZE);
		if (err) {
			dev_err(hwdev->dev, "Failed to create cmdq wq\n");
			goto err_destroy_wq;
		}
	}

	/* With 1-level Chip Logical Address (CLA), the wq page addresses of
	 * all cmdqs must be placed in a single wq block.
	 */
	if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) {
		if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
		    CMDQ_WQ_CLA_SIZE / sizeof(u64)) {
			err = -EINVAL;
			dev_err(hwdev->dev,
				"Cmdq number of wq pages exceeds limit: %lu\n",
				CMDQ_WQ_CLA_SIZE / sizeof(u64));
			goto err_destroy_wq;
		}

		cmdqs->wq_block_vaddr =
			dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
					   &cmdqs->wq_block_paddr, GFP_KERNEL);
		if (!cmdqs->wq_block_vaddr) {
			err = -ENOMEM;
			goto err_destroy_wq;
		}

		for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
			memcpy((u8 *)cmdqs->wq_block_vaddr +
			       CMDQ_WQ_CLA_SIZE * cmdq_type,
			       cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr,
			       cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *
			       sizeof(__be64));
	}

	return 0;

err_destroy_wq:
	while (cmdq_type > 0) {
		cmdq_type--;
		hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
	}

	return err;
}

static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev,
			    struct hinic3_cmdqs *cmdqs)
{
	u8 cmdq_type;

	if (cmdqs->wq_block_vaddr)
		dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
				  cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr);

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
		hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
}

static int init_cmdqs(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdqs *cmdqs;

	cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
	if (!cmdqs)
		return -ENOMEM;

	hwdev->cmdqs = cmdqs;
	cmdqs->hwdev = hwdev;
	cmdqs->cmdq_num = hwdev->max_cmdq;

	cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev,
					      CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0);
	if (!cmdqs->cmd_buf_pool) {
		dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n");
		kfree(cmdqs);
		return -ENOMEM;
	}

	return 0;
}

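/*
 * Force-complete a pending synchronous command so its waiter is released.
 * Used from the flush paths when the cmdq is reset or torn down.
 */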
static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info)
{
	if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP)
		return;

	cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP;

	if (cmd_info->cmpt_code &&
	    *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
		*cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;

	if (cmd_info->done) {
		complete(cmd_info->done);
		cmd_info->done = NULL;
		cmd_info->cmpt_code = NULL;
		cmd_info->direct_resp = NULL;
		cmd_info->errcode = NULL;
	}
}

static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq)
{
	struct hinic3_cmdq_cmd_info *cmd_info;
	u16 ci;

	spin_lock_bh(&cmdq->cmdq_lock);
	while (cmdq_read_wqe(&cmdq->wq, &ci)) {
		hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
		cmd_info = &cmdq->cmd_infos[ci];
		if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP)
			cmdq_flush_sync_cmd(cmd_info);
	}
	spin_unlock_bh(&cmdq->cmdq_lock);
}

void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdq *cmdq;
	u16 wqe_cnt, wqe_idx, i;
	struct hinic3_wq *wq;

	cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC];
	spin_lock_bh(&cmdq->cmdq_lock);
	wq = &cmdq->wq;
	wqe_cnt = hinic3_wq_get_used(wq);
	for (i = 0; i < wqe_cnt; i++) {
		wqe_idx = (wq->cons_idx + i) & wq->idx_mask;
		cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx);
	}
	spin_unlock_bh(&cmdq->cmdq_lock);
}

static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq)
{
	u16 i;

	for (i = 0; i < cmdq->wq.q_depth; i++)
		cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
}

int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
	u8 cmdq_type;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
		hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
		cmdqs->cmdq[cmdq_type].wrapped = 1;
		hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq);
	}

	return hinic3_set_cmdq_ctxts(hwdev);
}

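/*
 * Bring-up order: allocate the cmdqs container and DMA pool, create the
 * work queues, map a doorbell page, allocate per-queue command info
 * arrays, then push the queue contexts to the firmware.
 */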
int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdqs *cmdqs;
	void __iomem *db_base;
	u8 cmdq_type;
	int err;

	err = init_cmdqs(hwdev);
	if (err)
		goto err_out;

	cmdqs = hwdev->cmdqs;
	err = create_cmdq_wq(hwdev, cmdqs);
	if (err)
		goto err_free_cmdqs;

	err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
	if (err) {
		dev_err(hwdev->dev, "Failed to allocate doorbell address\n");
		goto err_destroy_cmdq_wq;
	}
	cmdqs->cmdqs_db_base = db_base;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type);
		if (err) {
			dev_err(hwdev->dev,
				"Failed to initialize cmdq type: %d\n",
				cmdq_type);
			goto err_free_cmd_infos;
		}
	}

	err = hinic3_set_cmdq_ctxts(hwdev);
	if (err)
		goto err_free_cmd_infos;

	return 0;

err_free_cmd_infos:
	while (cmdq_type > 0) {
		cmdq_type--;
		kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
	}

	hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);

err_destroy_cmdq_wq:
	destroy_cmdq_wq(hwdev, cmdqs);

err_free_cmdqs:
	dma_pool_destroy(cmdqs->cmd_buf_pool);
	kfree(cmdqs);

err_out:
	return err;
}

void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev)
{
	struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
	u8 cmdq_type;

	cmdqs->status &= ~HINIC3_CMDQ_ENABLE;

	for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
		hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
		hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
		kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
	}

	hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
	destroy_cmdq_wq(hwdev, cmdqs);
	dma_pool_destroy(cmdqs->cmd_buf_pool);
	kfree(cmdqs);
}

bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq)
{
	return hinic3_wq_get_used(&cmdq->wq) == 0;
}