xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c (revision 23313771c7b99b3b8dba169bc71dae619d41ab56)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include <linux/dma-mapping.h>
5 
6 #include "hinic3_common.h"
7 #include "hinic3_csr.h"
8 #include "hinic3_hwdev.h"
9 #include "hinic3_hwif.h"
10 #include "hinic3_mbox.h"
11 
12 #define MBOX_INT_DST_AEQN_MASK        GENMASK(11, 10)
13 #define MBOX_INT_SRC_RESP_AEQN_MASK   GENMASK(13, 12)
14 #define MBOX_INT_STAT_DMA_MASK        GENMASK(19, 14)
15 /* TX size, expressed in 4-byte units */
16 #define MBOX_INT_TX_SIZE_MASK         GENMASK(24, 20)
17 /* SO_RO == strong order, relaxed order */
18 #define MBOX_INT_STAT_DMA_SO_RO_MASK  GENMASK(26, 25)
19 #define MBOX_INT_WB_EN_MASK           BIT(28)
20 #define MBOX_INT_SET(val, field)  \
21 	FIELD_PREP(MBOX_INT_##field##_MASK, val)
22 
23 #define MBOX_CTRL_TRIGGER_AEQE_MASK   BIT(0)
24 #define MBOX_CTRL_TX_STATUS_MASK      BIT(1)
25 #define MBOX_CTRL_DST_FUNC_MASK       GENMASK(28, 16)
26 #define MBOX_CTRL_SET(val, field)  \
27 	FIELD_PREP(MBOX_CTRL_##field##_MASK, val)
28 
29 #define MBOX_MSG_POLLING_TIMEOUT_MS  8000 /* send message segment timeout */
30 #define MBOX_COMP_POLLING_TIMEOUT_MS 40000 /* wait for response timeout */
31 
32 #define MBOX_MAX_BUF_SZ           2048
33 #define MBOX_HEADER_SZ            8
34 
35 /* MBOX size is 64B, 8B for mbox_header, 8B reserved */
36 #define MBOX_SEG_LEN              48
37 #define MBOX_SEG_LEN_ALIGN        4
38 #define MBOX_WB_STATUS_LEN        16
39 
40 #define MBOX_SEQ_ID_START_VAL     0
41 #define MBOX_SEQ_ID_MAX_VAL       42
42 #define MBOX_LAST_SEG_MAX_LEN  \
43 	(MBOX_MAX_BUF_SZ - MBOX_SEQ_ID_MAX_VAL * MBOX_SEG_LEN)
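/*
 * e.g. a maximal 2048-byte message is sent as 43 segments: seq ids 0..41
 * carry 48 bytes each (42 * 48 = 2016 bytes) and the last segment, seq id 42,
 * carries at most 2048 - 2016 = 32 bytes.
 */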
44 
45 /* mbox write-back status is 16B, only the first 4B are used */
46 #define MBOX_WB_STATUS_ERRCODE_MASK      0xFFFF
47 #define MBOX_WB_STATUS_MASK              0xFF
48 #define MBOX_WB_ERROR_CODE_MASK          0xFF00
49 #define MBOX_WB_STATUS_FINISHED_SUCCESS  0xFF
50 #define MBOX_WB_STATUS_NOT_FINISHED      0x00
51 
52 #define MBOX_STATUS_FINISHED(wb)  \
53 	((FIELD_GET(MBOX_WB_STATUS_MASK, (wb))) != MBOX_WB_STATUS_NOT_FINISHED)
54 #define MBOX_STATUS_SUCCESS(wb)  \
55 	((FIELD_GET(MBOX_WB_STATUS_MASK, (wb))) ==  \
56 	MBOX_WB_STATUS_FINISHED_SUCCESS)
57 #define MBOX_STATUS_ERRCODE(wb)  \
58 	((wb) & MBOX_WB_ERROR_CODE_MASK)
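/*
 * i.e. bits 7:0 of the write-back word hold the completion status and
 * bits 15:8 hold the error code.
 */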
59 
60 #define MBOX_DMA_MSG_QUEUE_DEPTH    32
61 #define MBOX_AREA(hwif)  \
62 	((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF)
63 
64 #define MBOX_MQ_CI_OFFSET  \
65 	(HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \
66 	 MBOX_HEADER_SZ + MBOX_SEG_LEN)
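/*
 * i.e. the consumer indices live right after the first mailbox segment
 * (MBOX_HEADER_SZ + MBOX_SEG_LEN = 56 bytes into the mailbox data area).
 */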
67 
68 #define MBOX_MQ_SYNC_CI_MASK   GENMASK(7, 0)
69 #define MBOX_MQ_ASYNC_CI_MASK  GENMASK(15, 8)
70 #define MBOX_MQ_CI_GET(val, field)  \
71 	FIELD_GET(MBOX_MQ_##field##_CI_MASK, val)
72 
73 #define MBOX_MGMT_FUNC_ID         0x1FFF
74 #define MBOX_COMM_F_MBOX_SEGMENT  BIT(3)
75 
76 static u8 *get_mbox_body_from_hdr(u8 *header)
77 {
78 	return header + MBOX_HEADER_SZ;
79 }
80 
81 static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
82 						 enum mbox_msg_direction_type dir,
83 						 u16 src_func_id)
84 {
85 	struct hinic3_msg_channel *msg_ch;
86 
87 	msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
88 		&mbox->mgmt_msg : mbox->func_msg;
89 
90 	return (dir == MBOX_MSG_SEND) ?
91 		&msg_ch->recv_msg : &msg_ch->resp_msg;
92 }
93 
94 static void resp_mbox_handler(struct hinic3_mbox *mbox,
95 			      const struct hinic3_msg_desc *msg_desc)
96 {
97 	spin_lock(&mbox->mbox_lock);
98 	if (msg_desc->msg_info.msg_id == mbox->send_msg_id &&
99 	    mbox->event_flag == MBOX_EVENT_START)
100 		mbox->event_flag = MBOX_EVENT_SUCCESS;
101 	spin_unlock(&mbox->mbox_lock);
102 }
103 
104 static bool mbox_segment_valid(struct hinic3_mbox *mbox,
105 			       struct hinic3_msg_desc *msg_desc,
106 			       __le64 mbox_header)
107 {
108 	u8 seq_id, seg_len, msg_id, mod;
109 	__le16 src_func_idx, cmd;
110 
111 	seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
112 	seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
113 	msg_id = MBOX_MSG_HEADER_GET(mbox_header, MSG_ID);
114 	mod = MBOX_MSG_HEADER_GET(mbox_header, MODULE);
115 	cmd = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, CMD));
116 	src_func_idx = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
117 						       SRC_GLB_FUNC_IDX));
118 
119 	if (seq_id > MBOX_SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN ||
120 	    (seq_id == MBOX_SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN))
121 		goto err_seg;
122 
123 	if (seq_id == 0) {
124 		msg_desc->seq_id = seq_id;
125 		msg_desc->msg_info.msg_id = msg_id;
126 		msg_desc->mod = mod;
127 		msg_desc->cmd = cmd;
128 	} else {
129 		if (seq_id != msg_desc->seq_id + 1 ||
130 		    msg_id != msg_desc->msg_info.msg_id ||
131 		    mod != msg_desc->mod || cmd != msg_desc->cmd)
132 			goto err_seg;
133 
134 		msg_desc->seq_id = seq_id;
135 	}
136 
137 	return true;
138 
139 err_seg:
140 	dev_err(mbox->hwdev->dev,
141 		"Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
142 		le16_to_cpu(src_func_idx), msg_desc->seq_id,
143 		msg_desc->msg_info.msg_id, msg_desc->mod, le16_to_cpu(msg_desc->cmd));
144 	dev_err(mbox->hwdev->dev,
145 		"Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
146 		seg_len, seq_id, msg_id, mod, le16_to_cpu(cmd));
147 
148 	return false;
149 }
150 
151 static void recv_mbox_handler(struct hinic3_mbox *mbox,
152 			      u8 *header, struct hinic3_msg_desc *msg_desc)
153 {
154 	__le64 mbox_header = *((__force __le64 *)header);
155 	u8 *mbox_body = get_mbox_body_from_hdr(header);
156 	u8 seq_id, seg_len;
157 	int pos;
158 
159 	if (!mbox_segment_valid(mbox, msg_desc, mbox_header)) {
160 		msg_desc->seq_id = MBOX_SEQ_ID_MAX_VAL;
161 		return;
162 	}
163 
164 	seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
165 	seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
166 
167 	pos = seq_id * MBOX_SEG_LEN;
168 	memcpy(msg_desc->msg + pos, mbox_body, seg_len);
169 
170 	if (!MBOX_MSG_HEADER_GET(mbox_header, LAST))
171 		return;
172 
173 	msg_desc->msg_len = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
174 							    MSG_LEN));
175 	msg_desc->msg_info.status = MBOX_MSG_HEADER_GET(mbox_header, STATUS);
176 
177 	if (MBOX_MSG_HEADER_GET(mbox_header, DIRECTION) == MBOX_MSG_RESP)
178 		resp_mbox_handler(mbox, msg_desc);
179 }
180 
181 void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
182 				   u8 size)
183 {
184 	__le64 mbox_header = *((__force __le64 *)header);
185 	enum mbox_msg_direction_type dir;
186 	struct hinic3_msg_desc *msg_desc;
187 	struct hinic3_mbox *mbox;
188 	u16 src_func_id;
189 
190 	mbox = hwdev->mbox;
191 	dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
192 	src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
193 	msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
194 	recv_mbox_handler(mbox, header, msg_desc);
195 }
196 
197 static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev,
198 			       struct mbox_dma_queue *mq)
199 {
200 	u32 size;
201 
202 	mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH;
203 	mq->prod_idx = 0;
204 	mq->cons_idx = 0;
205 
206 	size = mq->depth * MBOX_MAX_BUF_SZ;
207 	mq->dma_buf_vaddr = dma_alloc_coherent(hwdev->dev, size,
208 					       &mq->dma_buf_paddr,
209 					       GFP_KERNEL);
210 	if (!mq->dma_buf_vaddr)
211 		return -ENOMEM;
212 
213 	return 0;
214 }
215 
216 static void uninit_mbox_dma_queue(struct hinic3_hwdev *hwdev,
217 				  struct mbox_dma_queue *mq)
218 {
219 	dma_free_coherent(hwdev->dev, mq->depth * MBOX_MAX_BUF_SZ,
220 			  mq->dma_buf_vaddr, mq->dma_buf_paddr);
221 }
222 
223 static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *mbox)
224 {
225 	u32 val;
226 	int err;
227 
228 	err = init_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
229 	if (err)
230 		return err;
231 
232 	err = init_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
233 	if (err) {
234 		uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
235 		return err;
236 	}
237 
238 	val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
239 	val &= ~MBOX_MQ_SYNC_CI_MASK;
240 	val &= ~MBOX_MQ_ASYNC_CI_MASK;
241 	hinic3_hwif_write_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET, val);
242 
243 	return 0;
244 }
245 
246 static void hinic3_uninit_mbox_dma_queue(struct hinic3_mbox *mbox)
247 {
248 	uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
249 	uninit_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
250 }
251 
252 static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
253 {
254 	msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
255 	if (!msg_ch->resp_msg.msg)
256 		return -ENOMEM;
257 
258 	msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
259 	if (!msg_ch->recv_msg.msg) {
260 		kfree(msg_ch->resp_msg.msg);
261 		return -ENOMEM;
262 	}
263 
264 	msg_ch->resp_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
265 	msg_ch->recv_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
266 
267 	return 0;
268 }
269 
270 static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
271 {
272 	kfree(msg_ch->recv_msg.msg);
273 	kfree(msg_ch->resp_msg.msg);
274 }
275 
276 static int init_mgmt_msg_channel(struct hinic3_mbox *mbox)
277 {
278 	int err;
279 
280 	err = alloc_mbox_msg_channel(&mbox->mgmt_msg);
281 	if (err) {
282 		dev_err(mbox->hwdev->dev, "Failed to alloc mgmt message channel\n");
283 		return err;
284 	}
285 
286 	err = hinic3_init_mbox_dma_queue(mbox);
287 	if (err) {
288 		dev_err(mbox->hwdev->dev, "Failed to init mbox dma queue\n");
289 		free_mbox_msg_channel(&mbox->mgmt_msg);
290 		return err;
291 	}
292 
293 	return 0;
294 }
295 
296 static void uninit_mgmt_msg_channel(struct hinic3_mbox *mbox)
297 {
298 	hinic3_uninit_mbox_dma_queue(mbox);
299 	free_mbox_msg_channel(&mbox->mgmt_msg);
300 }
301 
302 static int hinic3_init_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
303 {
304 	struct hinic3_mbox *mbox;
305 	int err;
306 
307 	mbox = hwdev->mbox;
308 	mbox->func_msg = kzalloc(sizeof(*mbox->func_msg), GFP_KERNEL);
309 	if (!mbox->func_msg)
310 		return -ENOMEM;
311 
312 	err = alloc_mbox_msg_channel(mbox->func_msg);
313 	if (err)
314 		goto err_free_func_msg;
315 
316 	return 0;
317 
318 err_free_func_msg:
319 	kfree(mbox->func_msg);
320 	mbox->func_msg = NULL;
321 
322 	return err;
323 }
324 
325 static void hinic3_uninit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
326 {
327 	struct hinic3_mbox *mbox = hwdev->mbox;
328 
329 	free_mbox_msg_channel(mbox->func_msg);
330 	kfree(mbox->func_msg);
331 	mbox->func_msg = NULL;
332 }
333 
334 static void prepare_send_mbox(struct hinic3_mbox *mbox)
335 {
336 	struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
337 
338 	send_mbox->data = MBOX_AREA(mbox->hwdev->hwif);
339 }
340 
341 static int alloc_mbox_wb_status(struct hinic3_mbox *mbox)
342 {
343 	struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
344 	struct hinic3_hwdev *hwdev = mbox->hwdev;
345 	u32 addr_h, addr_l;
346 
347 	send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev,
348 						 MBOX_WB_STATUS_LEN,
349 						 &send_mbox->wb_paddr,
350 						 GFP_KERNEL);
351 	if (!send_mbox->wb_vaddr)
352 		return -ENOMEM;
353 
354 	addr_h = upper_32_bits(send_mbox->wb_paddr);
355 	addr_l = lower_32_bits(send_mbox->wb_paddr);
356 	hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
357 			      addr_h);
358 	hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
359 			      addr_l);
360 
361 	return 0;
362 }
363 
364 static void free_mbox_wb_status(struct hinic3_mbox *mbox)
365 {
366 	struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
367 	struct hinic3_hwdev *hwdev = mbox->hwdev;
368 
369 	hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
370 			      0);
371 	hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
372 			      0);
373 
374 	dma_free_coherent(hwdev->dev, MBOX_WB_STATUS_LEN,
375 			  send_mbox->wb_vaddr, send_mbox->wb_paddr);
376 }
377 
378 static int hinic3_mbox_pre_init(struct hinic3_hwdev *hwdev,
379 				struct hinic3_mbox *mbox)
380 {
381 	mbox->hwdev = hwdev;
382 	mutex_init(&mbox->mbox_send_lock);
383 	spin_lock_init(&mbox->mbox_lock);
384 
385 	mbox->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME);
386 	if (!mbox->workq) {
387 		dev_err(hwdev->dev, "Failed to initialize MBOX workqueue\n");
388 		return -ENOMEM;
389 	}
390 	hwdev->mbox = mbox;
391 
392 	return 0;
393 }
394 
395 int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
396 {
397 	struct hinic3_mbox *mbox;
398 	int err;
399 
400 	mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);
401 	if (!mbox)
402 		return -ENOMEM;
403 
404 	err = hinic3_mbox_pre_init(hwdev, mbox);
405 	if (err)
406 		goto err_free_mbox;
407 
408 	err = init_mgmt_msg_channel(mbox);
409 	if (err)
410 		goto err_destroy_workqueue;
411 
412 	err = hinic3_init_func_mbox_msg_channel(hwdev);
413 	if (err)
414 		goto err_uninit_mgmt_msg_ch;
415 
416 	err = alloc_mbox_wb_status(mbox);
417 	if (err) {
418 		dev_err(hwdev->dev, "Failed to alloc mbox write back status\n");
419 		goto err_uninit_func_mbox_msg_ch;
420 	}
421 
422 	prepare_send_mbox(mbox);
423 
424 	return 0;
425 
426 err_uninit_func_mbox_msg_ch:
427 	hinic3_uninit_func_mbox_msg_channel(hwdev);
428 
429 err_uninit_mgmt_msg_ch:
430 	uninit_mgmt_msg_channel(mbox);
431 
432 err_destroy_workqueue:
433 	destroy_workqueue(mbox->workq);
434 
435 err_free_mbox:
436 	kfree(mbox);
437 
438 	return err;
439 }
440 
441 void hinic3_free_mbox(struct hinic3_hwdev *hwdev)
442 {
443 	struct hinic3_mbox *mbox = hwdev->mbox;
444 
445 	destroy_workqueue(mbox->workq);
446 	free_mbox_wb_status(mbox);
447 	hinic3_uninit_func_mbox_msg_channel(hwdev);
448 	uninit_mgmt_msg_channel(mbox);
449 	kfree(mbox);
450 }
451 
452 #define MBOX_DMA_MSG_INIT_XOR_VAL    0x5a5a5a5a
453 #define MBOX_XOR_DATA_ALIGN          4
454 static u32 mbox_dma_msg_xor(u32 *data, u32 msg_len)
455 {
456 	u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL;
457 	u32 dw_len = msg_len / sizeof(u32);
458 	u32 i;
459 
460 	for (i = 0; i < dw_len; i++)
461 		xor ^= data[i];
462 
463 	return xor;
464 }
465 
466 #define MBOX_MQ_ID_MASK(mq, idx)  ((idx) & ((mq)->depth - 1))
467 
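/*
 * The queue is considered full when advancing the producer index would make
 * it wrap onto the consumer index, i.e. at most depth - 1 (31) entries can be
 * outstanding at once.
 */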
468 static bool is_msg_queue_full(struct mbox_dma_queue *mq)
469 {
470 	return MBOX_MQ_ID_MASK(mq, (mq)->prod_idx + 1) ==
471 	       MBOX_MQ_ID_MASK(mq, (mq)->cons_idx);
472 }
473 
474 static int mbox_prepare_dma_entry(struct hinic3_mbox *mbox,
475 				  struct mbox_dma_queue *mq,
476 				  struct mbox_dma_msg *dma_msg,
477 				  const void *msg, u32 msg_len)
478 {
479 	u64 dma_addr, offset;
480 	void *dma_vaddr;
481 
482 	if (is_msg_queue_full(mq)) {
483 		dev_err(mbox->hwdev->dev, "Mbox message queue is busy, pi: %u, ci: %u\n",
484 			mq->prod_idx, MBOX_MQ_ID_MASK(mq, mq->cons_idx));
485 		return -EBUSY;
486 	}
487 
488 	/* copy data to DMA buffer */
489 	offset = mq->prod_idx * MBOX_MAX_BUF_SZ;
490 	dma_vaddr = (u8 *)mq->dma_buf_vaddr + offset;
491 	memcpy(dma_vaddr, msg, msg_len);
492 	dma_addr = mq->dma_buf_paddr + offset;
493 	dma_msg->dma_addr_high = cpu_to_le32(upper_32_bits(dma_addr));
494 	dma_msg->dma_addr_low = cpu_to_le32(lower_32_bits(dma_addr));
495 	dma_msg->msg_len = cpu_to_le32(msg_len);
496 	/* Firmware reads the message in 4-byte aligned chunks. */
497 	dma_msg->xor = cpu_to_le32(mbox_dma_msg_xor(dma_vaddr,
498 						    ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)));
499 	mq->prod_idx++;
500 	mq->prod_idx = MBOX_MQ_ID_MASK(mq, mq->prod_idx);
501 
502 	return 0;
503 }
504 
505 static int mbox_prepare_dma_msg(struct hinic3_mbox *mbox,
506 				enum mbox_msg_ack_type ack_type,
507 				struct mbox_dma_msg *dma_msg, const void *msg,
508 				u32 msg_len)
509 {
510 	struct mbox_dma_queue *mq;
511 	u32 val;
512 
513 	val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
514 	if (ack_type == MBOX_MSG_ACK) {
515 		mq = &mbox->sync_msg_queue;
516 		mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC);
517 	} else {
518 		mq = &mbox->async_msg_queue;
519 		mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC);
520 	}
521 
522 	return mbox_prepare_dma_entry(mbox, mq, dma_msg, msg, msg_len);
523 }
524 
525 static void clear_mbox_status(struct hinic3_send_mbox *mbox)
526 {
527 	__be64 *wb_status = mbox->wb_vaddr;
528 
529 	*wb_status = 0;
530 	/* ensure the cleared write-back status is visible before sending */
531 	wmb();
532 }
533 
534 static void mbox_dword_write(const void *src, void __iomem *dst, u32 count)
535 {
536 	const __le32 *src32 = src;
537 	u32 __iomem *dst32 = dst;
538 	u32 i;
539 
540 	/* Data written to the mailbox is arranged in structs with little-endian
541 	 * fields, but every dword (32 bits) must be byte-swapped on its way to
542 	 * the HW, since the HW swaps it once more.
543 	 */
544 	for (i = 0; i < count; i++)
545 		__raw_writel(swab32((__force __u32)src32[i]), dst32 + i);
546 }
547 
548 static void mbox_copy_header(struct hinic3_hwdev *hwdev,
549 			     struct hinic3_send_mbox *mbox, __le64 *header)
550 {
551 	mbox_dword_write(header, mbox->data, MBOX_HEADER_SZ / sizeof(__le32));
552 }
553 
554 static void mbox_copy_send_data(struct hinic3_hwdev *hwdev,
555 				struct hinic3_send_mbox *mbox, void *seg,
556 				u32 seg_len)
557 {
558 	u32 __iomem *dst = (u32 __iomem *)(mbox->data + MBOX_HEADER_SZ);
559 	u32 count, leftover, last_dword;
560 	const __le32 *src = seg;
561 
562 	count = seg_len / sizeof(u32);
563 	leftover = seg_len % sizeof(u32);
564 	if (count > 0)
565 		mbox_dword_write(src, dst, count);
566 
567 	if (leftover > 0) {
568 		last_dword = 0;
569 		memcpy(&last_dword, src + count, leftover);
570 		mbox_dword_write(&last_dword, dst + count, 1);
571 	}
572 }
573 
574 static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
575 				u16 dst_func, u16 dst_aeqn, u32 seg_len)
576 {
577 	struct hinic3_hwif *hwif = mbox->hwdev->hwif;
578 	u32 mbox_int, mbox_ctrl, tx_size;
579 
580 	tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
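	/* e.g. a full 48-byte segment plus the 8-byte header gives
	 * ALIGN(56, 4) >> 2 = 14 dwords
	 */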
581 
582 	mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
583 		   MBOX_INT_SET(0, STAT_DMA) |
584 		   MBOX_INT_SET(tx_size, TX_SIZE) |
585 		   MBOX_INT_SET(0, STAT_DMA_SO_RO) |
586 		   MBOX_INT_SET(1, WB_EN);
587 
588 	mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
589 		    MBOX_CTRL_SET(0, TRIGGER_AEQE) |
590 		    MBOX_CTRL_SET(dst_func, DST_FUNC);
591 
592 	hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
593 	hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
594 			      mbox_ctrl);
595 }
596 
597 static u16 get_mbox_status(const struct hinic3_send_mbox *mbox)
598 {
599 	__be64 *wb_status = mbox->wb_vaddr;
600 	u64 wb_val;
601 
602 	wb_val = be64_to_cpu(*wb_status);
603 	/* make sure the status is read before it is checked */
604 	rmb();
605 
606 	return wb_val & MBOX_WB_STATUS_ERRCODE_MASK;
607 }
608 
609 static enum hinic3_wait_return check_mbox_wb_status(void *priv_data)
610 {
611 	struct hinic3_mbox *mbox = priv_data;
612 	u16 wb_status;
613 
614 	wb_status = get_mbox_status(&mbox->send_mbox);
615 
616 	return MBOX_STATUS_FINISHED(wb_status) ?
617 	       HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
618 }
619 
620 static int send_mbox_seg(struct hinic3_mbox *mbox, __le64 header,
621 			 u16 dst_func, void *seg, u32 seg_len, void *msg_info)
622 {
623 	struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
624 	struct hinic3_hwdev *hwdev = mbox->hwdev;
625 	u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
626 	enum mbox_msg_direction_type dir;
627 	u16 dst_aeqn, wb_status, errcode;
628 	int err;
629 
630 	/* mbox to mgmt cpu, hardware doesn't care about dst aeq id */
631 	if (num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) {
632 		dir = MBOX_MSG_HEADER_GET(header, DIRECTION);
633 		dst_aeqn = (dir == MBOX_MSG_SEND) ?
634 			   MBOX_MSG_AEQ_FOR_EVENT : MBOX_MSG_AEQ_FOR_MBOX;
635 	} else {
636 		dst_aeqn = 0;
637 	}
638 
639 	clear_mbox_status(send_mbox);
640 	mbox_copy_header(hwdev, send_mbox, &header);
641 	mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
642 	write_mbox_msg_attr(mbox, dst_func, dst_aeqn, seg_len);
643 
644 	err = hinic3_wait_for_timeout(mbox, check_mbox_wb_status,
645 				      MBOX_MSG_POLLING_TIMEOUT_MS,
646 				      USEC_PER_MSEC);
647 	wb_status = get_mbox_status(send_mbox);
648 	if (err) {
649 		dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
650 			wb_status);
651 		return err;
652 	}
653 
654 	if (!MBOX_STATUS_SUCCESS(wb_status)) {
655 		dev_err(hwdev->dev,
656 			"Send mailbox segment to function %u error, wb status: 0x%x\n",
657 			dst_func, wb_status);
658 		errcode = MBOX_STATUS_ERRCODE(wb_status);
659 		return errcode ? errcode : -EFAULT;
660 	}
661 
662 	return 0;
663 }
664 
665 static int send_mbox_msg(struct hinic3_mbox *mbox, u8 mod, u16 cmd,
666 			 const void *msg, u32 msg_len, u16 dst_func,
667 			 enum mbox_msg_direction_type direction,
668 			 enum mbox_msg_ack_type ack_type,
669 			 struct mbox_msg_info *msg_info)
670 {
671 	enum mbox_msg_data_type data_type = MBOX_MSG_DATA_INLINE;
672 	struct hinic3_hwdev *hwdev = mbox->hwdev;
673 	struct mbox_dma_msg dma_msg;
674 	u32 seg_len = MBOX_SEG_LEN;
675 	__le64 header = 0;
676 	u32 seq_id = 0;
677 	u16 rsp_aeq_id;
678 	u8 *msg_seg;
679 	int err = 0;
680 	u32 left;
681 
682 	if (hwdev->hwif->attr.num_aeqs > MBOX_MSG_AEQ_FOR_MBOX)
683 		rsp_aeq_id = MBOX_MSG_AEQ_FOR_MBOX;
684 	else
685 		rsp_aeq_id = 0;
686 
687 	if (dst_func == MBOX_MGMT_FUNC_ID &&
688 	    !(hwdev->features[0] & MBOX_COMM_F_MBOX_SEGMENT)) {
689 		err = mbox_prepare_dma_msg(mbox, ack_type, &dma_msg,
690 					   msg, msg_len);
691 		if (err)
692 			goto err_send;
693 
694 		msg = &dma_msg;
695 		msg_len = sizeof(dma_msg);
696 		data_type = MBOX_MSG_DATA_DMA;
697 	}
698 
699 	msg_seg = (u8 *)msg;
700 	left = msg_len;
701 
702 	header = cpu_to_le64(MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) |
703 			     MBOX_MSG_HEADER_SET(mod, MODULE) |
704 			     MBOX_MSG_HEADER_SET(seg_len, SEG_LEN) |
705 			     MBOX_MSG_HEADER_SET(ack_type, NO_ACK) |
706 			     MBOX_MSG_HEADER_SET(data_type, DATA_TYPE) |
707 			     MBOX_MSG_HEADER_SET(MBOX_SEQ_ID_START_VAL, SEQID) |
708 			     MBOX_MSG_HEADER_SET(direction, DIRECTION) |
709 			     MBOX_MSG_HEADER_SET(cmd, CMD) |
710 			     MBOX_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) |
711 			     MBOX_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) |
712 			     MBOX_MSG_HEADER_SET(MBOX_MSG_FROM_MBOX, SOURCE) |
713 			     MBOX_MSG_HEADER_SET(!!msg_info->status, STATUS));
714 
715 	while (!(MBOX_MSG_HEADER_GET(header, LAST))) {
716 		if (left <= MBOX_SEG_LEN) {
717 			header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
718 			header |=
719 				cpu_to_le64(MBOX_MSG_HEADER_SET(left, SEG_LEN) |
720 					    MBOX_MSG_HEADER_SET(1, LAST));
721 			seg_len = left;
722 		}
723 
724 		err = send_mbox_seg(mbox, header, dst_func, msg_seg,
725 				    seg_len, msg_info);
726 		if (err) {
727 			dev_err(hwdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
728 				MBOX_MSG_HEADER_GET(header, SEQID));
729 			goto err_send;
730 		}
731 
732 		left -= MBOX_SEG_LEN;
733 		msg_seg += MBOX_SEG_LEN;
734 		seq_id++;
735 		header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
736 		header |= cpu_to_le64(MBOX_MSG_HEADER_SET(seq_id, SEQID));
737 	}
738 
739 err_send:
740 	return err;
741 }
742 
743 static void set_mbox_to_func_event(struct hinic3_mbox *mbox,
744 				   enum mbox_event_state event_flag)
745 {
746 	spin_lock(&mbox->mbox_lock);
747 	mbox->event_flag = event_flag;
748 	spin_unlock(&mbox->mbox_lock);
749 }
750 
751 static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data)
752 {
753 	struct hinic3_mbox *mbox = priv_data;
754 
755 	return (mbox->event_flag == MBOX_EVENT_SUCCESS) ?
756 		HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
757 }
758 
759 static int wait_mbox_msg_completion(struct hinic3_mbox *mbox,
760 				    u32 timeout)
761 {
762 	u32 wait_time;
763 	int err;
764 
765 	wait_time = (timeout != 0) ? timeout : MBOX_COMP_POLLING_TIMEOUT_MS;
766 	err = hinic3_wait_for_timeout(mbox, check_mbox_msg_finish,
767 				      wait_time, USEC_PER_MSEC);
768 	if (err) {
769 		set_mbox_to_func_event(mbox, MBOX_EVENT_TIMEOUT);
770 		return err;
771 	}
772 	set_mbox_to_func_event(mbox, MBOX_EVENT_END);
773 
774 	return 0;
775 }
776 
777 int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
778 			     const struct mgmt_msg_params *msg_params)
779 {
780 	struct hinic3_mbox *mbox = hwdev->mbox;
781 	struct mbox_msg_info msg_info = {};
782 	struct hinic3_msg_desc *msg_desc;
783 	u32 msg_len;
784 	int err;
785 
786 	/* expect response message */
787 	msg_desc = get_mbox_msg_desc(mbox, MBOX_MSG_RESP, MBOX_MGMT_FUNC_ID);
788 	mutex_lock(&mbox->mbox_send_lock);
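	/* msg ids are 4 bits wide and wrap around after 16 messages */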
789 	msg_info.msg_id = (mbox->send_msg_id + 1) & 0xF;
790 	mbox->send_msg_id = msg_info.msg_id;
791 	set_mbox_to_func_event(mbox, MBOX_EVENT_START);
792 
793 	err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
794 			    msg_params->in_size, MBOX_MGMT_FUNC_ID,
795 			    MBOX_MSG_SEND, MBOX_MSG_ACK, &msg_info);
796 	if (err) {
797 		dev_err(hwdev->dev, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n",
798 			mod, cmd, msg_info.msg_id, err);
799 		set_mbox_to_func_event(mbox, MBOX_EVENT_FAIL);
800 		goto err_send;
801 	}
802 
803 	if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) {
804 		dev_err(hwdev->dev,
805 			"Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id);
806 		err = -ETIMEDOUT;
807 		goto err_send;
808 	}
809 
810 	if (mod != msg_desc->mod || cmd != le16_to_cpu(msg_desc->cmd)) {
811 		dev_err(hwdev->dev,
812 			"Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
813 			msg_desc->mod, le16_to_cpu(msg_desc->cmd), mod, cmd);
814 		err = -EFAULT;
815 		goto err_send;
816 	}
817 
818 	if (msg_desc->msg_info.status) {
819 		err = msg_desc->msg_info.status;
820 		goto err_send;
821 	}
822 
823 	if (msg_params->buf_out) {
824 		msg_len = le16_to_cpu(msg_desc->msg_len);
825 		if (msg_len != msg_params->expected_out_size) {
826 			dev_err(hwdev->dev,
827 				"Invalid response mbox message length: %u for mod %d cmd %u, expected length: %u\n",
828 				msg_len, mod, cmd,
829 				msg_params->expected_out_size);
830 			err = -EFAULT;
831 			goto err_send;
832 		}
833 
834 		memcpy(msg_params->buf_out, msg_desc->msg, msg_len);
835 	}
836 
837 err_send:
838 	mutex_unlock(&mbox->mbox_send_lock);
839 
840 	return err;
841 }
842 
843 int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
844 				    const struct mgmt_msg_params *msg_params)
845 {
846 	struct hinic3_mbox *mbox = hwdev->mbox;
847 	struct mbox_msg_info msg_info = {};
848 	int err;
849 
850 	mutex_lock(&mbox->mbox_send_lock);
851 	err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
852 			    msg_params->in_size, MBOX_MGMT_FUNC_ID,
853 			    MBOX_MSG_SEND, MBOX_MSG_NO_ACK, &msg_info);
854 	if (err)
855 		dev_err(hwdev->dev, "Send mailbox no ack failed\n");
856 
857 	mutex_unlock(&mbox->mbox_send_lock);
858 
859 	return err;
860 }
861