xref: /linux/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c (revision 5d085ad2e68cceec8332b23ea8f630a28b506366)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Huawei HiNIC PCI Express Linux driver
3  * Copyright(c) 2017 Huawei Technologies Co., Ltd
4  */
5 #include <linux/pci.h>
6 #include <linux/delay.h>
7 #include <linux/types.h>
8 #include <linux/completion.h>
9 #include <linux/semaphore.h>
10 #include <linux/spinlock.h>
11 #include <linux/workqueue.h>
12 
13 #include "hinic_hw_if.h"
14 #include "hinic_hw_mgmt.h"
15 #include "hinic_hw_csr.h"
16 #include "hinic_hw_dev.h"
17 #include "hinic_hw_mbox.h"
18 
/* Bit-field layout of the MAILBOX_INT_OFFSET register programmed before
 * each segment transmit (see write_mbox_msg_attr()).
 */
#define HINIC_MBOX_INT_DST_FUNC_SHIFT				0
#define HINIC_MBOX_INT_DST_AEQN_SHIFT				10
#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT			12
#define HINIC_MBOX_INT_STAT_DMA_SHIFT				14
/* The size of data to be sent (unit of 4 bytes) */
#define HINIC_MBOX_INT_TX_SIZE_SHIFT				20
/* SO_RO(strong order, relax order) */
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT			25
#define HINIC_MBOX_INT_WB_EN_SHIFT				28

#define HINIC_MBOX_INT_DST_FUNC_MASK				0x3FF
#define HINIC_MBOX_INT_DST_AEQN_MASK				0x3
#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK			0x3
#define HINIC_MBOX_INT_STAT_DMA_MASK				0x3F
#define HINIC_MBOX_INT_TX_SIZE_MASK				0x1F
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK			0x3
#define HINIC_MBOX_INT_WB_EN_MASK				0x1

/* Build one MAILBOX_INT field: mask the value, then shift it into place */
#define HINIC_MBOX_INT_SET(val, field)	\
			(((val) & HINIC_MBOX_INT_##field##_MASK) << \
			HINIC_MBOX_INT_##field##_SHIFT)

/* value written to the CONTROL TX_STATUS field to kick off a transmit */
enum hinic_mbox_tx_status {
	TX_NOT_DONE = 1,
};

/* Bit-field layout of the MAILBOX_CONTROL register */
#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT			0

/* specifies the issue request for the message data.
 * 0 - Tx request is done;
 * 1 - Tx request is in process.
 */
#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT				1

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK			0x1
#define HINIC_MBOX_CTRL_TX_STATUS_MASK				0x1

/* Build one MAILBOX_CONTROL field: mask the value, then shift into place */
#define HINIC_MBOX_CTRL_SET(val, field)	\
			(((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
			HINIC_MBOX_CTRL_##field##_SHIFT)
/* Layout of the 64-bit header that precedes every mailbox segment.
 * Fields below 32 describe the segment; fields at/above 32 identify the
 * command, message id, status and sending function.
 */
#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT				0
#define HINIC_MBOX_HEADER_MODULE_SHIFT				11
#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT				16
#define HINIC_MBOX_HEADER_NO_ACK_SHIFT				22
#define HINIC_MBOX_HEADER_SEQID_SHIFT				24
#define HINIC_MBOX_HEADER_LAST_SHIFT				30

/* specifies the mailbox message direction
 * 0 - send
 * 1 - receive
 */
#define HINIC_MBOX_HEADER_DIRECTION_SHIFT			31
#define HINIC_MBOX_HEADER_CMD_SHIFT				32
#define HINIC_MBOX_HEADER_MSG_ID_SHIFT				40
#define HINIC_MBOX_HEADER_STATUS_SHIFT				48
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT		54

#define HINIC_MBOX_HEADER_MSG_LEN_MASK				0x7FF
#define HINIC_MBOX_HEADER_MODULE_MASK				0x1F
#define HINIC_MBOX_HEADER_SEG_LEN_MASK				0x3F
#define HINIC_MBOX_HEADER_NO_ACK_MASK				0x1
#define HINIC_MBOX_HEADER_SEQID_MASK				0x3F
#define HINIC_MBOX_HEADER_LAST_MASK				0x1
#define HINIC_MBOX_HEADER_DIRECTION_MASK			0x1
#define HINIC_MBOX_HEADER_CMD_MASK				0xFF
#define HINIC_MBOX_HEADER_MSG_ID_MASK				0xFF
#define HINIC_MBOX_HEADER_STATUS_MASK				0x3F
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK			0x3FF

/* Extract one header field from a received 64-bit header */
#define HINIC_MBOX_HEADER_GET(val, field)	\
			(((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
			HINIC_MBOX_HEADER_##field##_MASK)
/* Build one header field; the u64 cast keeps high fields from truncating */
#define HINIC_MBOX_HEADER_SET(val, field)	\
			((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
			HINIC_MBOX_HEADER_##field##_SHIFT)

/* mask covering the SEG_LEN field in its shifted position */
#define MBOX_SEGLEN_MASK			\
		HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
98 
#define HINIC_MBOX_SEG_LEN			48
/* completion wait for one segment / whole message, in milliseconds */
#define HINIC_MBOX_COMP_TIME			8000U
/* max ~1ms polling iterations when sending in polled mode */
#define MBOX_MSG_POLLING_TIMEOUT		8000

#define HINIC_MBOX_DATA_SIZE			2040

#define MBOX_MAX_BUF_SZ				2048UL
#define MBOX_HEADER_SZ				8

#define MBOX_INFO_SZ				4

/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
#define MBOX_SEG_LEN				48
#define MBOX_SEG_LEN_ALIGN			4
#define MBOX_WB_STATUS_LEN			16UL

/* mbox write back status is 16B, only first 4B is used */
#define MBOX_WB_STATUS_ERRCODE_MASK		0xFFFF
#define MBOX_WB_STATUS_MASK			0xFF
#define MBOX_WB_ERROR_CODE_MASK			0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS		0xFF
#define MBOX_WB_STATUS_FINISHED_WITH_ERR	0xFE
#define MBOX_WB_STATUS_NOT_FINISHED		0x00

#define MBOX_STATUS_FINISHED(wb)	\
	(((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
#define MBOX_STATUS_SUCCESS(wb)		\
	(((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
#define MBOX_STATUS_ERRCODE(wb)		\
	((wb) & MBOX_WB_ERROR_CODE_MASK)

/* sequence ids for multi-segment reassembly; MAX also serves as the
 * "no segment received yet" reset value (see recv_mbox_handler())
 */
#define SEQ_ID_START_VAL			0
#define SEQ_ID_MAX_VAL				42

#define DST_AEQ_IDX_DEFAULT_VAL			0
#define SRC_AEQ_IDX_DEFAULT_VAL			0
#define NO_DMA_ATTRIBUTE_VAL			0

#define HINIC_MGMT_RSP_AEQN			0
#define HINIC_MBOX_RSP_AEQN			2
#define HINIC_MBOX_RECV_AEQN			0

/* if no data needs to response, reply with a 1-byte body */
#define MBOX_MSG_NO_DATA_LEN			1

#define MBOX_BODY_FROM_HDR(header)	((u8 *)(header) + MBOX_HEADER_SZ)
#define MBOX_AREA(hwif)			\
	((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)

#define IS_PF_OR_PPF_SRC(src_func_idx)	((src_func_idx) < HINIC_MAX_PF_FUNCS)

#define MBOX_RESPONSE_ERROR		0x1
#define MBOX_MSG_ID_MASK		0xFF
#define MBOX_MSG_ID(func_to_func)	((func_to_func)->send_msg_id)
/* advance the per-channel message id, wrapping at 8 bits */
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
			(MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)

#define FUNC_ID_OFF_SET_8B		8
#define FUNC_ID_OFF_SET_10B		10

/* max message counter wait to process for one function */
#define HINIC_MAX_MSG_CNT_TO_PROCESS	10

#define HINIC_QUEUE_MIN_DEPTH		6
#define HINIC_QUEUE_MAX_DEPTH		12
#define HINIC_MAX_RX_BUFFER_SIZE		15
164 
/* value of the header DIRECTION bit */
enum hinic_hwif_direction_type {
	HINIC_HWIF_DIRECT_SEND	= 0,	/* a new request */
	HINIC_HWIF_RESPONSE	= 1,	/* a response to an earlier request */
};

/* send mode; only interrupt mode is defined (value 0) */
enum mbox_send_mod {
	MBOX_SEND_MSG_INT,
};

/* value of the header LAST bit */
enum mbox_seg_type {
	NOT_LAST_SEG,
	LAST_SEG,
};

/* DMA ordering attribute written to STAT_DMA_SO_RO */
enum mbox_ordering_type {
	STRONG_ORDER,
};

/* value of the WB_EN field: request hardware write-back of tx status */
enum mbox_write_back_type {
	WRITE_BACK = 1,
};

/* value of the CONTROL TRIGGER_AEQE field */
enum mbox_aeq_trig_type {
	NOT_TRIGGER,
	TRIGGER,
};
191 
192 /**
193  * hinic_register_pf_mbox_cb - register mbox callback for pf
194  * @hwdev: the pointer to hw device
195  * @mod:	specific mod that the callback will handle
196  * @callback:	callback function
197  * Return: 0 - success, negative - failure
198  */
199 int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
200 			      enum hinic_mod_type mod,
201 			      hinic_pf_mbox_cb callback)
202 {
203 	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
204 
205 	if (mod >= HINIC_MOD_MAX)
206 		return -EFAULT;
207 
208 	func_to_func->pf_mbox_cb[mod] = callback;
209 
210 	set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
211 
212 	return 0;
213 }
214 
215 /**
216  * hinic_register_vf_mbox_cb - register mbox callback for vf
217  * @hwdev: the pointer to hw device
218  * @mod:	specific mod that the callback will handle
219  * @callback:	callback function
220  * Return: 0 - success, negative - failure
221  */
222 int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
223 			      enum hinic_mod_type mod,
224 			      hinic_vf_mbox_cb callback)
225 {
226 	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
227 
228 	if (mod >= HINIC_MOD_MAX)
229 		return -EFAULT;
230 
231 	func_to_func->vf_mbox_cb[mod] = callback;
232 
233 	set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
234 
235 	return 0;
236 }
237 
238 /**
239  * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
240  * @hwdev:	the pointer to hw device
241  * @mod:	specific mod that the callback will handle
242  */
243 void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
244 				 enum hinic_mod_type mod)
245 {
246 	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
247 
248 	clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
249 
250 	while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
251 			&func_to_func->pf_mbox_cb_state[mod]))
252 		usleep_range(900, 1000);
253 
254 	func_to_func->pf_mbox_cb[mod] = NULL;
255 }
256 
257 /**
258  * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
259  * @hwdev:	the pointer to hw device
260  * @mod:	specific mod that the callback will handle
261  */
262 void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
263 				 enum hinic_mod_type mod)
264 {
265 	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
266 
267 	clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
268 
269 	while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
270 			&func_to_func->vf_mbox_cb_state[mod]))
271 		usleep_range(900, 1000);
272 
273 	func_to_func->vf_mbox_cb[mod] = NULL;
274 }
275 
/* Dispatch a mailbox message received by a VF to the callback registered
 * for its module.  The RUNNING bit is held around the invocation so that
 * hinic_unregister_vf_mbox_cb() can wait for in-flight callbacks.
 * Return: 0 on success, -EINVAL on illegal module or missing callback.
 */
static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				struct hinic_recv_mbox *recv_mbox,
				void *buf_out, u16 *out_size)
{
	hinic_vf_mbox_cb cb;
	int ret = 0;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	/* mark the callback as running before checking registration */
	set_bit(HINIC_VF_MBOX_CB_RUNNING,
		&func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
			   &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
		cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
		   recv_mbox->mbox_len, buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
		ret = -EINVAL;
	}

	clear_bit(HINIC_VF_MBOX_CB_RUNNING,
		  &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}
307 
/* Dispatch a VF-originated mailbox message on the PF to the callback
 * registered for its module.  The VF id passed to the callback is the
 * source's global function index minus the PF's VF offset.  The RUNNING
 * bit is held around the invocation so unregistration can wait for it.
 * Return: callback's result, or -EINVAL on illegal module / no callback.
 */
static int
recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			     struct hinic_recv_mbox *recv_mbox,
			     u16 src_func_idx, void *buf_out,
			     u16 *out_size)
{
	hinic_pf_mbox_cb cb;
	u16 vf_id = 0;
	int ret;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	/* mark the callback as running before checking registration */
	set_bit(HINIC_PF_MBOX_CB_RUNNING,
		&func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
			   &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
		/* translate global function index to a VF index */
		vf_id = src_func_idx -
			hinic_glb_pf_vf_offset(func_to_func->hwif);
		ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
			 recv_mbox->mbox, recv_mbox->mbox_len,
			 buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
			recv_mbox->mod);
		ret = -EINVAL;
	}

	clear_bit(HINIC_PF_MBOX_CB_RUNNING,
		  &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}
346 
347 static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
348 					  u8 seq_id, u8 seg_len)
349 {
350 	if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
351 		return false;
352 
353 	if (seq_id == 0) {
354 		recv_mbox->seq_id = seq_id;
355 	} else {
356 		if (seq_id != recv_mbox->seq_id + 1)
357 			return false;
358 
359 		recv_mbox->seq_id = seq_id;
360 	}
361 
362 	return true;
363 }
364 
/* Handle a response segment: if it matches the message id of the request
 * currently in flight (and the sender is still waiting, EVENT_START),
 * complete the waiter; otherwise the sender has already given up and the
 * stale response is only logged.  mbox_lock makes the id/flag check
 * atomic against set_mbox_to_func_event() on the sending side.
 */
static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      struct hinic_recv_mbox *recv_mbox)
{
	spin_lock(&func_to_func->mbox_lock);
	if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
	    func_to_func->event_flag == EVENT_START)
		complete(&recv_mbox->recv_done);
	else
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
			func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
			recv_mbox->msg_info.status);
	spin_unlock(&func_to_func->mbox_lock);
}
379 
380 static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
381 				   struct hinic_recv_mbox *recv_mbox,
382 				   u16 src_func_idx);
383 
/* Workqueue handler: process one queued mailbox message, then drop the
 * per-source pending-message count and free the work item.  Note the
 * msg_cnt accounting lives in the original mbox_send[] slot, while
 * mbox_work->recv_mbox is the private heap copy that
 * recv_func_mbox_handler() consumes and frees.
 */
static void recv_func_mbox_work_handler(struct work_struct *work)
{
	struct hinic_mbox_work *mbox_work =
			container_of(work, struct hinic_mbox_work, work);
	struct hinic_recv_mbox *recv_mbox;

	recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
			       mbox_work->src_func_idx);

	recv_mbox =
		&mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];

	atomic_dec(&recv_mbox->msg_cnt);

	kfree(mbox_work);
}
400 
/* Reassemble one incoming mailbox segment (called from AEQ context).
 * Segments are accumulated into recv_mbox->mbox at seq_id * MBOX_SEG_LEN.
 * When the LAST segment arrives: responses complete the waiting sender
 * directly; requests are copied to freshly allocated buffers and deferred
 * to the mailbox workqueue (callbacks may sleep).  Any validation or
 * allocation failure drops the message silently after resetting the
 * reassembly state.
 */
static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      void *header, struct hinic_recv_mbox *recv_mbox)
{
	void *mbox_body = MBOX_BODY_FROM_HDR(header);
	struct hinic_recv_mbox *rcv_mbox_temp = NULL;
	u64 mbox_header = *((u64 *)header);
	struct hinic_mbox_work *mbox_work;
	u8 seq_id, seg_len;
	u16 src_func_idx;
	int pos;

	seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
	seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
	src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
			src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
		/* reset reassembly so the next seq_id 0 restarts cleanly */
		recv_mbox->seq_id = SEQ_ID_MAX_VAL;
		return;
	}

	pos = seq_id * MBOX_SEG_LEN;
	memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
	       HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));

	/* wait for the remaining segments of this message */
	if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
		return;

	/* message complete: latch its metadata and reset reassembly state */
	recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
	recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
	recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
	recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
	recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
	recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
	recv_mbox->seq_id = SEQ_ID_MAX_VAL;

	if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
	    HINIC_HWIF_RESPONSE) {
		resp_mbox_handler(func_to_func, recv_mbox);
		return;
	}

	/* throttle: drop requests if too many are already queued for
	 * this source function
	 */
	if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
		dev_warn(&func_to_func->hwif->pdev->dev,
			 "This function(%u) have %d message wait to process,can't add to work queue\n",
			 src_func_idx, atomic_read(&recv_mbox->msg_cnt));
		return;
	}

	/* snapshot the message into private buffers; the slot may be
	 * overwritten by the next message before the work item runs
	 */
	rcv_mbox_temp = kzalloc(sizeof(*rcv_mbox_temp), GFP_KERNEL);
	if (!rcv_mbox_temp)
		return;

	memcpy(rcv_mbox_temp, recv_mbox, sizeof(*rcv_mbox_temp));

	rcv_mbox_temp->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!rcv_mbox_temp->mbox)
		goto err_alloc_rcv_mbox_msg;

	memcpy(rcv_mbox_temp->mbox, recv_mbox->mbox, MBOX_MAX_BUF_SZ);

	rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!rcv_mbox_temp->buf_out)
		goto err_alloc_rcv_mbox_buf;

	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
	if (!mbox_work)
		goto err_alloc_mbox_work;

	mbox_work->func_to_func = func_to_func;
	mbox_work->recv_mbox = rcv_mbox_temp;
	mbox_work->src_func_idx = src_func_idx;

	atomic_inc(&recv_mbox->msg_cnt);
	INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
	queue_work(func_to_func->workq, &mbox_work->work);

	return;

err_alloc_mbox_work:
	kfree(rcv_mbox_temp->buf_out);

err_alloc_rcv_mbox_buf:
	kfree(rcv_mbox_temp->mbox);

err_alloc_rcv_mbox_msg:
	kfree(rcv_mbox_temp);
}
491 
492 void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
493 {
494 	struct hinic_mbox_func_to_func *func_to_func;
495 	u64 mbox_header = *((u64 *)header);
496 	struct hinic_recv_mbox *recv_mbox;
497 	u64 src, dir;
498 
499 	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
500 
501 	dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
502 	src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
503 
504 	if (src >= HINIC_MAX_FUNCTIONS) {
505 		dev_err(&func_to_func->hwif->pdev->dev,
506 			"Mailbox source function id:%u is invalid\n", (u32)src);
507 		return;
508 	}
509 
510 	recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
511 		    &func_to_func->mbox_send[src] :
512 		    &func_to_func->mbox_resp[src];
513 
514 	recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
515 }
516 
517 void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
518 {
519 	struct hinic_mbox_func_to_func *func_to_func;
520 	struct hinic_send_mbox *send_mbox;
521 
522 	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
523 	send_mbox = &func_to_func->send_mbox;
524 
525 	complete(&send_mbox->send_done);
526 }
527 
/* Zero the hardware write-back word before issuing a new segment, so a
 * stale completion status cannot be mistaken for the new one.
 */
static void clear_mbox_status(struct hinic_send_mbox *mbox)
{
	*mbox->wb_status = 0;

	/* clear mailbox write back status */
	wmb();
}
535 
536 static void mbox_copy_header(struct hinic_hwdev *hwdev,
537 			     struct hinic_send_mbox *mbox, u64 *header)
538 {
539 	u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
540 	u32 *data = (u32 *)header;
541 
542 	for (i = 0; i < idx_max; i++)
543 		__raw_writel(*(data + i), mbox->data + i * sizeof(u32));
544 }
545 
/* Copy one message segment into the send area right after the header,
 * 32 bits at a time.  A segment whose length is not a multiple of 4 is
 * first staged in a zeroed bounce buffer so the final partial word reads
 * defined data instead of running past the caller's buffer.
 */
static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
				struct hinic_send_mbox *mbox, void *seg,
				u16 seg_len)
{
	u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
	u32 data_len, chk_sz = sizeof(u32);
	u32 *data = seg;
	u32 i, idx_max;

	/* The mbox message should be aligned in 4 bytes. */
	if (seg_len % chk_sz) {
		memcpy(mbox_max_buf, seg, seg_len);
		data = (u32 *)mbox_max_buf;
	}

	data_len = seg_len;
	idx_max = ALIGN(data_len, chk_sz) / chk_sz;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i),
			     mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
}
568 
/* Program the mailbox INT and CONTROL registers to launch the segment
 * already staged in the send area.  TX_SIZE is always the full (aligned)
 * mailbox size in 4-byte units; the seg_len parameter is currently
 * unused.  Writing the CONTROL register (TX_NOT_DONE) starts the DMA.
 */
static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
				u16 dst_func, u16 dst_aeqn, u16 seg_len,
				int poll)
{
	u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
	u32 mbox_int, mbox_ctrl;

	mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
		   HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
		   HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
		   HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
		   HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
				      MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
				      TX_SIZE) |
		   HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
		   HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);

	wmb(); /* writing the mbox int attributes */
	mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);

	/* in polled mode the sender spins on the write-back word, so no
	 * AEQ event is requested on the source side
	 */
	if (poll)
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
	else
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
}
600 
/* Dump the mailbox CONTROL and INT registers to the log; used as a
 * diagnostic when a segment send times out.
 */
void dump_mox_reg(struct hinic_hwdev *hwdev)
{
	u32 val;

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
		val);
}
614 
/* Read the hardware write-back word (stored big-endian) and return its
 * low 16 bits: the status byte plus the error-code byte, to be decoded
 * with the MBOX_STATUS_* / MBOX_WB_* macros.
 */
static u16 get_mbox_status(struct hinic_send_mbox *mbox)
{
	/* write back is 16B, but only use first 4B */
	u64 wb_val = be64_to_cpu(*mbox->wb_status);

	rmb(); /* verify reading before check */

	return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
}
624 
/* Wait for the hardware to finish transmitting the current segment.
 * Polled mode spins (~1ms steps) on the write-back word; interrupt mode
 * sleeps on send_done, which hinic_mbox_self_aeqe_handler() completes.
 * On success *wb_status holds the final write-back value.
 * Return: 0 on completion, -ETIMEDOUT otherwise (registers are dumped).
 */
static int
wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
			     int poll, u16 *wb_status)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u32 cnt = 0;
	ulong jif;

	if (poll) {
		while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
			*wb_status = get_mbox_status(send_mbox);
			if (MBOX_STATUS_FINISHED(*wb_status))
				break;

			usleep_range(900, 1000);
			cnt++;
		}

		if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
				*wb_status);
			dump_mox_reg(hwdev);
			return -ETIMEDOUT;
		}
	} else {
		jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
		if (!wait_for_completion_timeout(done, jif)) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
			dump_mox_reg(hwdev);
			return -ETIMEDOUT;
		}

		*wb_status = get_mbox_status(send_mbox);
	}

	return 0;
}
664 
/* Transmit one segment: stage header and data in the send area, program
 * the launch registers and wait for the write-back status.  Responses go
 * to the response AEQ, direct sends to the receive AEQ (when the chip
 * exposes at least 4 AEQs).  The msg_info parameter is currently unused.
 * Return: 0 on success, hardware error code or -EFAULT on a failed
 * write-back, -ETIMEDOUT on no completion.
 */
static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
			 u64 header, u16 dst_func, void *seg, u16 seg_len,
			 int poll, void *msg_info)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
	u16 dst_aeqn, wb_status = 0, errcode;

	if (num_aeqs >= 4)
		dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
			   HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
	else
		dst_aeqn = 0;

	/* re-arm the completion before the transmit that will signal it */
	if (!poll)
		init_completion(done);

	clear_mbox_status(send_mbox);

	mbox_copy_header(hwdev, send_mbox, &header);

	mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);

	write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);

	wmb(); /* writing the mbox msg attributes */

	if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
		return -ETIMEDOUT;

	if (!MBOX_STATUS_SUCCESS(wb_status)) {
		dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
			dst_func, wb_status);
		errcode = MBOX_STATUS_ERRCODE(wb_status);
		return errcode ? errcode : -EFAULT;
	}

	return 0;
}
707 
/* Split a message into MBOX_SEG_LEN segments and send them in order,
 * serialized by msg_send_sem.  The 64-bit header is rebuilt per segment:
 * the final segment carries its true length and the LAST bit, which also
 * terminates the loop.  Segments are sent in interrupt mode
 * (MBOX_SEND_MSG_INT == 0 is passed as the poll flag).  Note the
 * success path also falls through the err_send_mbox_seg label, which
 * only releases the semaphore.
 * Return: 0 on success, first failing segment's error otherwise.
 */
static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
			     enum hinic_mod_type mod, u16 cmd, void *msg,
			     u16 msg_len, u16 dst_func,
			     enum hinic_hwif_direction_type direction,
			     enum hinic_mbox_ack_type ack_type,
			     struct mbox_msg_info *msg_info)
{
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u16 seg_len = MBOX_SEG_LEN;
	u8 *msg_seg = (u8 *)msg;
	u16 left = msg_len;
	u32 seq_id = 0;
	u64 header = 0;
	int err = 0;

	down(&func_to_func->msg_send_sem);

	header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
		 HINIC_MBOX_HEADER_SET(mod, MODULE) |
		 HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
		 HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
		 HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
		 HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
		 HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
		 HINIC_MBOX_HEADER_SET(cmd, CMD) |
		 /* The vf's offset to it's associated pf */
		 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
		 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
		 HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
				       SRC_GLB_FUNC_IDX);

	while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
		/* final segment: shrink SEG_LEN and set the LAST bit */
		if (left <= HINIC_MBOX_SEG_LEN) {
			header &= ~MBOX_SEGLEN_MASK;
			header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
			header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);

			seg_len = left;
		}

		err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
				    seg_len, MBOX_SEND_MSG_INT, msg_info);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
				HINIC_MBOX_HEADER_GET(header, SEQID));
			goto err_send_mbox_seg;
		}

		left -= HINIC_MBOX_SEG_LEN;
		msg_seg += HINIC_MBOX_SEG_LEN;

		/* advance to the next segment's sequence id */
		seq_id++;
		header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
						  SEQID));
		header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
	}

err_send_mbox_seg:
	up(&func_to_func->msg_send_sem);

	return err;
}
770 
/* Send the response for a processed request back to its originator, but
 * only if the request asked for an acknowledgment (MBOX_ACK).  The
 * handler's error is mapped to a status byte carried in the response
 * header; on error or empty output a 1-byte placeholder body is sent.
 */
static void
response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
			    struct hinic_recv_mbox *recv_mbox, int err,
			    u16 out_size, u16 src_func_idx)
{
	struct mbox_msg_info msg_info = {0};

	if (recv_mbox->ack_type == MBOX_ACK) {
		msg_info.msg_id = recv_mbox->msg_info.msg_id;
		if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
		else if (err == HINIC_MBOX_VF_CMD_ERROR)
			msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
		else if (err)
			msg_info.status = HINIC_MBOX_PF_SEND_ERR;

		/* if no data needs to response, set out_size to 1 */
		if (!out_size || err)
			out_size = MBOX_MSG_NO_DATA_LEN;

		send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
				  recv_mbox->buf_out, out_size, src_func_idx,
				  HINIC_HWIF_RESPONSE, MBOX_ACK,
				  &msg_info);
	}
}
797 
/* Process one complete request from the workqueue: route it to the VF or
 * PF handler, send back the acknowledgment if requested, then free the
 * heap copy (and its buffers) allocated by recv_mbox_handler().
 * PF-to-PF messages are not supported and are only logged.
 */
static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx)
{
	void *buf_out = recv_mbox->buf_out;
	u16 out_size = MBOX_MAX_BUF_SZ;
	int err = 0;

	if (HINIC_IS_VF(func_to_func->hwif)) {
		err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
					   &out_size);
	} else {
		if (IS_PF_OR_PPF_SRC(src_func_idx))
			dev_warn(&func_to_func->hwif->pdev->dev,
				 "Unsupported pf2pf mbox msg\n");
		else
			err = recv_pf_from_vf_mbox_handler(func_to_func,
							   recv_mbox,
							   src_func_idx,
							   buf_out, &out_size);
	}

	response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
				    src_func_idx);
	/* release the private copy made by recv_mbox_handler() */
	kfree(recv_mbox->buf_out);
	kfree(recv_mbox->mbox);
	kfree(recv_mbox);
}
826 
/* Update the send-state flag under mbox_lock, so resp_mbox_handler()
 * sees a consistent (msg_id, event_flag) pair when matching responses.
 */
static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
				   enum mbox_event_state event_flag)
{
	spin_lock(&func_to_func->mbox_lock);
	func_to_func->event_flag = event_flag;
	spin_unlock(&func_to_func->mbox_lock);
}
834 
/* Evaluate a received response: a non-zero status byte from the remote
 * side is returned as the error (logged unless it is the expected
 * "PF busy" code); otherwise the payload is copied to the caller's
 * buffer after checking it fits, and *out_size is set to its length.
 * Return: 0 on success, remote status or -EFAULT on overflow.
 */
static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
				  struct hinic_recv_mbox *mbox_for_resp,
				  enum hinic_mod_type mod, u16 cmd,
				  void *buf_out, u16 *out_size)
{
	int err;

	if (mbox_for_resp->msg_info.status) {
		err = mbox_for_resp->msg_info.status;
		if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
				mbox_for_resp->msg_info.status);
		return err;
	}

	if (buf_out && out_size) {
		if (*out_size < mbox_for_resp->mbox_len) {
			dev_err(&func_to_func->hwif->pdev->dev,
				"Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
				mbox_for_resp->mbox_len, mod, cmd, *out_size);
			return -EFAULT;
		}

		if (mbox_for_resp->mbox_len)
			memcpy(buf_out, mbox_for_resp->mbox,
			       mbox_for_resp->mbox_len);

		*out_size = mbox_for_resp->mbox_len;
	}

	return 0;
}
867 
/* Send a request to another function and wait for its response.
 * Serialized by mbox_send_sem (one outstanding request per channel).
 * The event flag walks EVENT_START -> EVENT_END (or EVENT_FAIL /
 * EVENT_TIMEOUT) so late responses are discarded by resp_mbox_handler().
 * @timeout: wait in ms; 0 selects the default HINIC_MBOX_COMP_TIME.
 * Return: 0 on success, negative errno or remote status on failure.
 */
int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
		       enum hinic_mod_type mod, u16 cmd, u16 dst_func,
		       void *buf_in, u16 in_size, void *buf_out,
		       u16 *out_size, u32 timeout)
{
	struct hinic_recv_mbox *mbox_for_resp;
	struct mbox_msg_info msg_info = {0};
	ulong timeo;
	int err;

	mbox_for_resp = &func_to_func->mbox_resp[dst_func];

	down(&func_to_func->mbox_send_sem);

	init_completion(&mbox_for_resp->recv_done);

	msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);

	set_mbox_to_func_event(func_to_func, EVENT_START);

	err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
				dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
				&msg_info);
	if (err) {
		dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
			msg_info.msg_id);
		set_mbox_to_func_event(func_to_func, EVENT_FAIL);
		goto err_send_mbox;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
	if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
		set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
		dev_err(&func_to_func->hwif->pdev->dev,
			"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
		err = -ETIMEDOUT;
		goto err_send_mbox;
	}

	set_mbox_to_func_event(func_to_func, EVENT_END);

	err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
				     buf_out, out_size);

err_send_mbox:
	up(&func_to_func->mbox_send_sem);

	return err;
}
917 
918 static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
919 				  void *buf_in, u16 in_size)
920 {
921 	if (in_size > HINIC_MBOX_DATA_SIZE) {
922 		dev_err(&func_to_func->hwif->pdev->dev,
923 			"Mbox msg len(%d) exceed limit(%d)\n",
924 			in_size, HINIC_MBOX_DATA_SIZE);
925 		return -EINVAL;
926 	}
927 
928 	return 0;
929 }
930 
931 int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
932 		     enum hinic_mod_type mod, u8 cmd, void *buf_in,
933 		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
934 {
935 	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
936 	int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
937 
938 	if (err)
939 		return err;
940 
941 	if (!HINIC_IS_VF(hwdev->hwif)) {
942 		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
943 			HINIC_FUNC_TYPE(hwdev->hwif));
944 		return -EINVAL;
945 	}
946 
947 	return hinic_mbox_to_func(func_to_func, mod, cmd,
948 				  hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
949 				  in_size, buf_out, out_size, timeout);
950 }
951 
/* Send a mailbox request from the PF to one of its VFs and wait for the
 * response.  Only callable on a PF; vf_id is 1-based.
 * @timeout: wait in ms; 0 selects the default HINIC_MBOX_COMP_TIME.
 * Return: 0 on success, negative errno or remote status on failure.
 */
int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u16 dst_func_idx;
	int err;

	if (!hwdev)
		return -EINVAL;

	func_to_func = hwdev->func_to_func;
	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
	if (err)
		return err;

	if (HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	if (!vf_id) {
		dev_err(&hwdev->hwif->pdev->dev,
			"VF id(%d) error!\n", vf_id);
		return -EINVAL;
	}

	/* vf_offset_to_pf + vf_id is the vf's global function id of vf in
	 * this pf
	 */
	dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;

	return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
				  in_size, buf_out, out_size, timeout);
}
988 
989 static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
990 {
991 	int err;
992 
993 	mbox_info->seq_id = SEQ_ID_MAX_VAL;
994 
995 	mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
996 	if (!mbox_info->mbox)
997 		return -ENOMEM;
998 
999 	mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
1000 	if (!mbox_info->buf_out) {
1001 		err = -ENOMEM;
1002 		goto err_alloc_buf_out;
1003 	}
1004 
1005 	atomic_set(&mbox_info->msg_cnt, 0);
1006 
1007 	return 0;
1008 
1009 err_alloc_buf_out:
1010 	kfree(mbox_info->mbox);
1011 
1012 	return err;
1013 }
1014 
1015 static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
1016 {
1017 	kfree(mbox_info->buf_out);
1018 	kfree(mbox_info->mbox);
1019 }
1020 
1021 static int alloc_mbox_info(struct hinic_hwdev *hwdev,
1022 			   struct hinic_recv_mbox *mbox_info)
1023 {
1024 	u16 func_idx, i;
1025 	int err;
1026 
1027 	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
1028 		err = init_mbox_info(&mbox_info[func_idx]);
1029 		if (err) {
1030 			dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
1031 				func_idx);
1032 			goto err_init_mbox_info;
1033 		}
1034 	}
1035 
1036 	return 0;
1037 
1038 err_init_mbox_info:
1039 	for (i = 0; i < func_idx; i++)
1040 		clean_mbox_info(&mbox_info[i]);
1041 
1042 	return err;
1043 }
1044 
1045 static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
1046 {
1047 	u16 func_idx;
1048 
1049 	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
1050 		clean_mbox_info(&mbox_info[func_idx]);
1051 }
1052 
1053 static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
1054 {
1055 	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
1056 
1057 	send_mbox->data = MBOX_AREA(func_to_func->hwif);
1058 }
1059 
1060 static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
1061 {
1062 	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
1063 	struct hinic_hwdev *hwdev = func_to_func->hwdev;
1064 	u32 addr_h, addr_l;
1065 
1066 	send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
1067 						 MBOX_WB_STATUS_LEN,
1068 						 &send_mbox->wb_paddr,
1069 						 GFP_KERNEL);
1070 	if (!send_mbox->wb_vaddr)
1071 		return -ENOMEM;
1072 
1073 	send_mbox->wb_status = send_mbox->wb_vaddr;
1074 
1075 	addr_h = upper_32_bits(send_mbox->wb_paddr);
1076 	addr_l = lower_32_bits(send_mbox->wb_paddr);
1077 
1078 	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
1079 			     addr_h);
1080 	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
1081 			     addr_l);
1082 
1083 	return 0;
1084 }
1085 
1086 static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
1087 {
1088 	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
1089 	struct hinic_hwdev *hwdev = func_to_func->hwdev;
1090 
1091 	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
1092 			     0);
1093 	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
1094 			     0);
1095 
1096 	dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
1097 			  send_mbox->wb_vaddr,
1098 			  send_mbox->wb_paddr);
1099 }
1100 
1101 static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
1102 				u16 in_size, void *buf_out, u16 *out_size)
1103 {
1104 	struct hinic_hwdev *hwdev = handle;
1105 	struct hinic_pfhwdev *pfhwdev;
1106 	int err = 0;
1107 
1108 	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
1109 
1110 	if (cmd == HINIC_COMM_CMD_START_FLR) {
1111 		*out_size = 0;
1112 	} else {
1113 		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
1114 					cmd, buf_in, in_size, buf_out, out_size,
1115 					HINIC_MGMT_MSG_SYNC);
1116 		if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
1117 			dev_err(&hwdev->hwif->pdev->dev,
1118 				"PF mbox common callback handler err: %d\n",
1119 				err);
1120 	}
1121 
1122 	return err;
1123 }
1124 
1125 int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
1126 {
1127 	struct hinic_mbox_func_to_func *func_to_func;
1128 	struct hinic_pfhwdev *pfhwdev;
1129 	int err;
1130 
1131 	pfhwdev =  container_of(hwdev, struct hinic_pfhwdev, hwdev);
1132 	func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
1133 	if (!func_to_func)
1134 		return -ENOMEM;
1135 
1136 	hwdev->func_to_func = func_to_func;
1137 	func_to_func->hwdev = hwdev;
1138 	func_to_func->hwif = hwdev->hwif;
1139 	sema_init(&func_to_func->mbox_send_sem, 1);
1140 	sema_init(&func_to_func->msg_send_sem, 1);
1141 	spin_lock_init(&func_to_func->mbox_lock);
1142 	func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
1143 	if (!func_to_func->workq) {
1144 		dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
1145 		err = -ENOMEM;
1146 		goto err_create_mbox_workq;
1147 	}
1148 
1149 	err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
1150 	if (err) {
1151 		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
1152 		goto err_alloc_mbox_for_send;
1153 	}
1154 
1155 	err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
1156 	if (err) {
1157 		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
1158 		goto err_alloc_mbox_for_resp;
1159 	}
1160 
1161 	err = alloc_mbox_wb_status(func_to_func);
1162 	if (err) {
1163 		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
1164 		goto err_alloc_wb_status;
1165 	}
1166 
1167 	prepare_send_mbox(func_to_func);
1168 
1169 	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
1170 				 &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
1171 	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
1172 				 &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
1173 
1174 	if (!HINIC_IS_VF(hwdev->hwif))
1175 		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
1176 					  comm_pf_mbox_handler);
1177 
1178 	return 0;
1179 
1180 err_alloc_wb_status:
1181 	free_mbox_info(func_to_func->mbox_resp);
1182 
1183 err_alloc_mbox_for_resp:
1184 	free_mbox_info(func_to_func->mbox_send);
1185 
1186 err_alloc_mbox_for_send:
1187 	destroy_workqueue(func_to_func->workq);
1188 
1189 err_create_mbox_workq:
1190 	kfree(func_to_func);
1191 
1192 	return err;
1193 }
1194 
1195 void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
1196 {
1197 	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
1198 
1199 	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
1200 	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
1201 
1202 	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
1203 	/* destroy workqueue before free related mbox resources in case of
1204 	 * illegal resource access
1205 	 */
1206 	destroy_workqueue(func_to_func->workq);
1207 
1208 	free_mbox_wb_status(func_to_func);
1209 	free_mbox_info(func_to_func->mbox_resp);
1210 	free_mbox_info(func_to_func->mbox_send);
1211 
1212 	kfree(func_to_func);
1213 }
1214