// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

#include "fbnic.h"
#include "fbnic_tlv.h"

static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

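	/* Write the upper word first and flush it to the device before
	 * writing the lower word. The assumption here is that the firmware
	 * keys off of the lower word, so ordering the writes this way keeps
	 * it from ever seeing a half-updated descriptor.
	 */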
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}

static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
	u64 desc;

	desc = fw_rd32(fbd, desc_offset);
	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;

	return desc;
}

static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);

	fw_wrfl(fbd);

	/* We then fill the rest of the ring starting at the end and moving
	 * back toward descriptor 0 with skip descriptors that have no
	 * length or address, telling the firmware that it can skip them
	 * and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
		__fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
				    FBNIC_IPC_MBX_DESC_FW_CMPL |
				    FBNIC_IPC_MBX_DESC_HOST_CMPL);
		fw_wrfl(fbd);
	}
}
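
/* Illustration only: after fbnic_mbx_init_desc_ring() the ring looks like
 * (assuming a ring size of FBNIC_IPC_MBX_DESC_LEN == 8 for the sketch):
 *
 *	desc[0]    = 0                                  stop descriptor
 *	desc[1..7] = FW_CMPL | HOST_CMPL                skip descriptors
 *
 * so the firmware walks past the skip entries and parks on descriptor 0.
 */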

void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);
}

static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr)) {
		free_page((unsigned long)msg);

		return -ENOSPC;
	}

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}
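
/* Note on ring occupancy: fbnic_mbx_map_msg() treats the ring as full when
 * advancing the tail would make it equal to the head, so at most
 * FBNIC_IPC_MBX_DESC_LEN - 1 messages can be outstanding at once. Clearing
 * FBNIC_IPC_MBX(mbx_idx, mbx->tail) before publishing the new descriptor is
 * presumably what keeps a stop word ahead of the firmware as the tail
 * advances.
 */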

static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
					 int desc_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	int direction;

	if (!mbx->buf_info[desc_idx].msg)
		return;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;
	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
			 PAGE_SIZE, direction);

	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
	mbx->buf_info[desc_idx].msg = NULL;
}

static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int i;

	fbnic_mbx_init_desc_ring(fbd, mbx_idx);

	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
}

void fbnic_mbx_clean(struct fbnic_dev *fbd)
{
	int i;

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_clean_desc_ring(fbd, i);
}

#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)

static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if the mailbox is not ready, or we already have pages on
	 * the ring that can be used by the firmware
	 */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but one of the unused descriptors in the Rx queue. */
	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			free_page((unsigned long)msg);
	}

	return err;
}
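
/* Worked example for the count calculation above (illustrative values):
 * with FBNIC_IPC_MBX_DESC_LEN == 8, head == 5 and tail == 2 the Rx ring
 * has free slots at indices 2, 3 and 4. Leaving one slot free so head and
 * tail never collide gives count = (5 - 2 - 1) % 8 = 2, so two pages are
 * allocated and mapped at indices 2 and 3.
 */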

static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
				 struct fbnic_tlv_msg *msg)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}
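
/* Tx completions are reclaimed strictly in order: the loop above stops at
 * the first descriptor the firmware has not yet marked FW_CMPL, so any
 * messages queued behind it are left mapped until the next poll.
 */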

/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 *   One of the following values:
 *     0: Success
 *     -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
 *     -ENODEV: Device I/O error
 *     -ENOMEM: Failed to allocate message
 *     -EBUSY: No space in mailbox
 *     -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However, there are no other side effects, which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		free_page((unsigned long)msg);

	return err;
}
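
/* Usage sketch (illustrative, not a new code path): a caller that wants the
 * firmware to report its capabilities can simply do
 *
 *	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
 *
 * and then pick up the reply via the FW_CAP_RESP parser once it arrives in
 * the Rx mailbox. fbnic_fw_xmit_cap_msg() below is exactly this call, with
 * -EOPNOTSUPP mapped to 0.
 */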

/**
 * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
 * @fbd: FBNIC device structure
 *
 * Return: zero on success, or when the mailbox is not supported because we
 * are not running on the ASIC; otherwise a negative error value.
 *
 * Sends a single TLV header indicating the host wants the firmware to
 * confirm the capabilities and version.
 */
static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
{
	int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);

	/* Return 0 if we are not calling this on the ASIC */
	return (err == -EOPNOTSUPP) ? 0 : err;
}

static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	/* This is a one-time init, so just exit if it is completed */
	if (mbx->ready)
		return;

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Force version to 1 if we successfully requested an update
		 * from the firmware. This should be overwritten once we get
		 * the actual version from the firmware in the capabilities
		 * request message.
		 */
		if (!fbnic_fw_xmit_cap_msg(fbd) &&
		    !fbd->fw_cap.running.mgmt.version)
			fbd->fw_cap.running.mgmt.version = 1;
		break;
	}
}

static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
{
	int i;

	/* We only need to do this on the first interrupt following init.
	 * This primes the mailbox so that we will have cleared all the
	 * skip descriptors.
	 */
	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
		return;

	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_postinit_desc_ring(fbd, i);
}

/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message to FW mailbox
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}
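
/* Usage sketch (illustrative; the actual call sites live outside this file):
 * the intent is that the rest of the driver claims the NIC with
 *
 *	err = fbnic_fw_xmit_ownership_msg(fbd, true);
 *
 * when it starts using the device, and hands it back with
 *
 *	fbnic_fw_xmit_ownership_msg(fbd, false);
 *
 * on the way down. Taking ownership also arms the heartbeat tracking that
 * fbnic_fw_check_heartbeat() relies on.
 */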

static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses */
	if (len > 8)
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}

static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 active_slot = 0, all_multi = 0;
	struct fbnic_dev *fbd = opaque;
	u32 speed = 0, fec = 0;
	size_t commit_size = 0;
	bool bmc_present;
	int err;

	get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
			    fbd->fw_cap.running.mgmt.version);

	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
			  fbd->fw_cap.running.mgmt.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
	if (!commit_size)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
			    fbd->fw_cap.stored.mgmt.version);
	get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
			  fbd->fw_cap.stored.mgmt.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
			    fbd->fw_cap.running.bootloader.version);
	get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
			  fbd->fw_cap.running.bootloader.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
			    fbd->fw_cap.stored.bootloader.version);
	get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
			  fbd->fw_cap.stored.bootloader.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
			    fbd->fw_cap.stored.undi.version);
	get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
			  fbd->fw_cap.stored.undi.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
	fbd->fw_cap.active_slot = active_slot;

	get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
	get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
	fbd->fw_cap.link_speed = speed;
	fbd->fw_cap.link_fec = fec;

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	return 0;
}

static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_ownership_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	/* Count the ownership response as a heartbeat reply */
	fbd->last_heartbeat_response = jiffies;

	return 0;
}

static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_heartbeat_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	fbd->last_heartbeat_response = jiffies;

	return 0;
}

static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	fbd->last_heartbeat_request = req_time;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
{
	unsigned long last_response = fbd->last_heartbeat_response;
	unsigned long last_request = fbd->last_heartbeat_request;

	return !time_before(last_response, last_request);
}
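
/* Illustrative timeline for the heartbeat bookkeeping: a request stamps
 * last_heartbeat_request, and any heartbeat (or ownership) response stamps
 * last_heartbeat_response. The heartbeat is "current" only while
 * last_response >= last_request; if the firmware never answers the latest
 * request, the next fbnic_fw_check_heartbeat() pass sees a stale response
 * and reports the firmware as unresponsive.
 */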

int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place a new message on the mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}

void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response a long time ago? */
	if (!fbnic_fw_heartbeat_current(fbd)) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place a new message on the mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}

static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_MSG_ERROR
};

static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails, dump the contents of the message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}

void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_postinit(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}

int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx;
	int attempts = 50;

	/* Fail immediately if BAR4 isn't there */
	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	while (!tx_mbx->ready && --attempts) {
		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		msleep(200);

		fbnic_mbx_poll(fbd);
	}

	return attempts ? 0 : -ETIMEDOUT;
}
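
/* With 50 attempts and a 200 ms sleep per attempt, fbnic_mbx_poll_tx_ready()
 * gives the firmware roughly 10 seconds to bring the Tx mailbox to the
 * ready state before giving up with -ETIMEDOUT.
 */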

void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx;
	int attempts = 50;
	u8 count = 0;

	/* Nothing to do if there is no mailbox */
	if (!fbnic_fw_present(fbd))
		return;

	/* Use the Tx mailbox since that is what we are flushing */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	/* Nothing to do if the mailbox never became ready */
	if (!tx_mbx->ready)
		return;

	/* Give the firmware time to process the outstanding messages;
	 * we will wait up to 10 seconds, which is 50 waits of 200ms.
	 */
	do {
		u8 head = tx_mbx->head;

		if (head == tx_mbx->tail)
			break;

		msleep(200);
		fbnic_mbx_process_tx_msgs(fbd);

		count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
	} while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
}