xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_fw.c (revision e0c0ab04f6785abaa71b9b8dc252cb1a2072c225)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/etherdevice.h>
6 #include <linux/delay.h>
7 #include <linux/dev_printk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/gfp.h>
10 #include <linux/types.h>
11 
12 #include "fbnic.h"
13 #include "fbnic_tlv.h"
14 
/* Write one 64b mailbox descriptor as two 32b MMIO writes.
 *
 * The upper half goes out first, then the lower half, with a posted-write
 * flush in between so the two writes cannot be merged or reordered on the
 * way to the device. The lower 32b hold the state bits, so the FW can read
 * lower, upper, lower and detect a descriptor changing mid-transaction.
 */
static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* Write the upper 32b and then the lower 32b. Doing this the
	 * FW can then read lower, upper, lower to verify that the state
	 * of the descriptor wasn't changed mid-transaction.
	 */
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
28 
/* Invalidate one mailbox descriptor, writing halves in the reverse order
 * of __fbnic_mbx_wr_desc(): the lower 32b (state bits, @desc) land first
 * so the descriptor is already marked invalid by the time the upper 32b
 * are cleared. The flush keeps the two writes ordered at the device.
 */
static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* For initialization we write the lower 32b of the descriptor first.
	 * This way we can set the state to mark it invalid before we clear the
	 * upper 32b.
	 */
	fw_wr32(fbd, desc_offset, desc);
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset + 1, 0);
}
42 
43 static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
44 {
45 	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
46 	u64 desc;
47 
48 	desc = fw_rd32(fbd, desc_offset);
49 	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
50 
51 	return desc;
52 }
53 
/* Return one mailbox descriptor ring to its post-reset state.
 *
 * DMA in the direction served by the ring is flushed first so the device
 * cannot touch host memory while the descriptors are rewritten, then the
 * ring is refilled with skip descriptors, leaving descriptor 0 zeroed as
 * a hard stop for the FW.
 */
static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Disable DMA transactions from the device,
	 * and flush any transactions triggered during cleaning
	 */
	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Rx ring is written by the device: flush outbound writes */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Tx ring is read by the device: flush outbound reads */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
		break;
	}

	/* Flush posted writes so the DMA disable lands before we start
	 * rewriting the descriptors below.
	 */
	wrfl(fbd);

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);

	/* We then fill the rest of the ring starting at the end and moving
	 * back toward descriptor 0 with skip descriptors that have no
	 * length nor address, and tell the firmware that they can skip
	 * them and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
					    FBNIC_IPC_MBX_DESC_FW_CMPL |
					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
90 
/* Initialize both IPC mailbox rings after probe or reset.
 *
 * Resets the software ring bookkeeping, configures the FW doorbell
 * interrupt so software acknowledges it explicitly, clears any stale
 * cause bits, and places each descriptor ring into its post-reset
 * (all-skip-descriptor) state.
 */
void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_reset_desc_ring(fbd, i);
}
111 
112 static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
113 			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
114 {
115 	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
116 	u8 tail = mbx->tail;
117 	dma_addr_t addr;
118 	int direction;
119 
120 	if (!mbx->ready || !fbnic_fw_present(fbd))
121 		return -ENODEV;
122 
123 	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
124 							DMA_TO_DEVICE;
125 
126 	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
127 		return -EBUSY;
128 
129 	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
130 	if (dma_mapping_error(fbd->dev, addr)) {
131 		free_page((unsigned long)msg);
132 
133 		return -ENOSPC;
134 	}
135 
136 	mbx->buf_info[tail].msg = msg;
137 	mbx->buf_info[tail].addr = addr;
138 
139 	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;
140 
141 	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);
142 
143 	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
144 			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
145 			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
146 			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
147 			    FBNIC_IPC_MBX_DESC_HOST_CMPL);
148 
149 	return 0;
150 }
151 
152 static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
153 					 int desc_idx)
154 {
155 	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
156 	int direction;
157 
158 	if (!mbx->buf_info[desc_idx].msg)
159 		return;
160 
161 	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
162 							DMA_TO_DEVICE;
163 	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
164 			 PAGE_SIZE, direction);
165 
166 	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
167 	mbx->buf_info[desc_idx].msg = NULL;
168 }
169 
170 static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
171 {
172 	int i;
173 
174 	fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
175 
176 	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
177 		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
178 }
179 
180 void fbnic_mbx_clean(struct fbnic_dev *fbd)
181 {
182 	int i;
183 
184 	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
185 		fbnic_mbx_clean_desc_ring(fbd, i);
186 }
187 
/* Largest buffer length representable in the descriptor LEN field; Rx
 * pages are advertised as the smaller of this and PAGE_SIZE.
 */
#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
190 
/* Post page-sized Rx buffers for the FW to write messages into.
 *
 * Fills all but one descriptor of the Rx mailbox (one slot stays empty to
 * distinguish full from empty). Pages are allocated GFP_ATOMIC since this
 * can run from the interrupt/poll path.
 *
 * Return: 0 on success, -ENODEV if the mailbox isn't ready, -ENOMEM on
 * allocation failure, or the error from fbnic_mbx_map_msg().
 */
static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if mailbox is not ready, or we already have pages on
	 * the ring that can be used by the firmware
	 */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but 1 unused descriptors in the Rx queue. */
	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		/* On mapping failure the page is still ours, release it */
		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			free_page((unsigned long)msg);
	}

	return err;
}
223 
224 static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
225 				 struct fbnic_tlv_msg *msg)
226 {
227 	unsigned long flags;
228 	int err;
229 
230 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
231 
232 	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
233 				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
234 
235 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
236 
237 	return err;
238 }
239 
240 static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
241 				   struct fbnic_fw_completion *cmpl_data)
242 {
243 	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
244 	int free = -EXFULL;
245 	int i;
246 
247 	if (!tx_mbx->ready)
248 		return -ENODEV;
249 
250 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
251 		if (!fbd->cmpl_data[i])
252 			free = i;
253 		else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
254 			return -EEXIST;
255 	}
256 
257 	if (free == -EXFULL)
258 		return -EXFULL;
259 
260 	fbd->cmpl_data[free] = cmpl_data;
261 
262 	return 0;
263 }
264 
265 static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
266 				      struct fbnic_fw_completion *cmpl_data)
267 {
268 	int i;
269 
270 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
271 		if (fbd->cmpl_data[i] == cmpl_data) {
272 			fbd->cmpl_data[i] = NULL;
273 			break;
274 		}
275 	}
276 }
277 
/* Reclaim Tx mailbox descriptors the FW has finished consuming.
 *
 * Walks from head toward tail, stopping at the first descriptor the FW
 * has not marked complete, unmapping and freeing each consumed message.
 */
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		/* FW hasn't consumed this descriptor yet, stop here */
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}
298 
299 int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
300 		       struct fbnic_fw_completion *cmpl_data)
301 {
302 	unsigned long flags;
303 	int err;
304 
305 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
306 	err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
307 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
308 
309 	return err;
310 }
311 
312 static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
313 				    struct fbnic_tlv_msg *msg,
314 				    struct fbnic_fw_completion *cmpl_data)
315 {
316 	unsigned long flags;
317 	int err;
318 
319 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
320 	if (cmpl_data) {
321 		err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
322 		if (err)
323 			goto unlock_mbx;
324 	}
325 
326 	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
327 				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
328 
329 	/* If we successfully reserved a completion and msg failed
330 	 * then clear completion data for next caller
331 	 */
332 	if (err && cmpl_data)
333 		fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
334 
335 unlock_mbx:
336 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
337 
338 	return err;
339 }
340 
341 static void fbnic_fw_release_cmpl_data(struct kref *kref)
342 {
343 	struct fbnic_fw_completion *cmpl_data;
344 
345 	cmpl_data = container_of(kref, struct fbnic_fw_completion,
346 				 ref_count);
347 	kfree(cmpl_data);
348 }
349 
350 static struct fbnic_fw_completion *
351 fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
352 {
353 	struct fbnic_fw_completion *cmpl_data = NULL;
354 	unsigned long flags;
355 	int i;
356 
357 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
358 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
359 		if (fbd->cmpl_data[i] &&
360 		    fbd->cmpl_data[i]->msg_type == msg_type) {
361 			cmpl_data = fbd->cmpl_data[i];
362 			kref_get(&cmpl_data->ref_count);
363 			break;
364 		}
365 	}
366 
367 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
368 
369 	return cmpl_data;
370 }
371 
372 /**
373  * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
374  * @fbd: FBNIC device structure
375  * @msg_type: ENUM value indicating message type to send
376  *
377  * Return:
378  *   One the following values:
379  *     -EOPNOTSUPP: Is not ASIC so mailbox is not supported
380  *     -ENODEV: Device I/O error
381  *     -ENOMEM: Failed to allocate message
382  *     -EBUSY: No space in mailbox
383  *     -ENOSPC: DMA mapping failed
384  *
385  * This function sends a single TLV header indicating the host wants to take
386  * some action. However there are no other side effects which means that any
387  * response will need to be caught via a completion if this action is
388  * expected to kick off a resultant action.
389  */
390 static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
391 {
392 	struct fbnic_tlv_msg *msg;
393 	int err = 0;
394 
395 	if (!fbnic_fw_present(fbd))
396 		return -ENODEV;
397 
398 	msg = fbnic_tlv_msg_alloc(msg_type);
399 	if (!msg)
400 		return -ENOMEM;
401 
402 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
403 	if (err)
404 		free_page((unsigned long)msg);
405 
406 	return err;
407 }
408 
/* Mark a mailbox ring ready and enable the DMA direction it depends on:
 * bus-master writes for the Rx ring (device writes host memory) and
 * bus-master reads for the Tx ring (device reads host memory). The Rx
 * ring is also pre-populated with pages for the FW to write into.
 */
static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Enable DMA writes from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);

		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Enable DMA reads from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
		break;
	}
}
431 
432 static bool fbnic_mbx_event(struct fbnic_dev *fbd)
433 {
434 	/* We only need to do this on the first interrupt following reset.
435 	 * this primes the mailbox so that we will have cleared all the
436 	 * skip descriptors.
437 	 */
438 	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
439 		return false;
440 
441 	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
442 
443 	return true;
444 }
445 
446 /**
447  * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
448  * to FW mailbox
449  *
450  * @fbd: FBNIC device structure
451  * @take_ownership: take/release the ownership
452  *
453  * Return: zero on success, negative value on failure
454  *
455  * Notifies the firmware that the driver either takes ownership of the NIC
456  * (when @take_ownership is true) or releases it.
457  */
458 int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
459 {
460 	unsigned long req_time = jiffies;
461 	struct fbnic_tlv_msg *msg;
462 	int err = 0;
463 
464 	if (!fbnic_fw_present(fbd))
465 		return -ENODEV;
466 
467 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
468 	if (!msg)
469 		return -ENOMEM;
470 
471 	if (take_ownership) {
472 		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
473 		if (err)
474 			goto free_message;
475 	}
476 
477 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
478 	if (err)
479 		goto free_message;
480 
481 	/* Initialize heartbeat, set last response to 1 second in the past
482 	 * so that we will trigger a timeout if the firmware doesn't respond
483 	 */
484 	fbd->last_heartbeat_response = req_time - HZ;
485 
486 	fbd->last_heartbeat_request = req_time;
487 
488 	/* Set heartbeat detection based on if we are taking ownership */
489 	fbd->fw_heartbeat_enabled = take_ownership;
490 
491 	return err;
492 
493 free_message:
494 	free_page((unsigned long)msg);
495 	return err;
496 }
497 
/* TLV attribute schema for the FW capabilities response message */
static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
	FBNIC_TLV_ATTR_LAST
};
524 
525 static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
526 				    struct fbnic_tlv_msg *attr, int len)
527 {
528 	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
529 	struct fbnic_tlv_msg *mac_results[8];
530 	int err, i = 0;
531 
532 	/* Make sure we have enough room to process all the MAC addresses */
533 	if (len > 8)
534 		return -ENOSPC;
535 
536 	/* Parse the array */
537 	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
538 					 fbnic_fw_cap_resp_index,
539 					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
540 	if (err)
541 		return err;
542 
543 	/* Copy results into MAC addr array */
544 	for (i = 0; i < len && mac_results[i]; i++)
545 		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);
546 
547 	/* Zero remaining unused addresses */
548 	while (i < len)
549 		eth_zero_addr(bmc_mac_addr[i++]);
550 
551 	return 0;
552 }
553 
/* Parse the FW capabilities response and cache it in fbd->fw_cap.
 *
 * Records running/stored component versions, link parameters, and BMC
 * information. Rejects the response if the running management FW version
 * is missing or below the supported minimum (in which case the Tx mailbox
 * is disabled to keep the card from being used).
 *
 * Return: 0 on success, -EINVAL on a missing/too-old version or missing
 * BMC MAC array, or the error from fbnic_fw_parse_bmc_addrs().
 */
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	/* A zero version means the FW didn't report one - treat as invalid */
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	/* Stored (flashed) management FW version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Running and stored bootloader (CMRT) versions/commits */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Stored UEFI driver (UNDI) version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		/* A BMC without its MAC array is malformed */
		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	/* Only overwrite all_multi when the FW reported it or no BMC */
	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	fbd->fw_cap.anti_rollback_version =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);

	return 0;
}
649 
/* Ownership response carries no attributes - schema is header only */
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};
653 
654 static int fbnic_fw_parse_ownership_resp(void *opaque,
655 					 struct fbnic_tlv_msg **results)
656 {
657 	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
658 
659 	/* Count the ownership response as a heartbeat reply */
660 	fbd->last_heartbeat_response = jiffies;
661 
662 	return 0;
663 }
664 
/* Heartbeat response carries no attributes - schema is header only */
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};
668 
669 static int fbnic_fw_parse_heartbeat_resp(void *opaque,
670 					 struct fbnic_tlv_msg **results)
671 {
672 	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
673 
674 	fbd->last_heartbeat_response = jiffies;
675 
676 	return 0;
677 }
678 
679 static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
680 {
681 	unsigned long req_time = jiffies;
682 	struct fbnic_tlv_msg *msg;
683 	int err = 0;
684 
685 	if (!fbnic_fw_present(fbd))
686 		return -ENODEV;
687 
688 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
689 	if (!msg)
690 		return -ENOMEM;
691 
692 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
693 	if (err)
694 		goto free_message;
695 
696 	fbd->last_heartbeat_request = req_time;
697 
698 	return err;
699 
700 free_message:
701 	free_page((unsigned long)msg);
702 	return err;
703 }
704 
705 static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
706 {
707 	unsigned long last_response = fbd->last_heartbeat_response;
708 	unsigned long last_request = fbd->last_heartbeat_request;
709 
710 	return !time_before(last_response, last_request);
711 }
712 
713 int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
714 {
715 	int err = -ETIMEDOUT;
716 	int attempts = 50;
717 
718 	if (!fbnic_fw_present(fbd))
719 		return -ENODEV;
720 
721 	while (attempts--) {
722 		msleep(200);
723 		if (poll)
724 			fbnic_mbx_poll(fbd);
725 
726 		if (!fbnic_fw_heartbeat_current(fbd))
727 			continue;
728 
729 		/* Place new message on mailbox to elicit a response */
730 		err = fbnic_fw_xmit_heartbeat_message(fbd);
731 		if (err)
732 			dev_warn(fbd->dev,
733 				 "Failed to send heartbeat message: %d\n",
734 				 err);
735 		break;
736 	}
737 
738 	return err;
739 }
740 
741 void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
742 {
743 	unsigned long last_request = fbd->last_heartbeat_request;
744 	int err;
745 
746 	/* Do not check heartbeat or send another request until current
747 	 * period has expired. Otherwise we might start spamming requests.
748 	 */
749 	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
750 		return;
751 
752 	/* We already reported no mailbox. Wait for it to come back */
753 	if (!fbd->fw_heartbeat_enabled)
754 		return;
755 
756 	/* Was the last heartbeat response long time ago? */
757 	if (!fbnic_fw_heartbeat_current(fbd)) {
758 		dev_warn(fbd->dev,
759 			 "Firmware did not respond to heartbeat message\n");
760 		fbd->fw_heartbeat_enabled = false;
761 	}
762 
763 	/* Place new message on mailbox to elicit a response */
764 	err = fbnic_fw_xmit_heartbeat_message(fbd);
765 	if (err)
766 		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
767 }
768 
769 int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
770 				   struct fbnic_fw_completion *cmpl_data,
771 				   unsigned int id, unsigned int len)
772 {
773 	struct fbnic_tlv_msg *msg;
774 	int err;
775 
776 	if (!fbnic_fw_present(fbd))
777 		return -ENODEV;
778 
779 	if (!len)
780 		return -EINVAL;
781 
782 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
783 	if (!msg)
784 		return -ENOMEM;
785 
786 	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
787 	if (err)
788 		goto free_message;
789 
790 	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
791 				     len);
792 	if (err)
793 		goto free_message;
794 
795 	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
796 	if (err)
797 		goto free_message;
798 
799 	return 0;
800 
801 free_message:
802 	free_page((unsigned long)msg);
803 	return err;
804 }
805 
/* Start-upgrade response schema: a single signed error/status code */
static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
810 
811 static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
812 						struct fbnic_tlv_msg **results)
813 {
814 	struct fbnic_fw_completion *cmpl_data;
815 	struct fbnic_dev *fbd = opaque;
816 	u32 msg_type;
817 	s32 err;
818 
819 	/* Verify we have a completion pointer */
820 	msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
821 	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
822 	if (!cmpl_data)
823 		return -ENOSPC;
824 
825 	/* Check for errors */
826 	err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
827 
828 	cmpl_data->result = err;
829 	complete(&cmpl_data->done);
830 	fbnic_fw_put_cmpl(cmpl_data);
831 
832 	return 0;
833 }
834 
835 int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
836 				 const u8 *data, u32 offset, u16 length,
837 				 int cancel_error)
838 {
839 	struct fbnic_tlv_msg *msg;
840 	int err;
841 
842 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
843 	if (!msg)
844 		return -ENOMEM;
845 
846 	/* Report error to FW to cancel upgrade */
847 	if (cancel_error) {
848 		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
849 					     cancel_error);
850 		if (err)
851 			goto free_message;
852 	}
853 
854 	if (data) {
855 		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
856 					     offset);
857 		if (err)
858 			goto free_message;
859 
860 		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
861 					     length);
862 		if (err)
863 			goto free_message;
864 
865 		err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
866 					       data + offset, length);
867 		if (err)
868 			goto free_message;
869 	}
870 
871 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
872 	if (err)
873 		goto free_message;
874 
875 	return 0;
876 
877 free_message:
878 	free_page((unsigned long)msg);
879 	return err;
880 }
881 
/* Write-chunk request schema: the offset/length the FW wants next */
static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
	FBNIC_TLV_ATTR_LAST
};
887 
888 static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
889 					     struct fbnic_tlv_msg **results)
890 {
891 	struct fbnic_fw_completion *cmpl_data;
892 	struct fbnic_dev *fbd = opaque;
893 	u32 msg_type;
894 	u32 offset;
895 	u32 length;
896 
897 	/* Verify we have a completion pointer */
898 	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
899 	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
900 	if (!cmpl_data)
901 		return -ENOSPC;
902 
903 	/* Pull length/offset pair and mark it as complete */
904 	offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
905 	length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
906 	cmpl_data->u.fw_update.offset = offset;
907 	cmpl_data->u.fw_update.length = length;
908 
909 	complete(&cmpl_data->done);
910 	fbnic_fw_put_cmpl(cmpl_data);
911 
912 	return 0;
913 }
914 
/* Finish-upgrade request schema: a single signed error/status code */
static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
919 
/* Handle the FW's finish-upgrade notification: deliver the final status
 * to the upgrade waiter and mark the transfer complete.
 */
static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
						struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer.
	 * NOTE(review): the lookup deliberately uses the WRITE_CHUNK_REQ
	 * type - it appears the same completion tracks the whole upgrade
	 * data transfer; confirm against the completion registration site.
	 */
	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Check for errors */
	err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);

	/* Close out update by incrementing offset by length which should
	 * match the total size of the component. Set length to 0 since no
	 * new chunks will be requested.
	 */
	cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
	cmpl_data->u.fw_update.length = 0;

	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}
950 
951 /**
952  * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
953  * @fbd: FBNIC device structure
954  * @cmpl_data: Completion data structure to store sensor response
955  *
956  * Asks the firmware to provide an update with the latest sensor data.
957  * The response will contain temperature and voltage readings.
958  *
959  * Return: 0 on success, negative error value on failure
960  */
961 int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
962 				 struct fbnic_fw_completion *cmpl_data)
963 {
964 	struct fbnic_tlv_msg *msg;
965 	int err;
966 
967 	if (!fbnic_fw_present(fbd))
968 		return -ENODEV;
969 
970 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
971 	if (!msg)
972 		return -ENOMEM;
973 
974 	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
975 	if (err)
976 		goto free_message;
977 
978 	return 0;
979 
980 free_message:
981 	free_page((unsigned long)msg);
982 	return err;
983 }
984 
/* Sensor read response schema: temperature, voltage, and a status code */
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
991 
/* Deliver a sensor read response to the registered waiter.
 *
 * The waiter's result is the FW-reported error if any, otherwise our own
 * parse status; readings are only copied when both attributes are present.
 *
 * Return: 0 on success, -ENOSPC if no waiter is registered, -EINVAL if a
 * reading is missing.
 */
static int fbnic_fw_parse_tsene_read_resp(void *opaque,
					  struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	s32 err_resp;
	int err = 0;

	/* Verify we have a completion pointer to provide with data */
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl_data)
		return -ENOSPC;

	/* FW signalled a sensor error - skip the readings entirely */
	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
	if (err_resp)
		goto msg_err;

	/* Both readings must be present for the response to be usable */
	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
		err = -EINVAL;
		goto msg_err;
	}

	cmpl_data->u.tsene.millidegrees =
		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
	cmpl_data->u.tsene.millivolts =
		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);

msg_err:
	/* FW error takes precedence over our local parse status */
	cmpl_data->result = err_resp ? : err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}
1027 
/* Dispatch table mapping each FW message type to its attribute schema
 * and parse handler; unknown types fall through to the error entry.
 */
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
			 fbnic_fw_start_upgrade_resp_index,
			 fbnic_fw_parse_fw_start_upgrade_resp),
	FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
			 fbnic_fw_write_chunk_req_index,
			 fbnic_fw_parse_fw_write_chunk_req),
	FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
			 fbnic_fw_finish_upgrade_req_index,
			 fbnic_fw_parse_fw_finish_upgrade_req),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_MSG_ERROR
};
1049 
/* Drain messages the FW has written to the Rx mailbox ring.
 *
 * For each descriptor the FW marked complete: unmap the page, validate
 * the advertised length, run the message through the TLV parser (dumping
 * it to dmesg on failure), then free the page and advance. Finally the
 * ring is replenished with fresh pages for the FW.
 */
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		/* FW hasn't written this descriptor yet, stop here */
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		/* Unmap before the CPU reads the FW-written contents */
		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		/* TLV header claims more data than the descriptor carries */
		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}
1112 
/* Service both mailbox rings from a polling context: check for mailbox
 * events first, then reclaim finished Tx descriptors and process any
 * pending Rx messages.
 */
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}
1120 
/* Wait for the firmware to bring up its side of the mailbox, then start it.
 *
 * Polls for roughly 10 seconds, resetting the Tx descriptor ring on each
 * iteration to provoke a response, and bails out early if BAR4 disappears.
 * Once the FW responds, both rings are initialized and a capabilities
 * request is sent so the reported FW version can be filled in later.
 *
 * Return: 0 on success, -ETIMEDOUT if the FW never responds, -ENODEV if
 * the device vanished, or the error from sending the capabilities request.
 */
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;	/* ~10s poll budget */
	int err, i;

	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Use "1" to indicate we entered the state waiting for a response */
	fbd->fw_cap.running.mgmt.version = 1;

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}
1164 
/* Fail a single outstanding completion with -EPIPE and wake its waiter.
 * The result is written before complete() so the waiter observes it.
 * The caller is responsible for clearing the slot that referenced it.
 */
static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
{
	cmpl_data->result = -EPIPE;
	complete(&cmpl_data->done);
}
1170 
1171 static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
1172 {
1173 	int i;
1174 
1175 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
1176 		struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
1177 
1178 		if (cmpl_data)
1179 			__fbnic_fw_evict_cmpl(cmpl_data);
1180 	}
1181 
1182 	memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
1183 }
1184 
/* Quiesce the Tx mailbox and wait for the firmware to drain it.
 *
 * Marks the ring not-ready (no new transmits), snapshots the tail, and
 * evicts all pending completions under the lock.  Then polls head up to
 * ~10 seconds until the FW has consumed everything up to that tail.
 */
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Locate the Tx mailbox we are about to shut down */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	/* Flush any completions as we are no longer processing Rx */
	fbnic_mbx_evict_all_cmpl(fbd);

	spin_unlock_irq(&fbd->fw_tx_lock);

	/* Give firmware time to process packet,
	 * we will wait up to 10 seconds which is 500 waits of 20ms.
	 */
	do {
		u8 head = tx_mbx->head;

		/* Tx ring is empty once head == tail */
		if (head == tail)
			break;

		msleep(20);
		fbnic_mbx_process_tx_msgs(fbd);
	} while (time_is_after_jiffies(timeout));
}
1221 
1222 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
1223 				 const size_t str_sz)
1224 {
1225 	struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
1226 	const char *delim = "";
1227 
1228 	if (mgmt->commit[0])
1229 		delim = "_";
1230 
1231 	fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
1232 				 fw_version, str_sz);
1233 }
1234 
1235 struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
1236 {
1237 	struct fbnic_fw_completion *cmpl;
1238 
1239 	cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
1240 	if (!cmpl)
1241 		return NULL;
1242 
1243 	cmpl->msg_type = msg_type;
1244 	init_completion(&cmpl->done);
1245 	kref_init(&cmpl->ref_count);
1246 
1247 	return cmpl;
1248 }
1249 
/* Remove fw_cmpl from the device's completion slot table.  Taken under
 * fw_tx_lock so the clear cannot race with the mailbox Tx path.
 */
void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
			 struct fbnic_fw_completion *fw_cmpl)
{
	unsigned long flags;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
}
1259 
/* Drop a reference on a completion; fbnic_fw_release_cmpl_data() runs
 * (freeing it) once the last reference is gone.
 */
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
}
1264