xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_fw.c (revision 23313771c7b99b3b8dba169bc71dae619d41ab56)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/etherdevice.h>
6 #include <linux/delay.h>
7 #include <linux/dev_printk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/gfp.h>
10 #include <linux/types.h>
11 
12 #include "fbnic.h"
13 #include "fbnic_tlv.h"
14 
15 static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
16 				int desc_idx, u64 desc)
17 {
18 	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
19 
20 	/* Write the upper 32b and then the lower 32b. This way the
21 	 * FW can then read lower, upper, lower to verify that the state
22 	 * of the descriptor wasn't changed mid-transaction.
23 	 */
24 	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
25 	fw_wrfl(fbd);
26 	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
27 }
28 
29 static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
30 					int desc_idx, u32 desc)
31 {
32 	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
33 
34 	/* For initialization we write the lower 32b of the descriptor first.
35 	 * This way we can set the state to mark it invalid before we clear the
36 	 * upper 32b.
37 	 */
38 	fw_wr32(fbd, desc_offset, desc);
39 	fw_wrfl(fbd);
40 	fw_wr32(fbd, desc_offset + 1, 0);
41 }
42 
43 static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
44 {
45 	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
46 	u64 desc;
47 
48 	desc = fw_rd32(fbd, desc_offset);
49 	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
50 
51 	return desc;
52 }
53 
54 static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
55 {
56 	int desc_idx;
57 
58 	/* Disable DMA transactions from the device,
59 	 * and flush any transactions triggered during cleaning
60 	 */
61 	switch (mbx_idx) {
62 	case FBNIC_IPC_MBX_RX_IDX:
63 		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
64 		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
65 		break;
66 	case FBNIC_IPC_MBX_TX_IDX:
67 		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
68 		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
69 		break;
70 	}
71 
72 	wrfl(fbd);
73 
74 	/* Initialize first descriptor to all 0s. Doing this gives us a
75 	 * solid stop for the firmware to hit when it is done looping
76 	 * through the ring.
77 	 */
78 	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
79 
80 	/* We then fill the rest of the ring starting at the end and moving
81 	 * back toward descriptor 0 with skip descriptors that have neither
82 	 * length nor address, and tell the firmware that it can skip
83 	 * them and just move past them to the one we initialized to 0.
84 	 */
85 	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
86 		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
87 					    FBNIC_IPC_MBX_DESC_FW_CMPL |
88 					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
89 }
90 
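/**
 * fbnic_mbx_init - Initialize FW mailbox state and descriptor rings
 * @fbd: FBNIC device structure
 *
 * Initializes the lock protecting the Tx mailbox, clears the cached FW
 * capabilities and mailbox bookkeeping, configures vector 0 so that SW
 * clears the FW mailbox interrupt, and resets both IPC descriptor rings.
 */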
91 void fbnic_mbx_init(struct fbnic_dev *fbd)
92 {
93 	int i;
94 
95 	/* Initialize lock to protect Tx ring */
96 	spin_lock_init(&fbd->fw_tx_lock);
97 
98 	/* Reset FW Capabilities */
99 	memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));
100 
101 	/* Reinitialize mailbox memory */
102 	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
103 		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
104 
105 	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
106 	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));
107 
108 	/* Clear any stale causes in vector 0 as that is used for doorbell */
109 	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
110 
111 	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
112 		fbnic_mbx_reset_desc_ring(fbd, i);
113 }
114 
115 static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
116 			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
117 {
118 	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
119 	u8 tail = mbx->tail;
120 	dma_addr_t addr;
121 	int direction;
122 
123 	if (!mbx->ready || !fbnic_fw_present(fbd))
124 		return -ENODEV;
125 
126 	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
127 							DMA_TO_DEVICE;
128 
129 	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
130 		return -EBUSY;
131 
132 	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
133 	if (dma_mapping_error(fbd->dev, addr))
134 		return -ENOSPC;
135 
136 	mbx->buf_info[tail].msg = msg;
137 	mbx->buf_info[tail].addr = addr;
138 
139 	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;
140 
141 	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);
142 
143 	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
144 			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
145 			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
146 			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
147 			    FBNIC_IPC_MBX_DESC_HOST_CMPL);
148 
149 	return 0;
150 }
151 
152 static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
153 					 int desc_idx)
154 {
155 	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
156 	int direction;
157 
158 	if (!mbx->buf_info[desc_idx].msg)
159 		return;
160 
161 	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
162 							DMA_TO_DEVICE;
163 	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
164 			 PAGE_SIZE, direction);
165 
166 	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
167 	mbx->buf_info[desc_idx].msg = NULL;
168 }
169 
170 static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
171 {
172 	int i;
173 
174 	fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
175 
176 	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
177 		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
178 }
179 
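/**
 * fbnic_mbx_clean - Reset both mailbox rings and free outstanding messages
 * @fbd: FBNIC device structure
 *
 * Resets the Tx and Rx descriptor rings and unmaps and frees any message
 * pages still associated with their descriptors.
 */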
180 void fbnic_mbx_clean(struct fbnic_dev *fbd)
181 {
182 	int i;
183 
184 	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
185 		fbnic_mbx_clean_desc_ring(fbd, i);
186 }
187 
188 #define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
189 #define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
190 
191 static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
192 {
193 	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
194 	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
195 	int err = 0;
196 
197 	/* Do nothing if mailbox is not ready, or we already have pages on
198 	 * the ring that can be used by the firmware
199 	 */
200 	if (!rx_mbx->ready)
201 		return -ENODEV;
202 
203 	/* Fill all but one of the unused descriptors in the Rx queue. */
204 	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
205 	while (!err && count--) {
206 		struct fbnic_tlv_msg *msg;
207 
208 		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
209 							      __GFP_NOWARN);
210 		if (!msg) {
211 			err = -ENOMEM;
212 			break;
213 		}
214 
215 		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
216 					FBNIC_RX_PAGE_SIZE, 0);
217 		if (err)
218 			free_page((unsigned long)msg);
219 	}
220 
221 	return err;
222 }
223 
224 static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
225 				 struct fbnic_tlv_msg *msg)
226 {
227 	unsigned long flags;
228 	int err;
229 
230 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
231 
232 	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
233 				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
234 
235 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
236 
237 	return err;
238 }
239 
240 static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
241 				   struct fbnic_fw_completion *cmpl_data)
242 {
243 	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
244 	int free = -EXFULL;
245 	int i;
246 
247 	if (!tx_mbx->ready)
248 		return -ENODEV;
249 
250 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
251 		if (!fbd->cmpl_data[i])
252 			free = i;
253 		else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
254 			return -EEXIST;
255 	}
256 
257 	if (free == -EXFULL)
258 		return -EXFULL;
259 
260 	fbd->cmpl_data[free] = cmpl_data;
261 
262 	return 0;
263 }
264 
265 static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
266 				      struct fbnic_fw_completion *cmpl_data)
267 {
268 	int i;
269 
270 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
271 		if (fbd->cmpl_data[i] == cmpl_data) {
272 			fbd->cmpl_data[i] = NULL;
273 			break;
274 		}
275 	}
276 }
277 
278 static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
279 {
280 	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
281 	u8 head = tx_mbx->head;
282 	u64 desc;
283 
284 	while (head != tx_mbx->tail) {
285 		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
286 		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
287 			break;
288 
289 		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
290 
291 		head++;
292 		head %= FBNIC_IPC_MBX_DESC_LEN;
293 	}
294 
295 	/* Record head for next interrupt */
296 	tx_mbx->head = head;
297 }
298 
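/**
 * fbnic_mbx_set_cmpl - Reserve a completion slot for an expected FW response
 * @fbd: FBNIC device structure
 * @cmpl_data: Completion data to associate with the expected response
 *
 * Return: zero on success, -ENODEV if the Tx mailbox is not ready, -EEXIST
 * if a completion of the same message type is already pending, or -EXFULL
 * if no completion slot is free.
 */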
299 int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
300 		       struct fbnic_fw_completion *cmpl_data)
301 {
302 	unsigned long flags;
303 	int err;
304 
305 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
306 	err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
307 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
308 
309 	return err;
310 }
311 
312 static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
313 				    struct fbnic_tlv_msg *msg,
314 				    struct fbnic_fw_completion *cmpl_data)
315 {
316 	unsigned long flags;
317 	int err;
318 
319 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
320 	if (cmpl_data) {
321 		err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
322 		if (err)
323 			goto unlock_mbx;
324 	}
325 
326 	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
327 				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
328 
329 	/* If we successfully reserved a completion but mapping the msg
330 	 * failed, then clear the completion data for the next caller
331 	 */
332 	if (err && cmpl_data)
333 		fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
334 
335 unlock_mbx:
336 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
337 
338 	return err;
339 }
340 
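/**
 * fbnic_mbx_clear_cmpl - Release a previously reserved completion slot
 * @fbd: FBNIC device structure
 * @fw_cmpl: Completion data to remove from the completion slots
 */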
341 void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
342 			  struct fbnic_fw_completion *fw_cmpl)
343 {
344 	unsigned long flags;
345 
346 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
347 	fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
348 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
349 }
350 
351 static void fbnic_fw_release_cmpl_data(struct kref *kref)
352 {
353 	struct fbnic_fw_completion *cmpl_data;
354 
355 	cmpl_data = container_of(kref, struct fbnic_fw_completion,
356 				 ref_count);
357 	kfree(cmpl_data);
358 }
359 
360 static struct fbnic_fw_completion *
361 fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
362 {
363 	struct fbnic_fw_completion *cmpl_data = NULL;
364 	unsigned long flags;
365 	int i;
366 
367 	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
368 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
369 		if (fbd->cmpl_data[i] &&
370 		    fbd->cmpl_data[i]->msg_type == msg_type) {
371 			cmpl_data = fbd->cmpl_data[i];
372 			kref_get(&cmpl_data->ref_count);
373 			break;
374 		}
375 	}
376 
377 	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
378 
379 	return cmpl_data;
380 }
381 
382 /**
383  * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
384  * @fbd: FBNIC device structure
385  * @msg_type: ENUM value indicating message type to send
386  *
387  * Return:
388  *   Zero on success, or one of the following values:
389  *	-EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
390  *	-ENODEV: Device I/O error
391  *	-ENOMEM: Failed to allocate message
392  *	-EBUSY: No space in mailbox
393  *	-ENOSPC: DMA mapping failed
394  *
395  * This function sends a single TLV header indicating the host wants to take
396  * some action. However there are no other side effects which means that any
397  * response will need to be caught via a completion if this action is
398  * expected to kick off a resultant action.
399  */
400 static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
401 {
402 	struct fbnic_tlv_msg *msg;
403 	int err = 0;
404 
405 	if (!fbnic_fw_present(fbd))
406 		return -ENODEV;
407 
408 	msg = fbnic_tlv_msg_alloc(msg_type);
409 	if (!msg)
410 		return -ENOMEM;
411 
412 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
413 	if (err)
414 		free_page((unsigned long)msg);
415 
416 	return err;
417 }
418 
419 static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
420 {
421 	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
422 
423 	mbx->ready = true;
424 
425 	switch (mbx_idx) {
426 	case FBNIC_IPC_MBX_RX_IDX:
427 		/* Enable DMA writes from the device */
428 		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
429 		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
430 
431 		/* Make sure we have a page for the FW to write to */
432 		fbnic_mbx_alloc_rx_msgs(fbd);
433 		break;
434 	case FBNIC_IPC_MBX_TX_IDX:
435 		/* Enable DMA reads from the device */
436 		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
437 		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
438 		break;
439 	}
440 }
441 
442 static bool fbnic_mbx_event(struct fbnic_dev *fbd)
443 {
444 	/* We only need to do this on the first interrupt following reset.
445 	 * This primes the mailbox so that we will have cleared all the
446 	 * skip descriptors.
447 	 */
448 	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
449 		return false;
450 
451 	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
452 
453 	return true;
454 }
455 
456 /**
457  * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
458  * to FW mailbox
459  *
460  * @fbd: FBNIC device structure
461  * @take_ownership: take/release the ownership
462  *
463  * Return: zero on success, negative value on failure
464  *
465  * Notifies the firmware that the driver either takes ownership of the NIC
466  * (when @take_ownership is true) or releases it.
467  */
468 int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
469 {
470 	unsigned long req_time = jiffies;
471 	struct fbnic_tlv_msg *msg;
472 	int err = 0;
473 
474 	if (!fbnic_fw_present(fbd))
475 		return -ENODEV;
476 
477 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
478 	if (!msg)
479 		return -ENOMEM;
480 
481 	if (take_ownership) {
482 		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
483 		if (err)
484 			goto free_message;
485 	}
486 
487 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
488 	if (err)
489 		goto free_message;
490 
491 	/* Initialize heartbeat, set last response to 1 second in the past
492 	 * so that we will trigger a timeout if the firmware doesn't respond
493 	 */
494 	fbd->last_heartbeat_response = req_time - HZ;
495 
496 	fbd->last_heartbeat_request = req_time;
497 
498 	/* Set heartbeat detection based on if we are taking ownership */
499 	fbd->fw_heartbeat_enabled = take_ownership;
500 
501 	return err;
502 
503 free_message:
504 	free_page((unsigned long)msg);
505 	return err;
506 }
507 
508 static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
509 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
510 	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
511 	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
512 	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
513 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
514 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
515 	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
516 			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
517 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
518 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
519 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
520 	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
521 			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
522 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
523 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
524 	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
525 			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
526 	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
527 			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
528 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
529 	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
530 			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
531 	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
532 	FBNIC_TLV_ATTR_LAST
533 };
534 
535 static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
536 				    struct fbnic_tlv_msg *attr, int len)
537 {
538 	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
539 	struct fbnic_tlv_msg *mac_results[8];
540 	int err, i = 0;
541 
542 	/* Make sure we have enough room to process all the MAC addresses */
543 	if (len > 8)
544 		return -ENOSPC;
545 
546 	/* Parse the array */
547 	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
548 					 fbnic_fw_cap_resp_index,
549 					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
550 	if (err)
551 		return err;
552 
553 	/* Copy results into MAC addr array */
554 	for (i = 0; i < len && mac_results[i]; i++)
555 		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);
556 
557 	/* Zero remaining unused addresses */
558 	while (i < len)
559 		eth_zero_addr(bmc_mac_addr[i++]);
560 
561 	return 0;
562 }
563 
564 static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
565 {
566 	u32 all_multi = 0, version = 0;
567 	struct fbnic_dev *fbd = opaque;
568 	bool bmc_present;
569 	int err;
570 
571 	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
572 	fbd->fw_cap.running.mgmt.version = version;
573 	if (!fbd->fw_cap.running.mgmt.version)
574 		return -EINVAL;
575 
576 	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE) {
577 		char required_ver[FBNIC_FW_VER_MAX_SIZE];
578 		char running_ver[FBNIC_FW_VER_MAX_SIZE];
579 
580 		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
581 				    running_ver);
582 		fbnic_mk_fw_ver_str(MIN_FW_VER_CODE, required_ver);
583 		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%s)\n",
584 			running_ver, required_ver);
585 		/* Disable TX mailbox to prevent card use until firmware is
586 		 * updated.
587 		 */
588 		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
589 		return -EINVAL;
590 	}
591 
592 	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
593 			fbd->fw_cap.running.mgmt.commit,
594 			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
595 		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
596 
597 	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
598 	fbd->fw_cap.stored.mgmt.version = version;
599 	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
600 		    fbd->fw_cap.stored.mgmt.commit,
601 		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
602 
603 	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
604 	fbd->fw_cap.running.bootloader.version = version;
605 	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
606 		    fbd->fw_cap.running.bootloader.commit,
607 		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
608 
609 	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
610 	fbd->fw_cap.stored.bootloader.version = version;
611 	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
612 		    fbd->fw_cap.stored.bootloader.commit,
613 		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
614 
615 	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
616 	fbd->fw_cap.stored.undi.version = version;
617 	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
618 		    fbd->fw_cap.stored.undi.commit,
619 		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
620 
621 	fbd->fw_cap.active_slot =
622 		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
623 	fbd->fw_cap.link_speed =
624 		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
625 	fbd->fw_cap.link_fec =
626 		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
627 
628 	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
629 	if (bmc_present) {
630 		struct fbnic_tlv_msg *attr;
631 
632 		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
633 		if (!attr)
634 			return -EINVAL;
635 
636 		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
637 					       attr, 4);
638 		if (err)
639 			return err;
640 
641 		all_multi =
642 			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
643 	} else {
644 		memset(fbd->fw_cap.bmc_mac_addr, 0,
645 		       sizeof(fbd->fw_cap.bmc_mac_addr));
646 	}
647 
648 	fbd->fw_cap.bmc_present = bmc_present;
649 
650 	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
651 		fbd->fw_cap.all_multi = all_multi;
652 
653 	fbd->fw_cap.anti_rollback_version =
654 		fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
655 
656 	/* Always assume we need a BMC reinit */
657 	fbd->fw_cap.need_bmc_tcam_reinit = true;
658 
659 	return 0;
660 }
661 
662 static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
663 	FBNIC_TLV_ATTR_LAST
664 };
665 
666 static int fbnic_fw_parse_ownership_resp(void *opaque,
667 					 struct fbnic_tlv_msg **results)
668 {
669 	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
670 
671 	/* Count the ownership response as a heartbeat reply */
672 	fbd->last_heartbeat_response = jiffies;
673 
674 	return 0;
675 }
676 
677 static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
678 	FBNIC_TLV_ATTR_LAST
679 };
680 
681 static int fbnic_fw_parse_heartbeat_resp(void *opaque,
682 					 struct fbnic_tlv_msg **results)
683 {
684 	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
685 
686 	fbd->last_heartbeat_response = jiffies;
687 
688 	return 0;
689 }
690 
691 static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
692 {
693 	unsigned long req_time = jiffies;
694 	struct fbnic_tlv_msg *msg;
695 	int err = 0;
696 
697 	if (!fbnic_fw_present(fbd))
698 		return -ENODEV;
699 
700 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
701 	if (!msg)
702 		return -ENOMEM;
703 
704 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
705 	if (err)
706 		goto free_message;
707 
708 	fbd->last_heartbeat_request = req_time;
709 
710 	return err;
711 
712 free_message:
713 	free_page((unsigned long)msg);
714 	return err;
715 }
716 
717 static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
718 {
719 	unsigned long last_response = fbd->last_heartbeat_response;
720 	unsigned long last_request = fbd->last_heartbeat_request;
721 
722 	return !time_before(last_response, last_request);
723 }
724 
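/**
 * fbnic_fw_init_heartbeat - Wait for the initial heartbeat exchange with FW
 * @fbd: FBNIC device structure
 * @poll: Poll the mailbox on each attempt while waiting
 *
 * Waits for the heartbeat state to become current and then places a new
 * heartbeat request on the mailbox to elicit a response from the firmware.
 *
 * Return: zero on success, -ENODEV if no FW is present, -ETIMEDOUT if the
 * heartbeat never became current, or the error from sending the request.
 */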
725 int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
726 {
727 	int err = -ETIMEDOUT;
728 	int attempts = 50;
729 
730 	if (!fbnic_fw_present(fbd))
731 		return -ENODEV;
732 
733 	while (attempts--) {
734 		msleep(200);
735 		if (poll)
736 			fbnic_mbx_poll(fbd);
737 
738 		if (!fbnic_fw_heartbeat_current(fbd))
739 			continue;
740 
741 		/* Place new message on mailbox to elicit a response */
742 		err = fbnic_fw_xmit_heartbeat_message(fbd);
743 		if (err)
744 			dev_warn(fbd->dev,
745 				 "Failed to send heartbeat message: %d\n",
746 				 err);
747 		break;
748 	}
749 
750 	return err;
751 }
752 
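/**
 * fbnic_fw_check_heartbeat - Verify FW liveness and queue the next heartbeat
 * @fbd: FBNIC device structure
 *
 * Does nothing if the heartbeat period has not yet expired or monitoring is
 * already disabled. If the firmware failed to answer the previous request a
 * warning is logged and heartbeat monitoring is disabled, then a new request
 * is placed on the mailbox to elicit a response.
 */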
753 void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
754 {
755 	unsigned long last_request = fbd->last_heartbeat_request;
756 	int err;
757 
758 	/* Do not check heartbeat or send another request until current
759 	 * period has expired. Otherwise we might start spamming requests.
760 	 */
761 	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
762 		return;
763 
764 	/* We already reported no mailbox. Wait for it to come back */
765 	if (!fbd->fw_heartbeat_enabled)
766 		return;
767 
768 	/* Was the last heartbeat response a long time ago? */
769 	if (!fbnic_fw_heartbeat_current(fbd)) {
770 		dev_warn(fbd->dev,
771 			 "Firmware did not respond to heartbeat message\n");
772 		fbd->fw_heartbeat_enabled = false;
773 	}
774 
775 	/* Place new message on mailbox to elicit a response */
776 	err = fbnic_fw_xmit_heartbeat_message(fbd);
777 	if (err)
778 		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
779 }
780 
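/**
 * fbnic_fw_xmit_fw_start_upgrade - Ask FW to start a firmware upgrade
 * @fbd: FBNIC device structure
 * @cmpl_data: Completion data structure to store the FW response
 * @id: Section identifier for the image to be written
 * @len: Total length of the image to be written
 *
 * Builds a FW_START_UPGRADE_REQ message carrying the section id and image
 * length and maps it to the Tx mailbox along with the completion.
 *
 * Return: zero on success, negative value on failure.
 */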
781 int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
782 				   struct fbnic_fw_completion *cmpl_data,
783 				   unsigned int id, unsigned int len)
784 {
785 	struct fbnic_tlv_msg *msg;
786 	int err;
787 
788 	if (!fbnic_fw_present(fbd))
789 		return -ENODEV;
790 
791 	if (!len)
792 		return -EINVAL;
793 
794 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
795 	if (!msg)
796 		return -ENOMEM;
797 
798 	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
799 	if (err)
800 		goto free_message;
801 
802 	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
803 				     len);
804 	if (err)
805 		goto free_message;
806 
807 	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
808 	if (err)
809 		goto free_message;
810 
811 	return 0;
812 
813 free_message:
814 	free_page((unsigned long)msg);
815 	return err;
816 }
817 
818 static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
819 	FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
820 	FBNIC_TLV_ATTR_LAST
821 };
822 
823 static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
824 						struct fbnic_tlv_msg **results)
825 {
826 	struct fbnic_fw_completion *cmpl_data;
827 	struct fbnic_dev *fbd = opaque;
828 	u32 msg_type;
829 	s32 err;
830 
831 	/* Verify we have a completion pointer */
832 	msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
833 	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
834 	if (!cmpl_data)
835 		return -ENOSPC;
836 
837 	/* Check for errors */
838 	err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
839 
840 	cmpl_data->result = err;
841 	complete(&cmpl_data->done);
842 	fbnic_fw_put_cmpl(cmpl_data);
843 
844 	return 0;
845 }
846 
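/**
 * fbnic_fw_xmit_fw_write_chunk - Send a chunk of image data to the FW
 * @fbd: FBNIC device structure
 * @data: Start of the image being written, may be NULL when cancelling
 * @offset: Offset of the chunk within the image
 * @length: Length of the chunk in bytes
 * @cancel_error: Nonzero error code reported to FW to cancel the upgrade
 *
 * Builds a FW_WRITE_CHUNK_RESP message. If @cancel_error is set the error is
 * reported so the FW can abort the upgrade; if @data is provided the offset,
 * length, and chunk contents are included in the response.
 *
 * Return: zero on success, negative value on failure.
 */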
847 int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
848 				 const u8 *data, u32 offset, u16 length,
849 				 int cancel_error)
850 {
851 	struct fbnic_tlv_msg *msg;
852 	int err;
853 
854 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
855 	if (!msg)
856 		return -ENOMEM;
857 
858 	/* Report error to FW to cancel upgrade */
859 	if (cancel_error) {
860 		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
861 					     cancel_error);
862 		if (err)
863 			goto free_message;
864 	}
865 
866 	if (data) {
867 		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
868 					     offset);
869 		if (err)
870 			goto free_message;
871 
872 		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
873 					     length);
874 		if (err)
875 			goto free_message;
876 
877 		err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
878 					       data + offset, length);
879 		if (err)
880 			goto free_message;
881 	}
882 
883 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
884 	if (err)
885 		goto free_message;
886 
887 	return 0;
888 
889 free_message:
890 	free_page((unsigned long)msg);
891 	return err;
892 }
893 
894 static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
895 	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
896 	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
897 	FBNIC_TLV_ATTR_LAST
898 };
899 
900 static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
901 					     struct fbnic_tlv_msg **results)
902 {
903 	struct fbnic_fw_completion *cmpl_data;
904 	struct fbnic_dev *fbd = opaque;
905 	u32 msg_type;
906 	u32 offset;
907 	u32 length;
908 
909 	/* Verify we have a completion pointer */
910 	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
911 	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
912 	if (!cmpl_data)
913 		return -ENOSPC;
914 
915 	/* Pull length/offset pair and mark it as complete */
916 	offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
917 	length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
918 	cmpl_data->u.fw_update.offset = offset;
919 	cmpl_data->u.fw_update.length = length;
920 
921 	complete(&cmpl_data->done);
922 	fbnic_fw_put_cmpl(cmpl_data);
923 
924 	return 0;
925 }
926 
927 static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
928 	FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
929 	FBNIC_TLV_ATTR_LAST
930 };
931 
932 static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
933 						struct fbnic_tlv_msg **results)
934 {
935 	struct fbnic_fw_completion *cmpl_data;
936 	struct fbnic_dev *fbd = opaque;
937 	u32 msg_type;
938 	s32 err;
939 
940 	/* Verify we have a completion pointer */
941 	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
942 	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
943 	if (!cmpl_data)
944 		return -ENOSPC;
945 
946 	/* Check for errors */
947 	err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);
948 
949 	/* Close out update by incrementing offset by length which should
950 	 * match the total size of the component. Set length to 0 since no
951 	 * new chunks will be requested.
952 	 */
953 	cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
954 	cmpl_data->u.fw_update.length = 0;
955 
956 	cmpl_data->result = err;
957 	complete(&cmpl_data->done);
958 	fbnic_fw_put_cmpl(cmpl_data);
959 
960 	return 0;
961 }
962 
963 /**
964  * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
965  * @fbd: FBNIC device structure
966  * @cmpl_data: Completion data structure to store sensor response
967  *
968  * Asks the firmware to provide an update with the latest sensor data.
969  * The response will contain temperature and voltage readings.
970  *
971  * Return: 0 on success, negative error value on failure
972  */
973 int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
974 				 struct fbnic_fw_completion *cmpl_data)
975 {
976 	struct fbnic_tlv_msg *msg;
977 	int err;
978 
979 	if (!fbnic_fw_present(fbd))
980 		return -ENODEV;
981 
982 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
983 	if (!msg)
984 		return -ENOMEM;
985 
986 	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
987 	if (err)
988 		goto free_message;
989 
990 	return 0;
991 
992 free_message:
993 	free_page((unsigned long)msg);
994 	return err;
995 }
996 
997 static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
998 	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
999 	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
1000 	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
1001 	FBNIC_TLV_ATTR_LAST
1002 };
1003 
1004 static int fbnic_fw_parse_tsene_read_resp(void *opaque,
1005 					  struct fbnic_tlv_msg **results)
1006 {
1007 	struct fbnic_fw_completion *cmpl_data;
1008 	struct fbnic_dev *fbd = opaque;
1009 	s32 err_resp;
1010 	int err = 0;
1011 
1012 	/* Verify we have a completion pointer to provide with data */
1013 	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
1014 					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
1015 	if (!cmpl_data)
1016 		return -ENOSPC;
1017 
1018 	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
1019 	if (err_resp)
1020 		goto msg_err;
1021 
1022 	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
1023 		err = -EINVAL;
1024 		goto msg_err;
1025 	}
1026 
1027 	cmpl_data->u.tsene.millidegrees =
1028 		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
1029 	cmpl_data->u.tsene.millivolts =
1030 		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
1031 
1032 msg_err:
1033 	cmpl_data->result = err_resp ? : err;
1034 	complete(&cmpl_data->done);
1035 	fbnic_fw_put_cmpl(cmpl_data);
1036 
1037 	return err;
1038 }
1039 
1040 static const struct fbnic_tlv_index fbnic_fw_log_req_index[] = {
1041 	FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_MSEC),
1042 	FBNIC_TLV_ATTR_U64(FBNIC_FW_LOG_INDEX),
1043 	FBNIC_TLV_ATTR_STRING(FBNIC_FW_LOG_MSG, FBNIC_FW_LOG_MAX_SIZE),
1044 	FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_LENGTH),
1045 	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSEC_ARRAY),
1046 	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_INDEX_ARRAY),
1047 	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSG_ARRAY),
1048 	FBNIC_TLV_ATTR_LAST
1049 };
1050 
1051 static int fbnic_fw_process_log_array(struct fbnic_tlv_msg **results,
1052 				      u16 length, u16 arr_type_idx,
1053 				      u16 attr_type_idx,
1054 				      struct fbnic_tlv_msg **tlv_array_out)
1055 {
1056 	struct fbnic_tlv_msg *attr;
1057 	int attr_len;
1058 	int err;
1059 
1060 	if (!results[attr_type_idx])
1061 		return -EINVAL;
1062 
1063 	tlv_array_out[0] = results[attr_type_idx];
1064 
1065 	if (!length)
1066 		return 0;
1067 
1068 	if (!results[arr_type_idx])
1069 		return -EINVAL;
1070 
1071 	attr = results[arr_type_idx];
1072 	attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
1073 	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, &tlv_array_out[1],
1074 					 fbnic_fw_log_req_index,
1075 					 attr_type_idx,
1076 					 length);
1077 	if (err)
1078 		return err;
1079 
1080 	return 0;
1081 }
1082 
1083 static int fbnic_fw_parse_logs(struct fbnic_dev *fbd,
1084 			       struct fbnic_tlv_msg **msec_tlv,
1085 			       struct fbnic_tlv_msg **index_tlv,
1086 			       struct fbnic_tlv_msg **log_tlv,
1087 			       int count)
1088 {
1089 	int i;
1090 
1091 	for (i = 0; i < count; i++) {
1092 		char log[FBNIC_FW_LOG_MAX_SIZE];
1093 		ssize_t len;
1094 		u64 index;
1095 		u32 msec;
1096 		int err;
1097 
1098 		if (!msec_tlv[i] || !index_tlv[i] || !log_tlv[i]) {
1099 			dev_warn(fbd->dev, "Received log message with missing attributes!\n");
1100 			return -EINVAL;
1101 		}
1102 
1103 		index = fbnic_tlv_attr_get_signed(index_tlv[i], 0);
1104 		msec = fbnic_tlv_attr_get_signed(msec_tlv[i], 0);
1105 		len = fbnic_tlv_attr_get_string(log_tlv[i], log,
1106 						FBNIC_FW_LOG_MAX_SIZE);
1107 		if (len < 0)
1108 			return len;
1109 
1110 		err = fbnic_fw_log_write(fbd, index, msec, log);
1111 		if (err)
1112 			return err;
1113 	}
1114 
1115 	return 0;
1116 }
1117 
1118 static int fbnic_fw_parse_log_req(void *opaque,
1119 				  struct fbnic_tlv_msg **results)
1120 {
1121 	struct fbnic_tlv_msg *index_tlv[FBNIC_FW_MAX_LOG_HISTORY];
1122 	struct fbnic_tlv_msg *msec_tlv[FBNIC_FW_MAX_LOG_HISTORY];
1123 	struct fbnic_tlv_msg *log_tlv[FBNIC_FW_MAX_LOG_HISTORY];
1124 	struct fbnic_dev *fbd = opaque;
1125 	u16 length;
1126 	int err;
1127 
1128 	length = fta_get_uint(results, FBNIC_FW_LOG_LENGTH);
1129 	if (length >= FBNIC_FW_MAX_LOG_HISTORY)
1130 		return -E2BIG;
1131 
1132 	err = fbnic_fw_process_log_array(results, length,
1133 					 FBNIC_FW_LOG_MSEC_ARRAY,
1134 					 FBNIC_FW_LOG_MSEC, msec_tlv);
1135 	if (err)
1136 		return err;
1137 
1138 	err = fbnic_fw_process_log_array(results, length,
1139 					 FBNIC_FW_LOG_INDEX_ARRAY,
1140 					 FBNIC_FW_LOG_INDEX, index_tlv);
1141 	if (err)
1142 		return err;
1143 
1144 	err = fbnic_fw_process_log_array(results, length,
1145 					 FBNIC_FW_LOG_MSG_ARRAY,
1146 					 FBNIC_FW_LOG_MSG, log_tlv);
1147 	if (err)
1148 		return err;
1149 
1150 	err = fbnic_fw_parse_logs(fbd, msec_tlv, index_tlv, log_tlv,
1151 				  length + 1);
1152 	if (err)
1153 		return err;
1154 
1155 	return 0;
1156 }
1157 
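/**
 * fbnic_fw_xmit_send_logs - Enable or disable delivery of FW log messages
 * @fbd: FBNIC device structure
 * @enable: Request that the firmware start (true) or stop (false) sending logs
 * @send_log_history: Also request the existing log history when enabling
 *
 * Return: zero on success, -EOPNOTSUPP if the running firmware is too old to
 * support logs, or another negative value on failure.
 */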
1158 int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
1159 			    bool send_log_history)
1160 {
1161 	struct fbnic_tlv_msg *msg;
1162 	int err;
1163 
1164 	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_LOG) {
1165 		dev_warn(fbd->dev, "Firmware version is too old to support firmware logs!\n");
1166 		return -EOPNOTSUPP;
1167 	}
1168 
1169 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ);
1170 	if (!msg)
1171 		return -ENOMEM;
1172 
1173 	if (enable) {
1174 		err = fbnic_tlv_attr_put_flag(msg, FBNIC_SEND_LOGS);
1175 		if (err)
1176 			goto free_message;
1177 
1178 		/* Report request for version 1 of logs */
1179 		err = fbnic_tlv_attr_put_int(msg, FBNIC_SEND_LOGS_VERSION,
1180 					     FBNIC_FW_LOG_VERSION);
1181 		if (err)
1182 			goto free_message;
1183 
1184 		if (send_log_history) {
1185 			err = fbnic_tlv_attr_put_flag(msg,
1186 						      FBNIC_SEND_LOGS_HISTORY);
1187 			if (err)
1188 				goto free_message;
1189 		}
1190 	}
1191 
1192 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
1193 	if (err)
1194 		goto free_message;
1195 
1196 	return 0;
1197 
1198 free_message:
1199 	free_page((unsigned long)msg);
1200 	return err;
1201 }
1202 
1203 static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
1204 	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
1205 			 fbnic_fw_parse_cap_resp),
1206 	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
1207 			 fbnic_fw_parse_ownership_resp),
1208 	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
1209 			 fbnic_fw_parse_heartbeat_resp),
1210 	FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
1211 			 fbnic_fw_start_upgrade_resp_index,
1212 			 fbnic_fw_parse_fw_start_upgrade_resp),
1213 	FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
1214 			 fbnic_fw_write_chunk_req_index,
1215 			 fbnic_fw_parse_fw_write_chunk_req),
1216 	FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
1217 			 fbnic_fw_finish_upgrade_req_index,
1218 			 fbnic_fw_parse_fw_finish_upgrade_req),
1219 	FBNIC_TLV_PARSER(TSENE_READ_RESP,
1220 			 fbnic_tsene_read_resp_index,
1221 			 fbnic_fw_parse_tsene_read_resp),
1222 	FBNIC_TLV_PARSER(LOG_MSG_REQ,
1223 			 fbnic_fw_log_req_index,
1224 			 fbnic_fw_parse_log_req),
1225 	FBNIC_TLV_MSG_ERROR
1226 };
1227 
1228 static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
1229 {
1230 	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
1231 	u8 head = rx_mbx->head;
1232 	u64 desc, length;
1233 
1234 	while (head != rx_mbx->tail) {
1235 		struct fbnic_tlv_msg *msg;
1236 		int err;
1237 
1238 		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
1239 		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
1240 			break;
1241 
1242 		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
1243 				 PAGE_SIZE, DMA_FROM_DEVICE);
1244 
1245 		msg = rx_mbx->buf_info[head].msg;
1246 
1247 		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);
1248 
1249 		/* Ignore NULL mailbox descriptors */
1250 		if (!length)
1251 			goto next_page;
1252 
1253 		/* Report descriptors with length greater than page size */
1254 		if (length > PAGE_SIZE) {
1255 			dev_warn(fbd->dev,
1256 				 "Invalid mailbox descriptor length: %lld\n",
1257 				 length);
1258 			goto next_page;
1259 		}
1260 
1261 		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
1262 			dev_warn(fbd->dev, "Mailbox message length mismatch\n");
1263 
1264 		/* If parsing fails dump contents of message to dmesg */
1265 		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
1266 		if (err) {
1267 			dev_warn(fbd->dev, "Unable to process message: %d\n",
1268 				 err);
1269 			print_hex_dump(KERN_WARNING, "fbnic:",
1270 				       DUMP_PREFIX_OFFSET, 16, 2,
1271 				       msg, length, true);
1272 		}
1273 
1274 		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
1275 next_page:
1276 
1277 		free_page((unsigned long)rx_mbx->buf_info[head].msg);
1278 		rx_mbx->buf_info[head].msg = NULL;
1279 
1280 		head++;
1281 		head %= FBNIC_IPC_MBX_DESC_LEN;
1282 	}
1283 
1284 	/* Record head for next interrupt */
1285 	rx_mbx->head = head;
1286 
1287 	/* Make sure we have at least one page for the FW to write to */
1288 	fbnic_mbx_alloc_rx_msgs(fbd);
1289 }
1290 
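/**
 * fbnic_mbx_poll - Poll the FW mailbox for activity
 * @fbd: FBNIC device structure
 *
 * Acknowledges any pending mailbox doorbell event and processes completed
 * Tx descriptors and received Rx messages.
 */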
1291 void fbnic_mbx_poll(struct fbnic_dev *fbd)
1292 {
1293 	fbnic_mbx_event(fbd);
1294 
1295 	fbnic_mbx_process_tx_msgs(fbd);
1296 	fbnic_mbx_process_rx_msgs(fbd);
1297 }
1298 
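/**
 * fbnic_mbx_poll_tx_ready - Bring up the mailbox and wait for FW readiness
 * @fbd: FBNIC device structure
 *
 * Repeatedly resets the Tx descriptor ring until the firmware signals an
 * event, enables DMA and initializes both rings, transmits a capabilities
 * request, and then polls until a supported management firmware version is
 * reported or the timeout expires.
 *
 * Return: zero on success, negative value on failure.
 */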
1299 int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
1300 {
1301 	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
1302 	unsigned long timeout = jiffies + 10 * HZ + 1;
1303 	int err, i;
1304 
1305 	do {
1306 		if (!time_is_after_jiffies(timeout))
1307 			return -ETIMEDOUT;
1308 
1309 		/* Force the firmware to trigger an interrupt response to
1310 		 * avoid the mailbox getting stuck closed if the interrupt
1311 		 * is reset.
1312 		 */
1313 		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
1314 
1315 		/* Immediate fail if BAR4 went away */
1316 		if (!fbnic_fw_present(fbd))
1317 			return -ENODEV;
1318 
1319 		msleep(20);
1320 	} while (!fbnic_mbx_event(fbd));
1321 
1322 	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
1323 	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
1324 		fbnic_mbx_init_desc_ring(fbd, i);
1325 
1326 	/* Request an update from the firmware. This should overwrite
1327 	 * mgmt.version once we get the actual version from the firmware
1328 	 * in the capabilities request message.
1329 	 */
1330 	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
1331 	if (err)
1332 		goto clean_mbx;
1333 
1334 	/* Poll until we get a current management firmware version. Use "1"
1335 	 * to indicate we entered the polling state waiting for a response
1336 	 */
1337 	for (fbd->fw_cap.running.mgmt.version = 1;
1338 	     fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE;) {
1339 		if (!tx_mbx->ready)
1340 			err = -ENODEV;
1341 		if (err)
1342 			goto clean_mbx;
1343 
1344 		msleep(20);
1345 		fbnic_mbx_poll(fbd);
1346 
1347 		/* Set err, but wait until the mgmt.version check to report it */
1348 		if (!time_is_after_jiffies(timeout))
1349 			err = -ETIMEDOUT;
1350 	}
1351 
1352 	return 0;
1353 clean_mbx:
1354 	/* Cleanup Rx buffers and disable mailbox */
1355 	fbnic_mbx_clean(fbd);
1356 	return err;
1357 }
1358 
1359 static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
1360 {
1361 	cmpl_data->result = -EPIPE;
1362 	complete(&cmpl_data->done);
1363 }
1364 
1365 static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
1366 {
1367 	int i;
1368 
1369 	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
1370 		struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
1371 
1372 		if (cmpl_data)
1373 			__fbnic_fw_evict_cmpl(cmpl_data);
1374 	}
1375 
1376 	memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
1377 }
1378 
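/**
 * fbnic_mbx_flush_tx - Drain the Tx mailbox before shutting it down
 * @fbd: FBNIC device structure
 *
 * Marks the Tx mailbox as not ready so no further messages can be queued,
 * evicts any outstanding completions, and then waits up to 10 seconds for
 * the firmware to consume the messages already on the ring.
 */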
1379 void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
1380 {
1381 	unsigned long timeout = jiffies + 10 * HZ + 1;
1382 	struct fbnic_fw_mbx *tx_mbx;
1383 	u8 tail;
1384 
1385 	/* Grab the Tx mailbox so we can drain it below */
1386 	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
1387 
1388 	spin_lock_irq(&fbd->fw_tx_lock);
1389 
1390 	/* Clear ready to prevent any further attempts to transmit */
1391 	tx_mbx->ready = false;
1392 
1393 	/* Read tail to determine the last tail state for the ring */
1394 	tail = tx_mbx->tail;
1395 
1396 	/* Flush any completions as we are no longer processing Rx */
1397 	fbnic_mbx_evict_all_cmpl(fbd);
1398 
1399 	spin_unlock_irq(&fbd->fw_tx_lock);
1400 
1401 	/* Give the firmware time to process the messages; we will wait
1402 	 * up to 10 seconds, which is 500 waits of 20ms.
1403 	 */
1404 	do {
1405 		u8 head = tx_mbx->head;
1406 
1407 		/* Tx ring is empty once head == tail */
1408 		if (head == tail)
1409 			break;
1410 
1411 		msleep(20);
1412 		fbnic_mbx_process_tx_msgs(fbd);
1413 	} while (time_is_after_jiffies(timeout));
1414 }
1415 
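/**
 * fbnic_fw_xmit_rpc_macda_sync - Report current MAC DA filters to the FW
 * @fbd: FBNIC device structure
 *
 * Walks the unicast and multicast regions of the MAC address table, packs
 * the valid entries into two TLV arrays, and reports the accumulated Rx
 * flags (promiscuous, all-multicast, broadcast) so the firmware sees the
 * current RPC configuration.
 *
 * Return: zero on success or if there is no FW to sync with, negative value
 * on failure.
 */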
1416 int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd)
1417 {
1418 	struct fbnic_tlv_msg *mac_array;
1419 	int i, addr_count = 0, err;
1420 	struct fbnic_tlv_msg *msg;
1421 	u32 rx_flags = 0;
1422 
1423 	/* Nothing to do if there is no FW to sync with */
1424 	if (!fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready)
1425 		return 0;
1426 
1427 	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ);
1428 	if (!msg)
1429 		return -ENOMEM;
1430 
1431 	mac_array = fbnic_tlv_attr_nest_start(msg,
1432 					      FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY);
1433 	if (!mac_array)
1434 		goto free_message_nospc;
1435 
1436 	/* Populate the unicast MAC addrs and capture PROMISC/ALLMULTI flags */
1437 	for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_PROMISC_IDX;
1438 	     i >= fbd->mac_addr_boundary; i--) {
1439 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
1440 
1441 		if (mac_addr->state != FBNIC_TCAM_S_VALID)
1442 			continue;
1443 		if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
1444 			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
1445 		if (test_bit(FBNIC_MAC_ADDR_T_PROMISC, mac_addr->act_tcam))
1446 			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
1447 		if (!test_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam))
1448 			continue;
1449 		if (addr_count == FW_RPC_MAC_SYNC_UC_ARRAY_SIZE) {
1450 			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
1451 			continue;
1452 		}
1453 
1454 		err = fbnic_tlv_attr_put_value(mac_array,
1455 					       FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
1456 					       mac_addr->value.addr8,
1457 					       ETH_ALEN);
1458 		if (err)
1459 			goto free_message;
1460 		addr_count++;
1461 	}
1462 
1463 	/* Close array */
1464 	fbnic_tlv_attr_nest_stop(msg);
1465 
1466 	mac_array = fbnic_tlv_attr_nest_start(msg,
1467 					      FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY);
1468 	if (!mac_array)
1469 		goto free_message_nospc;
1470 
1471 	/* Repeat for multicast addrs, record BROADCAST/ALLMULTI flags */
1472 	for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;
1473 	     i < fbd->mac_addr_boundary; i++) {
1474 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
1475 
1476 		if (mac_addr->state != FBNIC_TCAM_S_VALID)
1477 			continue;
1478 		if (test_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam))
1479 			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST;
1480 		if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
1481 			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
1482 		if (!test_bit(FBNIC_MAC_ADDR_T_MULTICAST, mac_addr->act_tcam))
1483 			continue;
1484 		if (addr_count == FW_RPC_MAC_SYNC_MC_ARRAY_SIZE) {
1485 			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
1486 			continue;
1487 		}
1488 
1489 		err = fbnic_tlv_attr_put_value(mac_array,
1490 					       FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
1491 					       mac_addr->value.addr8,
1492 					       ETH_ALEN);
1493 		if (err)
1494 			goto free_message;
1495 		addr_count++;
1496 	}
1497 
1498 	/* Close array */
1499 	fbnic_tlv_attr_nest_stop(msg);
1500 
1501 	/* Report flags at end of list */
1502 	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS,
1503 				     rx_flags);
1504 	if (err)
1505 		goto free_message;
1506 
1507 	/* Send message off to FW notifying it of the current RPC config */
1508 	err = fbnic_mbx_map_tlv_msg(fbd, msg);
1509 	if (err)
1510 		goto free_message;
1511 	return 0;
1512 free_message_nospc:
1513 	err = -ENOSPC;
1514 free_message:
1515 	free_page((unsigned long)msg);
1516 	return err;
1517 }
1518 
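/**
 * fbnic_get_fw_ver_commit_str - Format the running FW version and commit
 * @fbd: FBNIC device structure
 * @fw_version: Buffer to receive the formatted version string
 * @str_sz: Size of the @fw_version buffer
 */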
1519 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
1520 				 const size_t str_sz)
1521 {
1522 	struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
1523 	const char *delim = "";
1524 
1525 	if (mgmt->commit[0])
1526 		delim = "_";
1527 
1528 	fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
1529 				 fw_version, str_sz);
1530 }
1531 
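/**
 * fbnic_fw_alloc_cmpl - Allocate and initialize a FW completion structure
 * @msg_type: Message type the completion will wait for
 *
 * Return: pointer to a new completion holding a single reference, or NULL
 * on allocation failure.
 */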
1532 struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
1533 {
1534 	struct fbnic_fw_completion *cmpl;
1535 
1536 	cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
1537 	if (!cmpl)
1538 		return NULL;
1539 
1540 	cmpl->msg_type = msg_type;
1541 	init_completion(&cmpl->done);
1542 	kref_init(&cmpl->ref_count);
1543 
1544 	return cmpl;
1545 }
1546 
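/**
 * fbnic_fw_put_cmpl - Drop a reference to a FW completion structure
 * @fw_cmpl: Completion to release
 *
 * Frees the completion when the last reference is dropped.
 */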
1547 void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
1548 {
1549 	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
1550 }
1551