// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

#include "fbnic.h"
#include "fbnic_tlv.h"

static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}

static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
	u64 desc;

	desc = fw_rd32(fbd, desc_offset);
	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;

	return desc;
}

static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);

	fw_wrfl(fbd);

	/* We then fill the rest of the ring, starting at the end and moving
	 * back toward descriptor 0, with skip descriptors that have no
	 * length or address, telling the firmware that it can skip past
	 * them to the descriptor we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
		__fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
				    FBNIC_IPC_MBX_DESC_FW_CMPL |
				    FBNIC_IPC_MBX_DESC_HOST_CMPL);
		fw_wrfl(fbd);
	}
}
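
/* Resulting ring layout after fbnic_mbx_init_desc_ring(), as an
 * illustrative sketch of the code above:
 *
 *	desc[0]                              = 0 (stop descriptor)
 *	desc[1..FBNIC_IPC_MBX_DESC_LEN - 1]  = FW_CMPL | HOST_CMPL (skip)
 *
 * The firmware can step over the skip descriptors and comes to rest on
 * the zeroed descriptor until the host publishes a real one.
 */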

void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);
}

static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	/* On a mapping error leave the page alone; the callers all free
	 * the message when an error is returned, so freeing it here as
	 * well would result in a double free.
	 */
	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}

static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
					 int desc_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	int direction;

	if (!mbx->buf_info[desc_idx].msg)
		return;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;
	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
			 PAGE_SIZE, direction);

	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
	mbx->buf_info[desc_idx].msg = NULL;
}

static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int i;

	fbnic_mbx_init_desc_ring(fbd, mbx_idx);

	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
}

void fbnic_mbx_clean(struct fbnic_dev *fbd)
{
	int i;

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_clean_desc_ring(fbd, i);
}

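/* Cap the buffer length we advertise to the firmware at whatever the
 * descriptor length field can encode; if PAGE_SIZE is larger than
 * FBNIC_MBX_MAX_PAGE_SIZE the mapped page is simply used only in part.
 */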
#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)

static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if mailbox is not ready, or we already have pages on
	 * the ring that can be used by the firmware
	 */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but one of the unused descriptors in the Rx queue. */
	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			free_page((unsigned long)msg);
	}

	return err;
}

static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
				 struct fbnic_tlv_msg *msg)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}

static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
				    struct fbnic_tlv_msg *msg,
				    struct fbnic_fw_completion *cmpl_data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);

	/* If we are already waiting on a completion then abort */
	if (cmpl_data && fbd->cmpl_data) {
		err = -EBUSY;
		goto unlock_mbx;
	}

	/* Record completion location and submit request */
	if (cmpl_data)
		fbd->cmpl_data = cmpl_data;

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	/* If msg failed then clear completion data for next caller */
	if (err && cmpl_data)
		fbd->cmpl_data = NULL;

unlock_mbx:
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

static void fbnic_fw_release_cmpl_data(struct kref *kref)
{
	struct fbnic_fw_completion *cmpl_data;

	cmpl_data = container_of(kref, struct fbnic_fw_completion,
				 ref_count);
	kfree(cmpl_data);
}

static struct fbnic_fw_completion *
fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_fw_completion *cmpl_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
		cmpl_data = fbd->cmpl_data;
		kref_get(&fbd->cmpl_data->ref_count);
	}
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return cmpl_data;
}
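
/* Completion lifetime as implemented in this file: the requester owns the
 * initial reference taken by fbnic_fw_init_cmpl() and drops it with
 * fbnic_fw_put_cmpl(); the response parser takes a second reference via
 * fbnic_fw_get_cmpl_by_type() and drops it once it has filled in the
 * result and signalled ->done. fbd->cmpl_data itself is only a borrowed
 * pointer, protected by fw_tx_lock and cleared by fbnic_fw_clear_compl()
 * or on a failed submission.
 */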

/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 *   One of the following values:
 *     0: Success
 *     -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
 *     -ENODEV: Device I/O error
 *     -ENOMEM: Failed to allocate message
 *     -EBUSY: No space in mailbox
 *     -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However, there are no other side effects, which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		free_page((unsigned long)msg);

	return err;
}

/**
 * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
 * @fbd: FBNIC device structure
 *
 * Return: zero on success or when the mailbox is not supported (non-ASIC),
 * negative error value on any other failure.
 *
 * Sends a single TLV header indicating the host wants the firmware to
 * confirm the capabilities and version.
 */
static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
{
	int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);

	/* Return 0 if we are not calling this on an ASIC */
	return (err == -EOPNOTSUPP) ? 0 : err;
}

static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	/* This is a one-time init, so just exit if it is completed */
	if (mbx->ready)
		return;

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Force version to 1 if we successfully requested an update
		 * from the firmware. This should be overwritten once we get
		 * the actual version from the firmware in its response to
		 * the capabilities request.
		 */
		if (!fbnic_fw_xmit_cap_msg(fbd) &&
		    !fbd->fw_cap.running.mgmt.version)
			fbd->fw_cap.running.mgmt.version = 1;
		break;
	}
}

static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
{
	int i;

	/* We only need to do this on the first interrupt following init.
	 * This primes the mailbox so that we will have cleared all the
	 * skip descriptors.
	 */
	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
		return;

	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_postinit_desc_ring(fbd, i);
}

/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
 * to FW mailbox
 *
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}
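
/* Illustrative usage (hypothetical call sites; the real callers live
 * outside this file, typically wherever the interface is brought up or
 * torn down):
 *
 *	take:    err = fbnic_fw_xmit_ownership_msg(fbd, true);
 *	release: fbnic_fw_xmit_ownership_msg(fbd, false);
 */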

static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses */
	if (len > 8)
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}

static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	return 0;
}

static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_ownership_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	/* Count the ownership response as a heartbeat reply */
	fbd->last_heartbeat_response = jiffies;

	return 0;
}

static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_heartbeat_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	fbd->last_heartbeat_response = jiffies;

	return 0;
}

static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	fbd->last_heartbeat_request = req_time;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
{
	unsigned long last_response = fbd->last_heartbeat_response;
	unsigned long last_request = fbd->last_heartbeat_request;

	return !time_before(last_response, last_request);
}

int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}

void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response a long time ago? */
	if (!fbnic_fw_heartbeat_current(fbd)) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place new message on mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
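
/* Heartbeat bookkeeping at a glance (summary of the helpers above): each
 * request stamps last_heartbeat_request, each response (including an
 * ownership response) stamps last_heartbeat_response, and the firmware is
 * considered alive while the response timestamp is not older than the
 * request. fbnic_fw_check_heartbeat() acts at most once per
 * FW_HEARTBEAT_PERIOD and disables heartbeat monitoring after a miss, so
 * the warning is not repeated until fbnic_fw_xmit_ownership_msg() re-arms
 * it.
 */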

/**
 * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
 * @fbd: FBNIC device structure
 * @cmpl_data: Completion data structure to store sensor response
 *
 * Asks the firmware to provide an update with the latest sensor data.
 * The response will contain temperature and voltage readings.
 *
 * Return: 0 on success, negative error value on failure
 */
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
				 struct fbnic_fw_completion *cmpl_data)
{
	struct fbnic_tlv_msg *msg;
	int err;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}
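
/* Hypothetical caller sketch (the real consumer, e.g. a sensor/hwmon
 * path, lives outside this file); it only illustrates how the completion
 * helpers in this file fit together, and the 2 second timeout is an
 * arbitrary example value:
 *
 *	struct fbnic_fw_completion *cmpl;
 *	int err;
 *
 *	cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
 *	if (!cmpl)
 *		return -ENOMEM;
 *
 *	fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
 *	err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl);
 *	if (err)
 *		goto exit_put;
 *
 *	if (!wait_for_completion_timeout(&cmpl->done, 2 * HZ))
 *		err = -ETIMEDOUT;
 *	else
 *		err = cmpl->result;
 *
 *	(on success the readings are in cmpl->u.tsene.millidegrees and
 *	 cmpl->u.tsene.millivolts)
 *
 *	fbnic_fw_clear_compl(fbd);
 * exit_put:
 *	fbnic_fw_put_cmpl(cmpl);
 */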

static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_tsene_read_resp(void *opaque,
					  struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	s32 err_resp;
	int err = 0;

	/* Verify we have a completion pointer we can fill in with the data */
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl_data)
		return -ENOSPC;

	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
	if (err_resp)
		goto msg_err;

	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
		err = -EINVAL;
		goto msg_err;
	}

	cmpl_data->u.tsene.millidegrees =
		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
	cmpl_data->u.tsene.millivolts =
		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);

msg_err:
	cmpl_data->result = err_resp ? : err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}

static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_MSG_ERROR
};

static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}

void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_postinit(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}

int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx;
	int attempts = 50;

	/* Immediate fail if BAR4 isn't there */
	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	while (!tx_mbx->ready && --attempts) {
		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		msleep(200);

		fbnic_mbx_poll(fbd);
	}

	return attempts ? 0 : -ETIMEDOUT;
}
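
/* Rough bring-up order (hypothetical sketch; the probe/interrupt code that
 * actually drives this lives outside this file):
 *
 *	fbnic_mbx_init(fbd);
 *	... request and enable the FW MSI-X vector ...
 *	err = fbnic_mbx_poll_tx_ready(fbd);
 *	if (!err)
 *		err = fbnic_fw_init_heartbeat(fbd, false);
 */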

void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx;
	int attempts = 50;
	u8 count = 0;

	/* Nothing to do if there is no mailbox */
	if (!fbnic_fw_present(fbd))
		return;

	/* Get a pointer to the Tx mailbox */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	/* Nothing to do if mailbox never got to ready */
	if (!tx_mbx->ready)
		return;

	/* Give the firmware time to process the outstanding messages;
	 * we will wait up to 10 seconds, which is 50 waits of 200ms.
	 */
	do {
		u8 head = tx_mbx->head;

		if (head == tx_mbx->tail)
			break;

		msleep(200);
		fbnic_mbx_process_tx_msgs(fbd);

		count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
	} while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
}

void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
				 const size_t str_sz)
{
	struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
	const char *delim = "";

	if (mgmt->commit[0])
		delim = "_";

	fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
				 fw_version, str_sz);
}

void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
			u32 msg_type)
{
	fw_cmpl->msg_type = msg_type;
	init_completion(&fw_cmpl->done);
	kref_init(&fw_cmpl->ref_count);
}

void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
{
	unsigned long flags;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	fbd->cmpl_data = NULL;
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
}

void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
}