1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/bitfield.h>
5 #include <linux/etherdevice.h>
6 #include <linux/delay.h>
7 #include <linux/dev_printk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/gfp.h>
10 #include <linux/types.h>
11
12 #include "fbnic.h"
13 #include "fbnic_tlv.h"
14
/* Post a 64b descriptor to the IPC mailbox, upper half first.
 *
 * The firmware reads lower, upper, lower; publishing the upper 32b
 * before the lower 32b lets it detect a descriptor that changed
 * mid-transaction.
 */
static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	fw_wr32(fbd, offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, offset, lower_32_bits(desc));
}
28
/* Mark a mailbox descriptor invalid.
 *
 * The state bits live in the lower 32b, so those are written first to
 * flag the descriptor as invalid before the upper 32b are zeroed.
 */
static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	fw_wr32(fbd, offset, desc);
	fw_wrfl(fbd);
	fw_wr32(fbd, offset + 1, 0);
}
42
__fbnic_mbx_rd_desc(struct fbnic_dev * fbd,int mbx_idx,int desc_idx)43 static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
44 {
45 u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
46 u64 desc;
47
48 desc = fw_rd32(fbd, desc_offset);
49 desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
50
51 return desc;
52 }
53
fbnic_mbx_reset_desc_ring(struct fbnic_dev * fbd,int mbx_idx)54 static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
55 {
56 int desc_idx;
57
58 /* Disable DMA transactions from the device,
59 * and flush any transactions triggered during cleaning
60 */
61 switch (mbx_idx) {
62 case FBNIC_IPC_MBX_RX_IDX:
63 wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
64 FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
65 break;
66 case FBNIC_IPC_MBX_TX_IDX:
67 wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
68 FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
69 break;
70 }
71
72 wrfl(fbd);
73
74 /* Initialize first descriptor to all 0s. Doing this gives us a
75 * solid stop for the firmware to hit when it is done looping
76 * through the ring.
77 */
78 __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
79
80 /* We then fill the rest of the ring starting at the end and moving
81 * back toward descriptor 0 with skip descriptors that have no
82 * length nor address, and tell the firmware that they can skip
83 * them and just move past them to the one we initialized to 0.
84 */
85 for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
86 __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
87 FBNIC_IPC_MBX_DESC_FW_CMPL |
88 FBNIC_IPC_MBX_DESC_HOST_CMPL);
89 }
90
fbnic_mbx_init(struct fbnic_dev * fbd)91 void fbnic_mbx_init(struct fbnic_dev *fbd)
92 {
93 int i;
94
95 /* Initialize lock to protect Tx ring */
96 spin_lock_init(&fbd->fw_tx_lock);
97
98 /* Reinitialize mailbox memory */
99 for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
100 memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
101
102 /* Do not auto-clear the FW mailbox interrupt, let SW clear it */
103 wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));
104
105 /* Clear any stale causes in vector 0 as that is used for doorbell */
106 wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
107
108 for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
109 fbnic_mbx_reset_desc_ring(fbd, i);
110 }
111
/* DMA-map one TLV message page and post its descriptor at the ring tail.
 *
 * Returns 0 on success, -ENODEV if the mailbox/firmware is not up,
 * -EBUSY when the ring is full, or -ENOSPC if DMA mapping failed.
 */
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	u64 desc;
	int dir;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	dir = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
						  DMA_TO_DEVICE;

	/* Ring is full when advancing tail would land on head */
	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, dir);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	/* Zero the slot following the new tail before publishing the
	 * descriptor, so the firmware has a fresh stop point.
	 */
	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	desc = FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
	       (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
	       FBNIC_IPC_MBX_DESC_HOST_CMPL;
	if (eom)
		desc |= FBNIC_IPC_MBX_DESC_EOM;

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail, desc);

	return 0;
}
148
fbnic_mbx_unmap_and_free_msg(struct fbnic_dev * fbd,int mbx_idx,int desc_idx)149 static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
150 int desc_idx)
151 {
152 struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
153 int direction;
154
155 if (!mbx->buf_info[desc_idx].msg)
156 return;
157
158 direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
159 DMA_TO_DEVICE;
160 dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
161 PAGE_SIZE, direction);
162
163 free_page((unsigned long)mbx->buf_info[desc_idx].msg);
164 mbx->buf_info[desc_idx].msg = NULL;
165 }
166
fbnic_mbx_clean_desc_ring(struct fbnic_dev * fbd,int mbx_idx)167 static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
168 {
169 int i;
170
171 fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
172
173 for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
174 fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
175 }
176
fbnic_mbx_clean(struct fbnic_dev * fbd)177 void fbnic_mbx_clean(struct fbnic_dev *fbd)
178 {
179 int i;
180
181 for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
182 fbnic_mbx_clean_desc_ring(fbd, i);
183 }
184
185 #define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
186 #define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
187
fbnic_mbx_alloc_rx_msgs(struct fbnic_dev * fbd)188 static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
189 {
190 struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
191 u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
192 int err = 0;
193
194 /* Do nothing if mailbox is not ready, or we already have pages on
195 * the ring that can be used by the firmware
196 */
197 if (!rx_mbx->ready)
198 return -ENODEV;
199
200 /* Fill all but 1 unused descriptors in the Rx queue. */
201 count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
202 while (!err && count--) {
203 struct fbnic_tlv_msg *msg;
204
205 msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
206 __GFP_NOWARN);
207 if (!msg) {
208 err = -ENOMEM;
209 break;
210 }
211
212 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
213 FBNIC_RX_PAGE_SIZE, 0);
214 if (err)
215 free_page((unsigned long)msg);
216 }
217
218 return err;
219 }
220
fbnic_mbx_map_tlv_msg(struct fbnic_dev * fbd,struct fbnic_tlv_msg * msg)221 static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
222 struct fbnic_tlv_msg *msg)
223 {
224 unsigned long flags;
225 int err;
226
227 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
228
229 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
230 le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
231
232 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
233
234 return err;
235 }
236
fbnic_mbx_set_cmpl_slot(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)237 static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
238 struct fbnic_fw_completion *cmpl_data)
239 {
240 struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
241 int free = -EXFULL;
242 int i;
243
244 if (!tx_mbx->ready)
245 return -ENODEV;
246
247 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
248 if (!fbd->cmpl_data[i])
249 free = i;
250 else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
251 return -EEXIST;
252 }
253
254 if (free == -EXFULL)
255 return -EXFULL;
256
257 fbd->cmpl_data[free] = cmpl_data;
258
259 return 0;
260 }
261
fbnic_mbx_clear_cmpl_slot(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)262 static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
263 struct fbnic_fw_completion *cmpl_data)
264 {
265 int i;
266
267 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
268 if (fbd->cmpl_data[i] == cmpl_data) {
269 fbd->cmpl_data[i] = NULL;
270 break;
271 }
272 }
273 }
274
fbnic_mbx_process_tx_msgs(struct fbnic_dev * fbd)275 static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
276 {
277 struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
278 u8 head = tx_mbx->head;
279 u64 desc;
280
281 while (head != tx_mbx->tail) {
282 desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
283 if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
284 break;
285
286 fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
287
288 head++;
289 head %= FBNIC_IPC_MBX_DESC_LEN;
290 }
291
292 /* Record head for next interrupt */
293 tx_mbx->head = head;
294 }
295
fbnic_mbx_set_cmpl(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)296 int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
297 struct fbnic_fw_completion *cmpl_data)
298 {
299 unsigned long flags;
300 int err;
301
302 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
303 err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
304 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
305
306 return err;
307 }
308
fbnic_mbx_map_req_w_cmpl(struct fbnic_dev * fbd,struct fbnic_tlv_msg * msg,struct fbnic_fw_completion * cmpl_data)309 static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
310 struct fbnic_tlv_msg *msg,
311 struct fbnic_fw_completion *cmpl_data)
312 {
313 unsigned long flags;
314 int err;
315
316 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
317 if (cmpl_data) {
318 err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
319 if (err)
320 goto unlock_mbx;
321 }
322
323 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
324 le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
325
326 /* If we successfully reserved a completion and msg failed
327 * then clear completion data for next caller
328 */
329 if (err && cmpl_data)
330 fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
331
332 unlock_mbx:
333 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
334
335 return err;
336 }
337
fbnic_fw_release_cmpl_data(struct kref * kref)338 static void fbnic_fw_release_cmpl_data(struct kref *kref)
339 {
340 struct fbnic_fw_completion *cmpl_data;
341
342 cmpl_data = container_of(kref, struct fbnic_fw_completion,
343 ref_count);
344 kfree(cmpl_data);
345 }
346
347 static struct fbnic_fw_completion *
fbnic_fw_get_cmpl_by_type(struct fbnic_dev * fbd,u32 msg_type)348 fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
349 {
350 struct fbnic_fw_completion *cmpl_data = NULL;
351 unsigned long flags;
352 int i;
353
354 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
355 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
356 if (fbd->cmpl_data[i] &&
357 fbd->cmpl_data[i]->msg_type == msg_type) {
358 cmpl_data = fbd->cmpl_data[i];
359 kref_get(&cmpl_data->ref_count);
360 break;
361 }
362 }
363
364 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
365
366 return cmpl_data;
367 }
368
/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 * One of the following values:
 *   0: Message successfully queued on the Tx mailbox
 *   -ENODEV: Firmware not present
 *   -ENOMEM: Failed to allocate message
 *   -EBUSY: No space in mailbox
 *   -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However there are no other side effects which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	/* On mailbox failure the page is still ours to free */
	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		free_page((unsigned long)msg);

	return err;
}
405
fbnic_mbx_init_desc_ring(struct fbnic_dev * fbd,int mbx_idx)406 static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
407 {
408 struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
409
410 mbx->ready = true;
411
412 switch (mbx_idx) {
413 case FBNIC_IPC_MBX_RX_IDX:
414 /* Enable DMA writes from the device */
415 wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
416 FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
417
418 /* Make sure we have a page for the FW to write to */
419 fbnic_mbx_alloc_rx_msgs(fbd);
420 break;
421 case FBNIC_IPC_MBX_TX_IDX:
422 /* Enable DMA reads from the device */
423 wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
424 FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
425 break;
426 }
427 }
428
fbnic_mbx_event(struct fbnic_dev * fbd)429 static bool fbnic_mbx_event(struct fbnic_dev *fbd)
430 {
431 /* We only need to do this on the first interrupt following reset.
432 * this primes the mailbox so that we will have cleared all the
433 * skip descriptors.
434 */
435 if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
436 return false;
437
438 wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
439
440 return true;
441 }
442
443 /**
444 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
445 * to FW mailbox
446 *
447 * @fbd: FBNIC device structure
448 * @take_ownership: take/release the ownership
449 *
450 * Return: zero on success, negative value on failure
451 *
452 * Notifies the firmware that the driver either takes ownership of the NIC
453 * (when @take_ownership is true) or releases it.
454 */
fbnic_fw_xmit_ownership_msg(struct fbnic_dev * fbd,bool take_ownership)455 int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
456 {
457 unsigned long req_time = jiffies;
458 struct fbnic_tlv_msg *msg;
459 int err = 0;
460
461 if (!fbnic_fw_present(fbd))
462 return -ENODEV;
463
464 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
465 if (!msg)
466 return -ENOMEM;
467
468 if (take_ownership) {
469 err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
470 if (err)
471 goto free_message;
472 }
473
474 err = fbnic_mbx_map_tlv_msg(fbd, msg);
475 if (err)
476 goto free_message;
477
478 /* Initialize heartbeat, set last response to 1 second in the past
479 * so that we will trigger a timeout if the firmware doesn't respond
480 */
481 fbd->last_heartbeat_response = req_time - HZ;
482
483 fbd->last_heartbeat_request = req_time;
484
485 /* Set heartbeat detection based on if we are taking ownership */
486 fbd->fw_heartbeat_enabled = take_ownership;
487
488 return err;
489
490 free_message:
491 free_page((unsigned long)msg);
492 return err;
493 }
494
/* TLV attribute layout of the firmware capabilities response message */
static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
	FBNIC_TLV_ATTR_LAST
};
521
/* Parse the BMC MAC address array attribute into @bmc_mac_addr.
 *
 * @bmc_mac_addr: destination array of @len Ethernet addresses
 * @attr: TLV array attribute containing nested MAC address attributes
 * @len: number of entries the caller's array can hold
 *
 * Entries beyond those reported by the firmware are zeroed. Returns 0 on
 * success, -ENOSPC if @len exceeds the local scratch capacity, or a
 * negative error from the TLV array parser.
 */
static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses;
	 * tie the bound to the scratch array instead of a magic number.
	 */
	if (len > ARRAY_SIZE(mac_results))
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}
550
/* Parse the firmware capabilities response and cache the reported
 * versions, link parameters, and BMC information on @fbd->fw_cap.
 * Returns -EINVAL when the running version is absent or too old, or an
 * error from BMC MAC parsing; 0 otherwise.
 */
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	/* A zero running mgmt version means the attribute was absent */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	/* Stored (flash) mgmt image version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Running CMRT bootloader version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Stored CMRT bootloader version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* NOTE(review): the UEFI version attribute lands in stored.undi,
	 * not a running.* field — looks intentional (only a stored copy
	 * is tracked) but confirm against the fw_cap definition.
	 */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		/* A present BMC must come with its MAC address array */
		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	/* Only overwrite all_multi when the attribute was sent, or when
	 * there is no BMC (in which case it resets to 0 from above).
	 */
	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	fbd->fw_cap.anti_rollback_version =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);

	return 0;
}
646
/* The ownership response carries no attributes */
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};
650
fbnic_fw_parse_ownership_resp(void * opaque,struct fbnic_tlv_msg ** results)651 static int fbnic_fw_parse_ownership_resp(void *opaque,
652 struct fbnic_tlv_msg **results)
653 {
654 struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
655
656 /* Count the ownership response as a heartbeat reply */
657 fbd->last_heartbeat_response = jiffies;
658
659 return 0;
660 }
661
/* The heartbeat response carries no attributes */
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};
665
fbnic_fw_parse_heartbeat_resp(void * opaque,struct fbnic_tlv_msg ** results)666 static int fbnic_fw_parse_heartbeat_resp(void *opaque,
667 struct fbnic_tlv_msg **results)
668 {
669 struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
670
671 fbd->last_heartbeat_response = jiffies;
672
673 return 0;
674 }
675
fbnic_fw_xmit_heartbeat_message(struct fbnic_dev * fbd)676 static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
677 {
678 unsigned long req_time = jiffies;
679 struct fbnic_tlv_msg *msg;
680 int err = 0;
681
682 if (!fbnic_fw_present(fbd))
683 return -ENODEV;
684
685 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
686 if (!msg)
687 return -ENOMEM;
688
689 err = fbnic_mbx_map_tlv_msg(fbd, msg);
690 if (err)
691 goto free_message;
692
693 fbd->last_heartbeat_request = req_time;
694
695 return err;
696
697 free_message:
698 free_page((unsigned long)msg);
699 return err;
700 }
701
fbnic_fw_heartbeat_current(struct fbnic_dev * fbd)702 static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
703 {
704 unsigned long last_response = fbd->last_heartbeat_response;
705 unsigned long last_request = fbd->last_heartbeat_request;
706
707 return !time_before(last_response, last_request);
708 }
709
/* Wait for the firmware to answer our initial exchange, then kick off
 * periodic heartbeats. Gives the firmware up to 50 x 200ms; returns
 * -ETIMEDOUT if it never catches up, -ENODEV without firmware, or the
 * result of queuing the first heartbeat request.
 */
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int attempts = 50;
	int err = -ETIMEDOUT;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);

		/* Service the mailbox ourselves when interrupts aren't up */
		if (poll)
			fbnic_mbx_poll(fbd);

		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}
737
fbnic_fw_check_heartbeat(struct fbnic_dev * fbd)738 void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
739 {
740 unsigned long last_request = fbd->last_heartbeat_request;
741 int err;
742
743 /* Do not check heartbeat or send another request until current
744 * period has expired. Otherwise we might start spamming requests.
745 */
746 if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
747 return;
748
749 /* We already reported no mailbox. Wait for it to come back */
750 if (!fbd->fw_heartbeat_enabled)
751 return;
752
753 /* Was the last heartbeat response long time ago? */
754 if (!fbnic_fw_heartbeat_current(fbd)) {
755 dev_warn(fbd->dev,
756 "Firmware did not respond to heartbeat message\n");
757 fbd->fw_heartbeat_enabled = false;
758 }
759
760 /* Place new message on mailbox to elicit a response */
761 err = fbnic_fw_xmit_heartbeat_message(fbd);
762 if (err)
763 dev_warn(fbd->dev, "Failed to send heartbeat message\n");
764 }
765
/* Send a FW_START_UPGRADE_REQ announcing a flash update.
 *
 * @cmpl_data: completion that will receive the firmware's response
 * @id: flash section identifier to be written
 * @len: total image length in bytes; must be nonzero
 *
 * Returns 0 on success, -ENODEV without firmware, -EINVAL for a zero
 * length, -ENOMEM on allocation failure, or a negative error from TLV
 * construction or the mailbox.
 */
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
				   struct fbnic_fw_completion *cmpl_data,
				   unsigned int id, unsigned int len)
{
	struct fbnic_tlv_msg *msg;
	int err;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	/* A zero-length image is nonsensical */
	if (!len)
		return -EINVAL;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
	if (err)
		goto free_message;

	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
				     len);
	if (err)
		goto free_message;

	/* Reserve the completion slot and post in one locked operation */
	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}
802
/* Attribute layout of the firmware start-upgrade response */
static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
807
fbnic_fw_parse_fw_start_upgrade_resp(void * opaque,struct fbnic_tlv_msg ** results)808 static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
809 struct fbnic_tlv_msg **results)
810 {
811 struct fbnic_fw_completion *cmpl_data;
812 struct fbnic_dev *fbd = opaque;
813 u32 msg_type;
814 s32 err;
815
816 /* Verify we have a completion pointer */
817 msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
818 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
819 if (!cmpl_data)
820 return -ENOSPC;
821
822 /* Check for errors */
823 err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
824
825 cmpl_data->result = err;
826 complete(&cmpl_data->done);
827 fbnic_fw_put_cmpl(cmpl_data);
828
829 return 0;
830 }
831
/* Send a FW_WRITE_CHUNK_RESP message carrying one chunk of image data.
 *
 * @data: start of the image buffer; may be NULL when only reporting an
 *	  error back to the firmware
 * @offset: byte offset of this chunk within @data
 * @length: number of bytes to send starting at @offset
 * @cancel_error: nonzero error code telling the firmware to abort the
 *	  upgrade; 0 for a normal data chunk
 *
 * Returns 0 on success, or a negative error from TLV construction or
 * the mailbox.
 */
int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
				 const u8 *data, u32 offset, u16 length,
				 int cancel_error)
{
	struct fbnic_tlv_msg *msg;
	int err;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
	if (!msg)
		return -ENOMEM;

	/* Report error to FW to cancel upgrade */
	if (cancel_error) {
		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
					     cancel_error);
		if (err)
			goto free_message;
	}

	/* Data chunk: offset and length attributes precede the payload */
	if (data) {
		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
					     offset);
		if (err)
			goto free_message;

		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
					     length);
		if (err)
			goto free_message;

		err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
					       data + offset, length);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}
878
/* Attribute layout of the firmware write-chunk request */
static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
	FBNIC_TLV_ATTR_LAST
};
884
fbnic_fw_parse_fw_write_chunk_req(void * opaque,struct fbnic_tlv_msg ** results)885 static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
886 struct fbnic_tlv_msg **results)
887 {
888 struct fbnic_fw_completion *cmpl_data;
889 struct fbnic_dev *fbd = opaque;
890 u32 msg_type;
891 u32 offset;
892 u32 length;
893
894 /* Verify we have a completion pointer */
895 msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
896 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
897 if (!cmpl_data)
898 return -ENOSPC;
899
900 /* Pull length/offset pair and mark it as complete */
901 offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
902 length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
903 cmpl_data->u.fw_update.offset = offset;
904 cmpl_data->u.fw_update.length = length;
905
906 complete(&cmpl_data->done);
907 fbnic_fw_put_cmpl(cmpl_data);
908
909 return 0;
910 }
911
/* Attribute layout of the firmware finish-upgrade request */
static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
916
/* Handle the firmware's end-of-upgrade notification: record the final
 * result, close out the offset/length bookkeeping, and wake the waiter.
 */
static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
						struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer.
	 * NOTE(review): the lookup deliberately uses the WRITE_CHUNK_REQ
	 * type — the upgrade flow appears to register a single completion
	 * under that type for the whole transfer; confirm against the
	 * code that allocates the completion before changing this.
	 */
	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Check for errors */
	err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);

	/* Close out update by incrementing offset by length which should
	 * match the total size of the component. Set length to 0 since no
	 * new chunks will be requested.
	 */
	cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
	cmpl_data->u.fw_update.length = 0;

	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}
947
948 /**
949 * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
950 * @fbd: FBNIC device structure
951 * @cmpl_data: Completion data structure to store sensor response
952 *
953 * Asks the firmware to provide an update with the latest sensor data.
954 * The response will contain temperature and voltage readings.
955 *
956 * Return: 0 on success, negative error value on failure
957 */
fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)958 int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
959 struct fbnic_fw_completion *cmpl_data)
960 {
961 struct fbnic_tlv_msg *msg;
962 int err;
963
964 if (!fbnic_fw_present(fbd))
965 return -ENODEV;
966
967 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
968 if (!msg)
969 return -ENOMEM;
970
971 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
972 if (err)
973 goto free_message;
974
975 return 0;
976
977 free_message:
978 free_page((unsigned long)msg);
979 return err;
980 }
981
/* Attribute layout of the sensor (tsene) read response */
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
988
fbnic_fw_parse_tsene_read_resp(void * opaque,struct fbnic_tlv_msg ** results)989 static int fbnic_fw_parse_tsene_read_resp(void *opaque,
990 struct fbnic_tlv_msg **results)
991 {
992 struct fbnic_fw_completion *cmpl_data;
993 struct fbnic_dev *fbd = opaque;
994 s32 err_resp;
995 int err = 0;
996
997 /* Verify we have a completion pointer to provide with data */
998 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
999 FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
1000 if (!cmpl_data)
1001 return -ENOSPC;
1002
1003 err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
1004 if (err_resp)
1005 goto msg_err;
1006
1007 if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
1008 err = -EINVAL;
1009 goto msg_err;
1010 }
1011
1012 cmpl_data->u.tsene.millidegrees =
1013 fta_get_sint(results, FBNIC_FW_TSENE_THERM);
1014 cmpl_data->u.tsene.millivolts =
1015 fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
1016
1017 msg_err:
1018 cmpl_data->result = err_resp ? : err;
1019 complete(&cmpl_data->done);
1020 fbnic_fw_put_cmpl(cmpl_data);
1021
1022 return err;
1023 }
1024
/* Dispatch table mapping firmware message types to their attribute
 * layouts and handler callbacks, with FBNIC_TLV_MSG_ERROR as the final
 * entry.
 */
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
			 fbnic_fw_start_upgrade_resp_index,
			 fbnic_fw_parse_fw_start_upgrade_resp),
	FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
			 fbnic_fw_write_chunk_req_index,
			 fbnic_fw_parse_fw_write_chunk_req),
	FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
			 fbnic_fw_finish_upgrade_req_index,
			 fbnic_fw_parse_fw_finish_upgrade_req),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_MSG_ERROR
};
1046
/**
 * fbnic_mbx_process_rx_msgs - Drain completed messages from the Rx mailbox
 * @fbd: FBNIC device structure
 *
 * Walks the Rx descriptor ring from head toward tail, stopping at the
 * first descriptor the firmware has not yet marked complete. Each
 * completed buffer is DMA-unmapped, length-checked, and handed to the
 * TLV parser; the backing page is freed regardless of the parse result.
 * Finally the ring is refilled so the firmware always has a page to
 * write into.
 */
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		/* Stop at the first descriptor firmware hasn't completed */
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		/* Return page ownership to the CPU before reading it */
		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		/* TLV header length (in dwords) should fit the DMA length;
		 * warn but still attempt to parse on mismatch.
		 */
		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		/* Free the page whether or not the message was parsed */
		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		/* Advance with wraparound at the ring size */
		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}
1109
/**
 * fbnic_mbx_poll - Service the firmware mailbox
 * @fbd: FBNIC device structure
 *
 * Runs the mailbox event check, then drains any outstanding Tx
 * completions and processes incoming Rx messages.
 */
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}
1117
/**
 * fbnic_mbx_poll_tx_ready - Wait for firmware to bring up the mailbox
 * @fbd: FBNIC device structure
 *
 * Polls for up to 10 seconds for the firmware to acknowledge the
 * mailbox, resetting the Tx descriptor ring on each pass to nudge the
 * firmware into responding. Once the firmware responds, the descriptor
 * rings are initialized and a host capabilities request is sent.
 *
 * Return: 0 on success, -ETIMEDOUT if the firmware never responded,
 * -ENODEV if BAR4 went away, or the error from sending the capabilities
 * request (in which case the mailbox is cleaned up again).
 */
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	int err, i;

	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Use "1" to indicate we entered the state waiting for a response */
	fbd->fw_cap.running.mgmt.version = 1;

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}
1161
/* Fail a pending firmware completion with -EPIPE and wake its waiter.
 * Used when the mailbox is being shut down and no response can arrive.
 */
static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
{
	cmpl_data->result = -EPIPE;
	complete(&cmpl_data->done);
}
1167
fbnic_mbx_evict_all_cmpl(struct fbnic_dev * fbd)1168 static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
1169 {
1170 int i;
1171
1172 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
1173 struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
1174
1175 if (cmpl_data)
1176 __fbnic_fw_evict_cmpl(cmpl_data);
1177 }
1178
1179 memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
1180 }
1181
/**
 * fbnic_mbx_flush_tx - Stop Tx and wait for in-flight messages to drain
 * @fbd: FBNIC device structure
 *
 * Marks the Tx mailbox not-ready so no new messages can be queued,
 * fails all outstanding completions with -EPIPE, then waits up to 10
 * seconds for the firmware to consume the messages already on the ring.
 */
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Get a pointer to the Tx mailbox state */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	/* Flush any completions as we are no longer processing Rx */
	fbnic_mbx_evict_all_cmpl(fbd);

	spin_unlock_irq(&fbd->fw_tx_lock);

	/* Give firmware time to process packet,
	 * we will wait up to 10 seconds which is 500 waits of 20ms.
	 */
	do {
		u8 head = tx_mbx->head;

		/* Tx ring is empty once head == tail */
		if (head == tail)
			break;

		msleep(20);
		fbnic_mbx_process_tx_msgs(fbd);
	} while (time_is_after_jiffies(timeout));
}
1218
fbnic_get_fw_ver_commit_str(struct fbnic_dev * fbd,char * fw_version,const size_t str_sz)1219 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
1220 const size_t str_sz)
1221 {
1222 struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
1223 const char *delim = "";
1224
1225 if (mgmt->commit[0])
1226 delim = "_";
1227
1228 fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
1229 fw_version, str_sz);
1230 }
1231
/**
 * fbnic_fw_alloc_cmpl - Allocate and initialize a firmware completion
 * @msg_type: Firmware message type the completion will wait on
 *
 * Allocates a zeroed completion record, records the message type, and
 * initializes the completion and reference count (starting at one; the
 * caller owns the initial reference).
 *
 * Return: Pointer to the new completion, or NULL on allocation failure.
 */
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
{
	struct fbnic_fw_completion *cmpl;

	cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
	if (cmpl) {
		cmpl->msg_type = msg_type;
		init_completion(&cmpl->done);
		kref_init(&cmpl->ref_count);
	}

	return cmpl;
}
1246
fbnic_fw_clear_cmpl(struct fbnic_dev * fbd,struct fbnic_fw_completion * fw_cmpl)1247 void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
1248 struct fbnic_fw_completion *fw_cmpl)
1249 {
1250 unsigned long flags;
1251
1252 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
1253 fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
1254 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
1255 }
1256
/* Drop a reference to a firmware completion; the release callback
 * frees it once the last reference is gone.
 */
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
}
1261