1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
11
12 #include "fbnic.h"
13 #include "fbnic_tlv.h"
14
/* Write a 64b IPC mailbox descriptor as two ordered 32b MMIO writes. */
static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* Write the upper 32b and then the lower 32b. Doing this the
	 * FW can then read lower, upper, lower to verify that the state
	 * of the descriptor wasn't changed mid-transaction.
	 */
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	/* Flush so the two halves are observed in order by the FW */
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
28
/* Mark an IPC mailbox descriptor invalid, lower half first.
 *
 * The write order is the reverse of __fbnic_mbx_wr_desc(): the lower 32b
 * (carrying @desc, the new state) go out before the upper 32b are cleared.
 */
static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* For initialization we write the lower 32b of the descriptor first.
	 * This way we can set the state to mark it invalid before we clear the
	 * upper 32b.
	 */
	fw_wr32(fbd, desc_offset, desc);
	/* Flush so the halves cannot be reordered */
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset + 1, 0);
}
42
/* Read back a 64b IPC mailbox descriptor as two 32b MMIO reads,
 * lower half first.
 */
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
	u64 desc;

	desc = fw_rd32(fbd, desc_offset);
	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;

	return desc;
}
53
/* Quiesce device DMA for one mailbox direction and reinitialize its
 * descriptor ring to a known-empty state.
 */
static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Disable DMA transactions from the device,
	 * and flush any transactions triggered during cleaning
	 */
	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Rx mailbox corresponds to device writes (AW channel) */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Tx mailbox corresponds to device reads (AR channel) */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
		break;
	}

	wrfl(fbd);

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);

	/* We then fill the rest of the ring starting at the end and moving
	 * back toward descriptor 0 with skip descriptors that have no
	 * length nor address, and tell the firmware that they can skip
	 * them and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
					    FBNIC_IPC_MBX_DESC_FW_CMPL |
					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
90
/* Initialize mailbox state, interrupt handling, and both descriptor rings.
 *
 * Called at device bring-up; resets software-side bookkeeping (lock,
 * cached FW capabilities, per-ring state) before touching the hardware.
 */
void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reset FW Capabilities */
	memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_reset_desc_ring(fbd, i);
}
114
/* DMA-map @msg and post it on the tail of the given mailbox ring.
 *
 * @length is the payload length recorded in the descriptor, @eom marks
 * end-of-message. Returns 0 on success, -ENODEV if the mailbox is not
 * ready or no FW is present, -EBUSY if the ring is full, or -ENOSPC if
 * DMA mapping fails. Ownership of @msg transfers to the ring on success;
 * caller retains ownership (and must free) on failure.
 */
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	/* Rx ring buffers are written by the device, Tx buffers are read */
	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

	/* Ring full when advancing tail would collide with head */
	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	/* Zero the lower half of the next descriptor so the FW has a
	 * clean stop point past the one we are about to publish.
	 */
	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	/* Publish the descriptor last so it only becomes visible once the
	 * buffer bookkeeping above is complete.
	 */
	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}
151
fbnic_mbx_unmap_and_free_msg(struct fbnic_dev * fbd,int mbx_idx,int desc_idx)152 static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
153 int desc_idx)
154 {
155 struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
156 int direction;
157
158 if (!mbx->buf_info[desc_idx].msg)
159 return;
160
161 direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
162 DMA_TO_DEVICE;
163 dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
164 PAGE_SIZE, direction);
165
166 free_page((unsigned long)mbx->buf_info[desc_idx].msg);
167 mbx->buf_info[desc_idx].msg = NULL;
168 }
169
fbnic_mbx_clean_desc_ring(struct fbnic_dev * fbd,int mbx_idx)170 static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
171 {
172 int i;
173
174 fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
175
176 for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
177 fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
178 }
179
fbnic_mbx_clean(struct fbnic_dev * fbd)180 void fbnic_mbx_clean(struct fbnic_dev *fbd)
181 {
182 int i;
183
184 for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
185 fbnic_mbx_clean_desc_ring(fbd, i);
186 }
187
/* Largest payload length the descriptor LEN field can encode; Rx pages are
 * capped to this in case PAGE_SIZE exceeds it.
 */
#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
190
/* Fill the Rx mailbox ring with pages the firmware can write into.
 *
 * Returns 0 on success, -ENODEV if the mailbox is not ready, -ENOMEM if
 * page allocation fails, or an error from fbnic_mbx_map_msg().
 */
static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if the mailbox is not ready to accept buffers */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but 1 unused descriptors in the Rx queue. */
	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		/* GFP_ATOMIC: may run from the mailbox interrupt path */
		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			/* Mapping failed, so ownership stayed with us */
			free_page((unsigned long)msg);
	}

	return err;
}
223
fbnic_mbx_map_tlv_msg(struct fbnic_dev * fbd,struct fbnic_tlv_msg * msg)224 static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
225 struct fbnic_tlv_msg *msg)
226 {
227 unsigned long flags;
228 int err;
229
230 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
231
232 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
233 le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
234
235 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
236
237 return err;
238 }
239
fbnic_mbx_set_cmpl_slot(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)240 static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
241 struct fbnic_fw_completion *cmpl_data)
242 {
243 struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
244 int free = -EXFULL;
245 int i;
246
247 if (!tx_mbx->ready)
248 return -ENODEV;
249
250 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
251 if (!fbd->cmpl_data[i])
252 free = i;
253 else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
254 return -EEXIST;
255 }
256
257 if (free == -EXFULL)
258 return -EXFULL;
259
260 fbd->cmpl_data[free] = cmpl_data;
261
262 return 0;
263 }
264
fbnic_mbx_clear_cmpl_slot(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)265 static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
266 struct fbnic_fw_completion *cmpl_data)
267 {
268 int i;
269
270 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
271 if (fbd->cmpl_data[i] == cmpl_data) {
272 fbd->cmpl_data[i] = NULL;
273 break;
274 }
275 }
276 }
277
/* Reclaim Tx mailbox descriptors that the firmware has completed.
 *
 * Walks from head toward tail, freeing each buffer whose descriptor has
 * the FW completion bit set, and stops at the first one still pending.
 */
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		/* FW processes in order; first incomplete ends the walk */
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}
298
fbnic_mbx_set_cmpl(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)299 int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
300 struct fbnic_fw_completion *cmpl_data)
301 {
302 unsigned long flags;
303 int err;
304
305 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
306 err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
307 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
308
309 return err;
310 }
311
/* Atomically register an (optional) completion and post @msg on the Tx
 * mailbox.
 *
 * Holding fw_tx_lock across both steps guarantees the completion slot is
 * in place before the FW can see the message, and is rolled back if the
 * message cannot be posted.
 */
static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
				    struct fbnic_tlv_msg *msg,
				    struct fbnic_fw_completion *cmpl_data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	if (cmpl_data) {
		err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
		if (err)
			goto unlock_mbx;
	}

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	/* If we successfully reserved a completion and msg failed
	 * then clear completion data for next caller
	 */
	if (err && cmpl_data)
		fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);

unlock_mbx:
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}
340
fbnic_mbx_clear_cmpl(struct fbnic_dev * fbd,struct fbnic_fw_completion * fw_cmpl)341 void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
342 struct fbnic_fw_completion *fw_cmpl)
343 {
344 unsigned long flags;
345
346 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
347 fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
348 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
349 }
350
fbnic_fw_release_cmpl_data(struct kref * kref)351 static void fbnic_fw_release_cmpl_data(struct kref *kref)
352 {
353 struct fbnic_fw_completion *cmpl_data;
354
355 cmpl_data = container_of(kref, struct fbnic_fw_completion,
356 ref_count);
357 kfree(cmpl_data);
358 }
359
360 static struct fbnic_fw_completion *
fbnic_fw_get_cmpl_by_type(struct fbnic_dev * fbd,u32 msg_type)361 fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
362 {
363 struct fbnic_fw_completion *cmpl_data = NULL;
364 unsigned long flags;
365 int i;
366
367 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
368 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
369 if (fbd->cmpl_data[i] &&
370 fbd->cmpl_data[i]->msg_type == msg_type) {
371 cmpl_data = fbd->cmpl_data[i];
372 kref_get(&cmpl_data->ref_count);
373 break;
374 }
375 }
376
377 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
378
379 return cmpl_data;
380 }
381
/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 * One of the following values:
 *   -EOPNOTSUPP: Is not ASIC so mailbox is not supported
 *   -ENODEV: Device I/O error
 *   -ENOMEM: Failed to allocate message
 *   -EBUSY: No space in mailbox
 *   -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However there are no other side effects which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		/* Message never reached the ring; we still own it */
		free_page((unsigned long)msg);

	return err;
}
418
/* Mark a mailbox ring ready and re-enable the matching DMA direction.
 *
 * For the Rx ring this also posts initial buffers so the FW has pages
 * to write into.
 */
static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Enable DMA writes from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);

		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Enable DMA reads from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
		break;
	}
}
441
/* Check and acknowledge the FW mailbox doorbell interrupt.
 *
 * Returns true if the FW interrupt cause was set (and has now been
 * cleared), false otherwise.
 */
static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
	/* We only need to do this on the first interrupt following reset.
	 * This primes the mailbox so that we will have cleared all the
	 * skip descriptors.
	 */
	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
		return false;

	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	return true;
}
455
/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
 * to FW mailbox
 *
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it. On success this also arms
 * (or disarms) heartbeat tracking to match the new ownership state.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	/* The flag's presence (not a value) encodes "take ownership" */
	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}
507
/* Attribute schema for the FW capabilities response message */
static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
	FBNIC_TLV_ATTR_LAST
};
534
/* Parse a TLV array of BMC MAC addresses into @bmc_mac_addr.
 *
 * @bmc_mac_addr: destination array of Ethernet addresses
 * @attr: TLV array attribute holding nested MAC address attributes
 * @len: number of entries available in @bmc_mac_addr
 *
 * Copies up to @len addresses in order and zeroes any remaining unused
 * entries. Returns 0 on success, -ENOSPC if @len exceeds the local
 * scratch capacity, or a negative error from the TLV array parser.
 */
static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses.
	 * Use ARRAY_SIZE so the bound tracks mac_results if it is resized.
	 */
	if (len > (int)ARRAY_SIZE(mac_results))
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}
563
/* Parse the FW capabilities response and populate fbd->fw_cap.
 *
 * Rejects a missing or too-old running FW version (disabling the Tx
 * mailbox in the latter case so the card cannot be used until the FW is
 * updated), then records version/commit strings for the running and
 * stored images, link parameters, and BMC information.
 */
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE) {
		char required_ver[FBNIC_FW_VER_MAX_SIZE];
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		fbnic_mk_fw_ver_str(MIN_FW_VER_CODE, required_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%s)\n",
			running_ver, required_ver);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	/* Running mgmt commit string (warn if absent, not fatal) */
	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	/* Stored (flash) mgmt image version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Running bootloader (CMRT) version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Stored bootloader (CMRT) version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* UEFI (UNDI) version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		/* A BMC without a MAC array is malformed */
		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	/* Only overwrite all_multi if the attribute was sent or BMC absent */
	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	fbd->fw_cap.anti_rollback_version =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);

	return 0;
}
658
/* Ownership response carries no attributes; schema is empty */
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};
662
fbnic_fw_parse_ownership_resp(void * opaque,struct fbnic_tlv_msg ** results)663 static int fbnic_fw_parse_ownership_resp(void *opaque,
664 struct fbnic_tlv_msg **results)
665 {
666 struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
667
668 /* Count the ownership response as a heartbeat reply */
669 fbd->last_heartbeat_response = jiffies;
670
671 return 0;
672 }
673
/* Heartbeat response carries no attributes; schema is empty */
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};
677
fbnic_fw_parse_heartbeat_resp(void * opaque,struct fbnic_tlv_msg ** results)678 static int fbnic_fw_parse_heartbeat_resp(void *opaque,
679 struct fbnic_tlv_msg **results)
680 {
681 struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
682
683 fbd->last_heartbeat_response = jiffies;
684
685 return 0;
686 }
687
fbnic_fw_xmit_heartbeat_message(struct fbnic_dev * fbd)688 static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
689 {
690 unsigned long req_time = jiffies;
691 struct fbnic_tlv_msg *msg;
692 int err = 0;
693
694 if (!fbnic_fw_present(fbd))
695 return -ENODEV;
696
697 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
698 if (!msg)
699 return -ENOMEM;
700
701 err = fbnic_mbx_map_tlv_msg(fbd, msg);
702 if (err)
703 goto free_message;
704
705 fbd->last_heartbeat_request = req_time;
706
707 return err;
708
709 free_message:
710 free_page((unsigned long)msg);
711 return err;
712 }
713
fbnic_fw_heartbeat_current(struct fbnic_dev * fbd)714 static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
715 {
716 unsigned long last_response = fbd->last_heartbeat_response;
717 unsigned long last_request = fbd->last_heartbeat_request;
718
719 return !time_before(last_response, last_request);
720 }
721
/* Wait for the FW heartbeat to come up, then prime the next exchange.
 *
 * Polls for up to 50 * 200ms (10s total). @poll selects manual mailbox
 * polling for contexts where the mailbox interrupt is not running.
 * Returns 0 on success, -ENODEV if no FW is present, -ETIMEDOUT if the
 * FW never responded, or the error from sending the follow-up heartbeat.
 */
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		/* Keep waiting until the FW answers the outstanding request */
		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}
749
/* Periodic heartbeat check: detect a dead FW and keep the exchange going.
 *
 * Rate-limited to one check per FW_HEARTBEAT_PERIOD. On a missed
 * response, warns once and disables further checks until the mailbox
 * comes back (fw_heartbeat_enabled is re-armed elsewhere).
 */
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response long time ago? */
	if (!fbnic_fw_heartbeat_current(fbd)) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place new message on mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
777
/* Ask the FW to begin an image upgrade of @len bytes into section @id.
 *
 * @cmpl_data is registered so the FW's response (and subsequent write
 * chunk requests) can be matched back to this operation. Returns 0 on
 * success or a negative error; on failure the message is freed and the
 * completion is not left registered.
 */
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
				   struct fbnic_fw_completion *cmpl_data,
				   unsigned int id, unsigned int len)
{
	struct fbnic_tlv_msg *msg;
	int err;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	/* A zero-length image is meaningless */
	if (!len)
		return -EINVAL;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
	if (err)
		goto free_message;

	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
				     len);
	if (err)
		goto free_message;

	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}
814
/* Attribute schema for the FW start-upgrade response */
static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
819
fbnic_fw_parse_fw_start_upgrade_resp(void * opaque,struct fbnic_tlv_msg ** results)820 static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
821 struct fbnic_tlv_msg **results)
822 {
823 struct fbnic_fw_completion *cmpl_data;
824 struct fbnic_dev *fbd = opaque;
825 u32 msg_type;
826 s32 err;
827
828 /* Verify we have a completion pointer */
829 msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
830 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
831 if (!cmpl_data)
832 return -ENOSPC;
833
834 /* Check for errors */
835 err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
836
837 cmpl_data->result = err;
838 complete(&cmpl_data->done);
839 fbnic_fw_put_cmpl(cmpl_data);
840
841 return 0;
842 }
843
/* Reply to a FW write-chunk request with data, or cancel the upgrade.
 *
 * Sends a FW_WRITE_CHUNK_RESP message. If @cancel_error is non-zero the
 * error attribute is attached so the FW aborts the upgrade. If @data is
 * non-NULL, @length bytes starting at @data + @offset are attached along
 * with the offset/length pair. Returns 0 on success or a negative error.
 */
int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
				 const u8 *data, u32 offset, u16 length,
				 int cancel_error)
{
	struct fbnic_tlv_msg *msg;
	int err;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
	if (!msg)
		return -ENOMEM;

	/* Report error to FW to cancel upgrade */
	if (cancel_error) {
		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
					     cancel_error);
		if (err)
			goto free_message;
	}

	if (data) {
		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
					     offset);
		if (err)
			goto free_message;

		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
					     length);
		if (err)
			goto free_message;

		err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
					       data + offset, length);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}
890
/* Attribute schema for FW-initiated write-chunk requests */
static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
	FBNIC_TLV_ATTR_LAST
};
896
fbnic_fw_parse_fw_write_chunk_req(void * opaque,struct fbnic_tlv_msg ** results)897 static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
898 struct fbnic_tlv_msg **results)
899 {
900 struct fbnic_fw_completion *cmpl_data;
901 struct fbnic_dev *fbd = opaque;
902 u32 msg_type;
903 u32 offset;
904 u32 length;
905
906 /* Verify we have a completion pointer */
907 msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
908 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
909 if (!cmpl_data)
910 return -ENOSPC;
911
912 /* Pull length/offset pair and mark it as complete */
913 offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
914 length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
915 cmpl_data->u.fw_update.offset = offset;
916 cmpl_data->u.fw_update.length = length;
917
918 complete(&cmpl_data->done);
919 fbnic_fw_put_cmpl(cmpl_data);
920
921 return 0;
922 }
923
/* Attribute schema for the FW finish-upgrade request */
static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
928
/* Handle the FW's notification that the image upgrade is finished.
 *
 * NOTE(review): the completion is looked up under
 * FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ, not a FINISH_UPGRADE type —
 * presumably the flashing flow registers one completion under the
 * write-chunk type for the whole transfer; confirm against the caller
 * that registers the completion.
 */
static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
						struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer */
	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Check for errors */
	err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);

	/* Close out update by incrementing offset by length which should
	 * match the total size of the component. Set length to 0 since no
	 * new chunks will be requested.
	 */
	cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
	cmpl_data->u.fw_update.length = 0;

	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}
959
960 /**
961 * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
962 * @fbd: FBNIC device structure
963 * @cmpl_data: Completion data structure to store sensor response
964 *
965 * Asks the firmware to provide an update with the latest sensor data.
966 * The response will contain temperature and voltage readings.
967 *
968 * Return: 0 on success, negative error value on failure
969 */
fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)970 int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
971 struct fbnic_fw_completion *cmpl_data)
972 {
973 struct fbnic_tlv_msg *msg;
974 int err;
975
976 if (!fbnic_fw_present(fbd))
977 return -ENODEV;
978
979 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
980 if (!msg)
981 return -ENOMEM;
982
983 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
984 if (err)
985 goto free_message;
986
987 return 0;
988
989 free_message:
990 free_page((unsigned long)msg);
991 return err;
992 }
993
/* Attribute schema for the sensor (tsene) read response */
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
1000
/* Handle a sensor read response from the FW.
 *
 * Copies the temperature/voltage readings into the registered completion
 * (or records an error if the FW reported one or the readings are
 * missing) and wakes the waiter.
 */
static int fbnic_fw_parse_tsene_read_resp(void *opaque,
					  struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	s32 err_resp;
	int err = 0;

	/* Verify we have a completion pointer to provide with data */
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl_data)
		return -ENOSPC;

	/* FW-side error takes precedence over any local parse error */
	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
	if (err_resp)
		goto msg_err;

	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
		err = -EINVAL;
		goto msg_err;
	}

	cmpl_data->u.tsene.millidegrees =
		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
	cmpl_data->u.tsene.millivolts =
		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);

msg_err:
	/* GNU ?: picks err_resp when non-zero, else the local err */
	cmpl_data->result = err_resp ? : err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}
1036
/* Attribute map for FW log messages: the newest entry arrives as scalar
 * msec/index/msg attributes, while the optional arrays carry additional
 * (historical) entries; LENGTH gives the array entry count.
 */
static const struct fbnic_tlv_index fbnic_fw_log_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_MSEC),
	FBNIC_TLV_ATTR_U64(FBNIC_FW_LOG_INDEX),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_LOG_MSG, FBNIC_FW_LOG_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_LENGTH),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSEC_ARRAY),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_INDEX_ARRAY),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSG_ARRAY),
	FBNIC_TLV_ATTR_LAST
};
1047
/* Gather one attribute type from a log message into a flat TLV array.
 *
 * Slot 0 of @tlv_array_out always receives the scalar attribute for the
 * newest entry; when @length is nonzero the array attribute is unpacked
 * into slots 1..@length. Returns 0 or a negative error.
 */
static int fbnic_fw_process_log_array(struct fbnic_tlv_msg **results,
				      u16 length, u16 arr_type_idx,
				      u16 attr_type_idx,
				      struct fbnic_tlv_msg **tlv_array_out)
{
	struct fbnic_tlv_msg *arr;
	int nr_words;

	if (!results[attr_type_idx])
		return -EINVAL;

	tlv_array_out[0] = results[attr_type_idx];

	/* No history requested: the scalar attribute is all there is */
	if (!length)
		return 0;

	arr = results[arr_type_idx];
	if (!arr)
		return -EINVAL;

	/* Payload size in 32b words, excluding the array's own header */
	nr_words = le16_to_cpu(arr->hdr.len) / sizeof(u32) - 1;

	return fbnic_tlv_attr_parse_array(&arr[1], nr_words,
					  &tlv_array_out[1],
					  fbnic_fw_log_req_index,
					  attr_type_idx, length);
}
1079
fbnic_fw_parse_logs(struct fbnic_dev * fbd,struct fbnic_tlv_msg ** msec_tlv,struct fbnic_tlv_msg ** index_tlv,struct fbnic_tlv_msg ** log_tlv,int count)1080 static int fbnic_fw_parse_logs(struct fbnic_dev *fbd,
1081 struct fbnic_tlv_msg **msec_tlv,
1082 struct fbnic_tlv_msg **index_tlv,
1083 struct fbnic_tlv_msg **log_tlv,
1084 int count)
1085 {
1086 int i;
1087
1088 for (i = 0; i < count; i++) {
1089 char log[FBNIC_FW_LOG_MAX_SIZE];
1090 ssize_t len;
1091 u64 index;
1092 u32 msec;
1093 int err;
1094
1095 if (!msec_tlv[i] || !index_tlv[i] || !log_tlv[i]) {
1096 dev_warn(fbd->dev, "Received log message with missing attributes!\n");
1097 return -EINVAL;
1098 }
1099
1100 index = fbnic_tlv_attr_get_signed(index_tlv[i], 0);
1101 msec = fbnic_tlv_attr_get_signed(msec_tlv[i], 0);
1102 len = fbnic_tlv_attr_get_string(log_tlv[i], log,
1103 FBNIC_FW_LOG_MAX_SIZE);
1104 if (len < 0)
1105 return len;
1106
1107 err = fbnic_fw_log_write(fbd, index, msec, log);
1108 if (err)
1109 return err;
1110 }
1111
1112 return 0;
1113 }
1114
fbnic_fw_parse_log_req(void * opaque,struct fbnic_tlv_msg ** results)1115 static int fbnic_fw_parse_log_req(void *opaque,
1116 struct fbnic_tlv_msg **results)
1117 {
1118 struct fbnic_tlv_msg *index_tlv[FBNIC_FW_MAX_LOG_HISTORY];
1119 struct fbnic_tlv_msg *msec_tlv[FBNIC_FW_MAX_LOG_HISTORY];
1120 struct fbnic_tlv_msg *log_tlv[FBNIC_FW_MAX_LOG_HISTORY];
1121 struct fbnic_dev *fbd = opaque;
1122 u16 length;
1123 int err;
1124
1125 length = fta_get_uint(results, FBNIC_FW_LOG_LENGTH);
1126 if (length >= FBNIC_FW_MAX_LOG_HISTORY)
1127 return -E2BIG;
1128
1129 err = fbnic_fw_process_log_array(results, length,
1130 FBNIC_FW_LOG_MSEC_ARRAY,
1131 FBNIC_FW_LOG_MSEC, msec_tlv);
1132 if (err)
1133 return err;
1134
1135 err = fbnic_fw_process_log_array(results, length,
1136 FBNIC_FW_LOG_INDEX_ARRAY,
1137 FBNIC_FW_LOG_INDEX, index_tlv);
1138 if (err)
1139 return err;
1140
1141 err = fbnic_fw_process_log_array(results, length,
1142 FBNIC_FW_LOG_MSG_ARRAY,
1143 FBNIC_FW_LOG_MSG, log_tlv);
1144 if (err)
1145 return err;
1146
1147 err = fbnic_fw_parse_logs(fbd, msec_tlv, index_tlv, log_tlv,
1148 length + 1);
1149 if (err)
1150 return err;
1151
1152 return 0;
1153 }
1154
/* Ask the firmware to start or stop streaming log messages.
 *
 * @enable:           request log delivery (with FBNIC_FW_LOG_VERSION)
 * @send_log_history: also request buffered historical entries
 *
 * Returns 0 on success; -EOPNOTSUPP when the running firmware predates
 * log support, or a negative error from message build/transmit.
 */
int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
			    bool send_log_history)
{
	struct fbnic_tlv_msg *msg;
	int err;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_LOG) {
		dev_warn(fbd->dev, "Firmware version is too old to support firmware logs!\n");
		return -EOPNOTSUPP;
	}

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ);
	if (!msg)
		return -ENOMEM;

	if (enable) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_SEND_LOGS);

		/* Advertise which log format version we can consume */
		if (!err)
			err = fbnic_tlv_attr_put_int(msg,
						     FBNIC_SEND_LOGS_VERSION,
						     FBNIC_FW_LOG_VERSION);

		if (!err && send_log_history)
			err = fbnic_tlv_attr_put_flag(msg,
						      FBNIC_SEND_LOGS_HISTORY);

		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}
1199
/* Dispatch table mapping firmware message types to their attribute
 * index table and parse handler; consulted by fbnic_tlv_msg_parse()
 * for every message pulled from the Rx mailbox.
 */
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
			 fbnic_fw_start_upgrade_resp_index,
			 fbnic_fw_parse_fw_start_upgrade_resp),
	FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
			 fbnic_fw_write_chunk_req_index,
			 fbnic_fw_parse_fw_write_chunk_req),
	FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
			 fbnic_fw_finish_upgrade_req_index,
			 fbnic_fw_parse_fw_finish_upgrade_req),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_PARSER(LOG_MSG_REQ,
			 fbnic_fw_log_req_index,
			 fbnic_fw_parse_log_req),
	FBNIC_TLV_MSG_ERROR
};
1224
/* Drain completed messages from the Rx mailbox and hand each to the TLV
 * parser; called from fbnic_mbx_poll(). Each consumed page is unmapped,
 * parsed, freed, and replenished at the end via fbnic_mbx_alloc_rx_msgs().
 */
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		/* Stop at the first descriptor the FW hasn't completed yet */
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		/* Give the CPU ownership of the page before reading it */
		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		/* TLV header length (32b words) should not exceed the
		 * byte length the descriptor reported.
		 */
		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		/* Page is consumed whether or not it parsed cleanly */
		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}
1287
/* One full service pass over the FW mailboxes: process pending mailbox
 * events, then the Tx ring, then the Rx ring.
 */
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}
1295
/* Bring the FW mailbox up by polling, with an overall ~10 second budget:
 * first wait for the FW to respond to a descriptor ring reset, then init
 * the rings, request capabilities, and poll until the FW reports a
 * management version of at least MIN_FW_VER_CODE.
 *
 * Return: 0 on success, -ETIMEDOUT / -ENODEV or a transmit error on
 * failure (mailbox is cleaned up on the error path).
 */
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	unsigned long timeout = jiffies + 10 * HZ + 1;
	int err, i;

	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Poll until we get a current management firmware version, use "1"
	 * to indicate we entered the polling state waiting for a response
	 */
	for (fbd->fw_cap.running.mgmt.version = 1;
	     fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE;) {
		/* Mailbox dropping out from under us is a hard failure */
		if (!tx_mbx->ready)
			err = -ENODEV;
		if (err)
			goto clean_mbx;

		msleep(20);
		fbnic_mbx_poll(fbd);

		/* set err, but wait till mgmt.version check to report it */
		if (!time_is_after_jiffies(timeout))
			err = -ETIMEDOUT;
	}

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}
1355
/* Fail a pending completion with -EPIPE and wake its waiter; used when
 * the mailbox is shutting down and no response will arrive.
 */
static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
{
	cmpl_data->result = -EPIPE;
	complete(&cmpl_data->done);
}
1361
fbnic_mbx_evict_all_cmpl(struct fbnic_dev * fbd)1362 static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
1363 {
1364 int i;
1365
1366 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
1367 struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
1368
1369 if (cmpl_data)
1370 __fbnic_fw_evict_cmpl(cmpl_data);
1371 }
1372
1373 memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
1374 }
1375
/* Stop accepting new Tx mailbox messages, evict all waiting completions,
 * then give the firmware up to 10 seconds to consume what was already
 * queued before returning.
 */
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Look up the Tx mailbox */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	/* Flush any completions as we are no longer processing Rx */
	fbnic_mbx_evict_all_cmpl(fbd);

	spin_unlock_irq(&fbd->fw_tx_lock);

	/* Give firmware time to process packet,
	 * we will wait up to 10 seconds which is 500 waits of 20ms.
	 */
	do {
		u8 head = tx_mbx->head;

		/* Tx ring is empty once head == tail */
		if (head == tail)
			break;

		msleep(20);
		fbnic_mbx_process_tx_msgs(fbd);
	} while (time_is_after_jiffies(timeout));
}
1412
fbnic_get_fw_ver_commit_str(struct fbnic_dev * fbd,char * fw_version,const size_t str_sz)1413 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
1414 const size_t str_sz)
1415 {
1416 struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
1417 const char *delim = "";
1418
1419 if (mgmt->commit[0])
1420 delim = "_";
1421
1422 fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
1423 fw_version, str_sz);
1424 }
1425
/* Allocate and initialize a completion record for @msg_type.
 *
 * The record starts with a single reference (released via
 * fbnic_fw_put_cmpl) and an unsignalled completion. Returns NULL on
 * allocation failure.
 */
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
{
	struct fbnic_fw_completion *fw_cmpl;

	fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL);
	if (!fw_cmpl)
		return NULL;

	kref_init(&fw_cmpl->ref_count);
	init_completion(&fw_cmpl->done);
	fw_cmpl->msg_type = msg_type;

	return fw_cmpl;
}
1440
/* Drop a reference on a completion record; the final put releases it
 * through fbnic_fw_release_cmpl_data().
 */
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
}
1445