1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/bitfield.h>
5 #include <linux/etherdevice.h>
6 #include <linux/delay.h>
7 #include <linux/dev_printk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/gfp.h>
10 #include <linux/types.h>
11
12 #include "fbnic.h"
13 #include "fbnic_tlv.h"
14
/* Write one 64b IPC mailbox descriptor as two 32b MMIO writes.
 *
 * The lower 32b carry the descriptor state bits, so the write order below
 * is part of the host/FW protocol — do not reorder.
 */
static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* Write the upper 32b and then the lower 32b. Doing this the
	 * FW can then read lower, upper, lower to verify that the state
	 * of the descriptor wasn't changed mid-transaction.
	 */
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
28
/* Invalidate one IPC mailbox descriptor, lower 32b first.
 *
 * @desc supplies the new lower 32b (state bits); the upper 32b (address
 * half) are cleared afterwards.
 */
static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* For initialization we write the lower 32b of the descriptor first.
	 * This way we can set the state to mark it invalid before we clear the
	 * upper 32b.
	 */
	fw_wr32(fbd, desc_offset, desc);
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset + 1, 0);
}
42
/* Read back a full 64b mailbox descriptor, lower 32b (state) first,
 * mirroring the ordering the write side relies on.
 */
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
	u64 desc;

	desc = fw_rd32(fbd, desc_offset);
	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;

	return desc;
}
53
/* Return one mailbox descriptor ring to its post-reset state.
 *
 * Flushes device DMA for the ring's direction, then rewrites every
 * descriptor so the firmware sees a hard stop at index 0 and skip
 * entries everywhere else.
 */
static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Disable DMA transactions from the device,
	 * and flush any transactions triggered during cleaning
	 */
	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
		break;
	}

	wrfl(fbd);

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);

	/* We then fill the rest of the ring starting at the end and moving
	 * back toward descriptor 0 with skip descriptors that have no
	 * length nor address, and tell the firmware that they can skip
	 * them and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
					    FBNIC_IPC_MBX_DESC_FW_CMPL |
					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
90
/* Initialize (or reinitialize after reset) both IPC mailbox rings.
 *
 * Resets the driver-side mailbox state and FW capability cache, arms the
 * FW doorbell interrupt on vector 0, then rewrites each descriptor ring
 * to its idle state.
 */
void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reset FW Capabilities */
	memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_reset_desc_ring(fbd, i);
}
114
/* Post a message buffer onto one of the IPC mailbox rings.
 *
 * Maps @msg (always one full page) for DMA in the direction appropriate
 * for the ring, records it at the ring tail, and publishes a descriptor
 * carrying the DMA address, @length in bytes, the @eom flag, and the
 * host-complete bit.
 *
 * Callers must serialize access to a given ring (the Tx ring is guarded
 * by fw_tx_lock in its callers).
 *
 * Return: 0 on success, -ENODEV if the mailbox or firmware is not ready,
 * -EBUSY if the ring is full, -ENOSPC if the DMA mapping failed.
 */
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	/* Rx buffers are written by the device, Tx buffers are read by it */
	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

	/* Ring is full when advancing tail would collide with head */
	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	/* The mapping is always page sized regardless of @length */
	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	/* Clear the lower 32b of the descriptor after @tail so the FW has
	 * a stopping point before we publish the new descriptor.
	 */
	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}
151
fbnic_mbx_unmap_and_free_msg(struct fbnic_dev * fbd,int mbx_idx,int desc_idx)152 static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
153 int desc_idx)
154 {
155 struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
156 int direction;
157
158 if (!mbx->buf_info[desc_idx].msg)
159 return;
160
161 direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
162 DMA_TO_DEVICE;
163 dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
164 PAGE_SIZE, direction);
165
166 free_page((unsigned long)mbx->buf_info[desc_idx].msg);
167 mbx->buf_info[desc_idx].msg = NULL;
168 }
169
fbnic_mbx_clean_desc_ring(struct fbnic_dev * fbd,int mbx_idx)170 static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
171 {
172 int i;
173
174 fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
175
176 for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
177 fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
178 }
179
fbnic_mbx_clean(struct fbnic_dev * fbd)180 void fbnic_mbx_clean(struct fbnic_dev *fbd)
181 {
182 int i;
183
184 for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
185 fbnic_mbx_clean_desc_ring(fbd, i);
186 }
187
/* Largest length a descriptor can advertise, and the Rx buffer size we
 * report to the firmware (one page, capped by the descriptor field width).
 */
#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)

/* Stock the Rx mailbox ring with page-sized buffers for FW messages.
 *
 * Return: 0 on success, -ENODEV if the mailbox isn't ready, -ENOMEM on
 * allocation failure, or an error from fbnic_mbx_map_msg().
 */
static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if the mailbox is not ready to accept buffers */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but 1 unused descriptors in the Rx queue. */
	count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		/* GFP_ATOMIC: may run from the mailbox interrupt/poll path */
		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			free_page((unsigned long)msg);
	}

	return err;
}
223
fbnic_mbx_map_tlv_msg(struct fbnic_dev * fbd,struct fbnic_tlv_msg * msg)224 static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
225 struct fbnic_tlv_msg *msg)
226 {
227 unsigned long flags;
228 int err;
229
230 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
231
232 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
233 le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
234
235 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
236
237 return err;
238 }
239
/* Reserve a completion slot for an outstanding request.
 *
 * Caller must hold fw_tx_lock. Only one pending completion is allowed per
 * message type. Note the scan records the *last* free slot seen, which is
 * the one that gets used.
 *
 * Return: 0 on success, -ENODEV if the Tx mailbox isn't ready, -EEXIST if
 * a completion of the same type is already pending, -EXFULL if every slot
 * is occupied.
 */
static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
				   struct fbnic_fw_completion *cmpl_data)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	int free = -EXFULL;
	int i;

	if (!tx_mbx->ready)
		return -ENODEV;

	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
		if (!fbd->cmpl_data[i])
			free = i;
		else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
			return -EEXIST;
	}

	if (free == -EXFULL)
		return -EXFULL;

	fbd->cmpl_data[free] = cmpl_data;

	return 0;
}
264
fbnic_mbx_clear_cmpl_slot(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)265 static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
266 struct fbnic_fw_completion *cmpl_data)
267 {
268 int i;
269
270 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
271 if (fbd->cmpl_data[i] == cmpl_data) {
272 fbd->cmpl_data[i] = NULL;
273 break;
274 }
275 }
276 }
277
/* Reap completed Tx mailbox descriptors.
 *
 * Walks from the cached head up to the first descriptor the firmware has
 * not yet marked complete, unmapping and freeing each finished message,
 * then stores the new head for the next pass.
 */
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}
298
fbnic_mbx_set_cmpl(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)299 int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
300 struct fbnic_fw_completion *cmpl_data)
301 {
302 unsigned long flags;
303 int err;
304
305 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
306 err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
307 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
308
309 return err;
310 }
311
/* Map a Tx request, optionally reserving a completion slot first.
 *
 * fw_tx_lock is held across both steps so the slot reservation and the
 * ring write are atomic with respect to other senders. If the mapping
 * fails after a slot was reserved, the slot is released again.
 *
 * Return: 0 on success, or an error from fbnic_mbx_set_cmpl_slot() /
 * fbnic_mbx_map_msg().
 */
static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
				    struct fbnic_tlv_msg *msg,
				    struct fbnic_fw_completion *cmpl_data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	if (cmpl_data) {
		err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
		if (err)
			goto unlock_mbx;
	}

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	/* If we successfully reserved a completion and msg failed
	 * then clear completion data for next caller
	 */
	if (err && cmpl_data)
		fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);

unlock_mbx:
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}
340
fbnic_mbx_clear_cmpl(struct fbnic_dev * fbd,struct fbnic_fw_completion * fw_cmpl)341 void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
342 struct fbnic_fw_completion *fw_cmpl)
343 {
344 unsigned long flags;
345
346 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
347 fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
348 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
349 }
350
fbnic_fw_release_cmpl_data(struct kref * kref)351 static void fbnic_fw_release_cmpl_data(struct kref *kref)
352 {
353 struct fbnic_fw_completion *cmpl_data;
354
355 cmpl_data = container_of(kref, struct fbnic_fw_completion,
356 ref_count);
357 kfree(cmpl_data);
358 }
359
360 static struct fbnic_fw_completion *
fbnic_fw_get_cmpl_by_type(struct fbnic_dev * fbd,u32 msg_type)361 fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
362 {
363 struct fbnic_fw_completion *cmpl_data = NULL;
364 unsigned long flags;
365 int i;
366
367 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
368 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
369 if (fbd->cmpl_data[i] &&
370 fbd->cmpl_data[i]->msg_type == msg_type) {
371 cmpl_data = fbd->cmpl_data[i];
372 kref_get(&cmpl_data->ref_count);
373 break;
374 }
375 }
376
377 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
378
379 return cmpl_data;
380 }
381
/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 * One of the following values:
 * 0: Message successfully placed on the Tx mailbox
 * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
 * -ENODEV: Device I/O error
 * -ENOMEM: Failed to allocate message
 * -EBUSY: No space in mailbox
 * -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However there are no other side effects which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	/* On mapping failure the message never reached the ring, so we
	 * still own the page and must free it here.
	 */
	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		free_page((unsigned long)msg);

	return err;
}
418
/* Mark one mailbox ring ready and enable the matching DMA direction.
 *
 * The Rx ring gets bus-master writes enabled and is then stocked with
 * receive buffers; the Tx ring gets bus-master reads enabled.
 */
static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Enable DMA writes from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);

		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Enable DMA reads from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
		break;
	}
}
441
/* Check for and acknowledge the FW doorbell interrupt on vector 0.
 *
 * Return: true if the FW MSI-X cause bit was set (it is cleared before
 * returning), false otherwise.
 */
static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
	/* We only need to do this on the first interrupt following reset.
	 * this primes the mailbox so that we will have cleared all the
	 * skip descriptors.
	 */
	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
		return false;

	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	return true;
}
455
/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
 * to FW mailbox
 *
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it. On success the heartbeat
 * tracking state is (re)initialized, and heartbeat monitoring is enabled
 * only while taking ownership.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	/* The ownership flag is only attached when taking ownership; its
	 * absence signals a release.
	 */
	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set prev_firmware_time to 0 to avoid triggering firmware crash
	 * detection until we receive the second uptime in a heartbeat resp.
	 */
	fbd->prev_firmware_time = 0;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}
512
/* TLV parse schema for the firmware capabilities response message */
static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
	FBNIC_TLV_ATTR_LAST
};
539
/* Parse the BMC MAC address array attribute into @bmc_mac_addr.
 *
 * Up to @len addresses are copied; any remaining entries are zeroed so
 * the caller always gets a fully initialized array.
 *
 * Return: 0 on success, -ENOSPC if @len exceeds the scratch capacity,
 * or an error from fbnic_tlv_attr_parse_array().
 */
static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses.
	 * Tie the bound to the scratch array instead of repeating the magic
	 * number; the unsigned comparison also rejects a negative @len.
	 */
	if (len > ARRAY_SIZE(mac_results))
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}
568
/* Parse the firmware capabilities response and cache it in fbd->fw_cap.
 *
 * Rejects a response without a running mgmt firmware version, or one older
 * than MIN_FW_VER_CODE (in which case the Tx mailbox is disabled to block
 * further use until the firmware is updated).
 *
 * Return: 0 on success, -EINVAL on a bad/old version or a BMC report
 * missing its MAC array, or an error from fbnic_fw_parse_bmc_addrs().
 */
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE) {
		char required_ver[FBNIC_FW_VER_MAX_SIZE];
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		fbnic_mk_fw_ver_str(MIN_FW_VER_CODE, required_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%s)\n",
			running_ver, required_ver);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	/* Stored (flash) mgmt image version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Running and stored CMRT bootloader version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Stored UEFI (UNDI) driver version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	/* A BMC report must carry the MAC address array */
	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	/* Only update all_multi when the attribute was sent, or when no
	 * BMC is present (all_multi is then 0).
	 */
	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	fbd->fw_cap.anti_rollback_version =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);

	/* Always assume we need a BMC reinit */
	fbd->fw_cap.need_bmc_tcam_reinit = true;

	return 0;
}
666
/* TLV parse schema for the ownership response message */
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_U64(FBNIC_FW_OWNERSHIP_TIME),
	FBNIC_TLV_ATTR_LAST
};
671
fbnic_fw_parse_ownership_resp(void * opaque,struct fbnic_tlv_msg ** results)672 static int fbnic_fw_parse_ownership_resp(void *opaque,
673 struct fbnic_tlv_msg **results)
674 {
675 struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
676
677 /* Count the ownership response as a heartbeat reply */
678 fbd->last_heartbeat_response = jiffies;
679
680 /* Capture firmware time for logging and firmware crash check */
681 fbd->firmware_time = fta_get_uint(results, FBNIC_FW_OWNERSHIP_TIME);
682
683 return 0;
684 }
685
/* TLV parse schema for the heartbeat response message */
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_U64(FBNIC_FW_HEARTBEAT_UPTIME),
	FBNIC_TLV_ATTR_LAST
};
690
fbnic_fw_parse_heartbeat_resp(void * opaque,struct fbnic_tlv_msg ** results)691 static int fbnic_fw_parse_heartbeat_resp(void *opaque,
692 struct fbnic_tlv_msg **results)
693 {
694 struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
695
696 fbd->last_heartbeat_response = jiffies;
697
698 /* Capture firmware time for logging and firmware crash check */
699 fbd->firmware_time = fta_get_uint(results, FBNIC_FW_HEARTBEAT_UPTIME);
700
701 return 0;
702 }
703
fbnic_fw_xmit_heartbeat_message(struct fbnic_dev * fbd)704 static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
705 {
706 unsigned long req_time = jiffies;
707 struct fbnic_tlv_msg *msg;
708 int err = 0;
709
710 if (!fbnic_fw_present(fbd))
711 return -ENODEV;
712
713 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
714 if (!msg)
715 return -ENOMEM;
716
717 err = fbnic_mbx_map_tlv_msg(fbd, msg);
718 if (err)
719 goto free_message;
720
721 fbd->last_heartbeat_request = req_time;
722 fbd->prev_firmware_time = fbd->firmware_time;
723
724 return err;
725
726 free_message:
727 free_page((unsigned long)msg);
728 return err;
729 }
730
fbnic_fw_heartbeat_current(struct fbnic_dev * fbd)731 static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
732 {
733 unsigned long last_response = fbd->last_heartbeat_response;
734 unsigned long last_request = fbd->last_heartbeat_request;
735
736 return !time_before(last_response, last_request);
737 }
738
/**
 * fbnic_fw_init_heartbeat - Wait for heartbeat sync, then prime the next one
 * @fbd: FBNIC device structure
 * @poll: Poll the mailbox directly instead of relying on the IRQ path
 *
 * Return: 0 on success, -ENODEV if no firmware is present, -ETIMEDOUT if
 * the response never caught up with the request, or an error from
 * fbnic_fw_xmit_heartbeat_message().
 *
 * Waits up to ~10 seconds (50 x 200ms) for the heartbeat response to become
 * current, then queues a fresh heartbeat request.
 */
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}
766
/* Periodic heartbeat check.
 *
 * Once per FW_HEARTBEAT_PERIOD: declares the firmware unresponsive if the
 * last request went unanswered or the reported uptime went backwards
 * (suggesting a firmware restart), then queues the next heartbeat request.
 */
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response long time ago? */
	if (!fbnic_fw_heartbeat_current(fbd) ||
	    fbd->firmware_time < fbd->prev_firmware_time) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place new message on mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
795
796 /**
797 * fbnic_fw_xmit_coredump_info_msg - Create and transmit a coredump info message
798 * @fbd: FBNIC device structure
799 * @cmpl_data: Structure to store info in
800 * @force: Force coredump event if one hasn't already occurred
801 *
802 * Return: zero on success, negative errno on failure
803 *
804 * Asks the FW for info related to coredump. If a coredump doesn't exist it
805 * can optionally force one if force is true.
806 */
fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data,bool force)807 int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
808 struct fbnic_fw_completion *cmpl_data,
809 bool force)
810 {
811 struct fbnic_tlv_msg *msg;
812 int err = 0;
813
814 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ);
815 if (!msg)
816 return -ENOMEM;
817
818 if (force) {
819 err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_COREDUMP_REQ_INFO_CREATE);
820 if (err)
821 goto free_msg;
822 }
823
824 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
825 if (err)
826 goto free_msg;
827
828 return 0;
829
830 free_msg:
831 free_page((unsigned long)msg);
832 return err;
833 }
834
/* TLV parse schema for the coredump-info response message */
static const struct fbnic_tlv_index fbnic_coredump_info_resp_index[] = {
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_COREDUMP_INFO_AVAILABLE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_INFO_SIZE),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_INFO_ERROR),
	FBNIC_TLV_ATTR_LAST
};
841
/* Handle the coredump-info response, completing the waiting request.
 *
 * Return: -ENOSPC if no completion is registered for this message type,
 * otherwise the status handed to the waiter (0, a firmware-reported error,
 * or -ENOENT when no coredump is available).
 */
static int
fbnic_fw_parse_coredump_info_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer to provide with data */
	msg_type = FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* A firmware-reported error takes precedence over the payload */
	err = fta_get_sint(results, FBNIC_FW_COREDUMP_INFO_ERROR);
	if (err)
		goto msg_err;

	/* No coredump exists (and none was forced) */
	if (!results[FBNIC_FW_COREDUMP_INFO_AVAILABLE]) {
		err = -ENOENT;
		goto msg_err;
	}

	cmpl_data->u.coredump_info.size =
		fta_get_uint(results, FBNIC_FW_COREDUMP_INFO_SIZE);

msg_err:
	/* Wake the waiter with the final status and drop our reference */
	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}
875
876 /**
877 * fbnic_fw_xmit_coredump_read_msg - Create and transmit a coredump read request
878 * @fbd: FBNIC device structure
879 * @cmpl_data: Completion struct to store coredump
880 * @offset: Offset into coredump requested
881 * @length: Length of section of cordeump to fetch
882 *
883 * Return: zero on success, negative errno on failure
884 *
885 * Asks the firmware to provide a section of the cordeump back in a message.
886 * The response will have an offset and size matching the values provided.
887 */
fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data,u32 offset,u32 length)888 int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
889 struct fbnic_fw_completion *cmpl_data,
890 u32 offset, u32 length)
891 {
892 struct fbnic_tlv_msg *msg;
893 int err = 0;
894
895 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ);
896 if (!msg)
897 return -ENOMEM;
898
899 if (offset) {
900 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_OFFSET,
901 offset);
902 if (err)
903 goto free_message;
904 }
905
906 if (length) {
907 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_LENGTH,
908 length);
909 if (err)
910 goto free_message;
911 }
912
913 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
914 if (err)
915 goto free_message;
916
917 return 0;
918
919 free_message:
920 free_page((unsigned long)msg);
921 return err;
922 }
923
/* TLV parse schema for the coredump read response message */
static const struct fbnic_tlv_index fbnic_coredump_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_LENGTH),
	FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_COREDUMP_READ_DATA),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_READ_ERROR),
	FBNIC_TLV_ATTR_LAST
};
931
/* Handle one coredump read response chunk.
 *
 * Validates the firmware-supplied offset/length against the stride-based
 * layout recorded in the completion, then copies the chunk into the
 * matching slot of cmpl_data->u.coredump.data (marking it filled by
 * NULLing the pointer). Completes the waiter after every chunk.
 *
 * Return: -ENOSPC if no completion is registered, otherwise the status
 * handed to the waiter (0, a firmware error, -ENODATA for a missing data
 * attribute, or -EINVAL for offset/length validation failures).
 */
static int fbnic_fw_parse_coredump_resp(void *opaque,
					struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	u32 index, last_offset, last_length;
	struct fbnic_dev *fbd = opaque;
	struct fbnic_tlv_msg *data_hdr;
	u32 length, offset;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer to provide with data */
	msg_type = FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	err = fta_get_sint(results, FBNIC_FW_COREDUMP_READ_ERROR);
	if (err)
		goto msg_err;

	data_hdr = results[FBNIC_FW_COREDUMP_READ_DATA];
	if (!data_hdr) {
		err = -ENODATA;
		goto msg_err;
	}

	offset = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_OFFSET);
	length = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_LENGTH);

	/* The claimed length must fit inside the TLV payload actually sent */
	if (length > le16_to_cpu(data_hdr->hdr.len) - sizeof(u32)) {
		dev_err(fbd->dev, "length greater than size of message\n");
		err = -EINVAL;
		goto msg_err;
	}

	/* Only the last offset can have a length != stride */
	last_length =
		(cmpl_data->u.coredump.size % cmpl_data->u.coredump.stride) ? :
		cmpl_data->u.coredump.stride;
	last_offset = cmpl_data->u.coredump.size - last_length;

	/* Verify offset and length */
	if (offset % cmpl_data->u.coredump.stride || offset > last_offset) {
		dev_err(fbd->dev, "offset %d out of range\n", offset);
		err = -EINVAL;
	} else if (length != ((offset == last_offset) ?
			      last_length : cmpl_data->u.coredump.stride)) {
		dev_err(fbd->dev, "length %d out of range for offset %d\n",
			length, offset);
		err = -EINVAL;
	}
	if (err)
		goto msg_err;

	/* If data pointer is NULL it is already filled, just skip the copy */
	index = offset / cmpl_data->u.coredump.stride;
	if (!cmpl_data->u.coredump.data[index])
		goto msg_err;

	/* Copy data and mark index filled by setting pointer to NULL */
	memcpy(cmpl_data->u.coredump.data[index],
	       fbnic_tlv_attr_get_value_ptr(data_hdr), length);
	cmpl_data->u.coredump.data[index] = NULL;

msg_err:
	/* Wake the waiter with this chunk's status and drop our reference */
	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}
1004
fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data,unsigned int id,unsigned int len)1005 int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
1006 struct fbnic_fw_completion *cmpl_data,
1007 unsigned int id, unsigned int len)
1008 {
1009 struct fbnic_tlv_msg *msg;
1010 int err;
1011
1012 if (!fbnic_fw_present(fbd))
1013 return -ENODEV;
1014
1015 if (!len)
1016 return -EINVAL;
1017
1018 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
1019 if (!msg)
1020 return -ENOMEM;
1021
1022 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
1023 if (err)
1024 goto free_message;
1025
1026 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
1027 len);
1028 if (err)
1029 goto free_message;
1030
1031 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
1032 if (err)
1033 goto free_message;
1034
1035 return 0;
1036
1037 free_message:
1038 free_page((unsigned long)msg);
1039 return err;
1040 }
1041
/* Attribute layout of the FW_START_UPGRADE response: a single status code */
static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
1046
fbnic_fw_parse_fw_start_upgrade_resp(void * opaque,struct fbnic_tlv_msg ** results)1047 static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
1048 struct fbnic_tlv_msg **results)
1049 {
1050 struct fbnic_fw_completion *cmpl_data;
1051 struct fbnic_dev *fbd = opaque;
1052 u32 msg_type;
1053 s32 err;
1054
1055 /* Verify we have a completion pointer */
1056 msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
1057 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
1058 if (!cmpl_data)
1059 return -ENOSPC;
1060
1061 /* Check for errors */
1062 err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
1063
1064 cmpl_data->result = err;
1065 complete(&cmpl_data->done);
1066 fbnic_fw_put_cmpl(cmpl_data);
1067
1068 return 0;
1069 }
1070
/* Send a FW_WRITE_CHUNK response back to the FW. Either carries the
 * requested slice of the image (offset/length/data) or, when
 * cancel_error is nonzero, an error attribute asking the FW to abort
 * the upgrade.
 */
int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
				 const u8 *data, u32 offset, u16 length,
				 int cancel_error)
{
	struct fbnic_tlv_msg *resp;
	int err = 0;

	resp = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
	if (!resp)
		return -ENOMEM;

	/* Report error to FW to cancel upgrade */
	if (cancel_error)
		err = fbnic_tlv_attr_put_int(resp, FBNIC_FW_WRITE_CHUNK_ERROR,
					     cancel_error);

	/* Echo back the offset/length pair along with the payload */
	if (!err && data) {
		err = fbnic_tlv_attr_put_int(resp, FBNIC_FW_WRITE_CHUNK_OFFSET,
					     offset);
		if (!err)
			err = fbnic_tlv_attr_put_int(resp,
						     FBNIC_FW_WRITE_CHUNK_LENGTH,
						     length);
		if (!err)
			err = fbnic_tlv_attr_put_value(resp,
						       FBNIC_FW_WRITE_CHUNK_DATA,
						       data + offset, length);
	}

	if (!err)
		err = fbnic_mbx_map_tlv_msg(fbd, resp);

	/* The mailbox owns the page once mapped successfully */
	if (err)
		free_page((unsigned long)resp);

	return err;
}
1117
/* Attribute layout of the FW_WRITE_CHUNK request: the offset/length pair
 * identifying the next slice of the image the FW wants.
 */
static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
	FBNIC_TLV_ATTR_LAST
};
1123
fbnic_fw_parse_fw_write_chunk_req(void * opaque,struct fbnic_tlv_msg ** results)1124 static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
1125 struct fbnic_tlv_msg **results)
1126 {
1127 struct fbnic_fw_completion *cmpl_data;
1128 struct fbnic_dev *fbd = opaque;
1129 u32 msg_type;
1130 u32 offset;
1131 u32 length;
1132
1133 /* Verify we have a completion pointer */
1134 msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
1135 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
1136 if (!cmpl_data)
1137 return -ENOSPC;
1138
1139 /* Pull length/offset pair and mark it as complete */
1140 offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
1141 length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
1142 cmpl_data->u.fw_update.offset = offset;
1143 cmpl_data->u.fw_update.length = length;
1144
1145 complete(&cmpl_data->done);
1146 fbnic_fw_put_cmpl(cmpl_data);
1147
1148 return 0;
1149 }
1150
/* Attribute layout of the FW_FINISH_UPGRADE request: a single status code */
static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
1155
/* Handle a FW_FINISH_UPGRADE request: report the final FW status to the
 * in-flight update and mark it done so no further chunks are sent.
 */
static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
						struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer. NOTE(review): the lookup
	 * deliberately uses the WRITE_CHUNK_REQ type rather than a
	 * FINISH_UPGRADE type -- presumably the fw_update completion is
	 * registered under WRITE_CHUNK_REQ for the whole upgrade flow;
	 * confirm against the registration site before changing.
	 */
	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Check for errors */
	err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);

	/* Close out update by incrementing offset by length which should
	 * match the total size of the component. Set length to 0 since no
	 * new chunks will be requested.
	 */
	cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
	cmpl_data->u.fw_update.length = 0;

	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}
1186
1187 /**
1188 * fbnic_fw_xmit_qsfp_read_msg - Transmit a QSFP read request
1189 * @fbd: FBNIC device structure
1190 * @cmpl_data: Structure to store EEPROM response in
1191 * @page: Refers to page number on page enabled QSFP modules
1192 * @bank: Refers to a collection of pages
1193 * @offset: Offset into QSFP EEPROM requested
1194 * @length: Length of section of QSFP EEPROM to fetch
1195 *
1196 * Return: zero on success, negative value on failure
1197 *
1198 * Asks the firmware to provide a section of the QSFP EEPROM back in a
1199 * message. The response will have an offset and size matching the values
1200 * provided.
1201 */
fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data,u32 page,u32 bank,u32 offset,u32 length)1202 int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
1203 struct fbnic_fw_completion *cmpl_data,
1204 u32 page, u32 bank, u32 offset, u32 length)
1205 {
1206 struct fbnic_tlv_msg *msg;
1207 int err = 0;
1208
1209 if (!length || length > TLV_MAX_DATA)
1210 return -EINVAL;
1211
1212 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_QSFP_READ_REQ);
1213 if (!msg)
1214 return -ENOMEM;
1215
1216 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_BANK, bank);
1217 if (err)
1218 goto free_message;
1219
1220 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_PAGE, page);
1221 if (err)
1222 goto free_message;
1223
1224 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_OFFSET, offset);
1225 if (err)
1226 goto free_message;
1227
1228 err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_LENGTH, length);
1229 if (err)
1230 goto free_message;
1231
1232 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
1233 if (err)
1234 goto free_message;
1235
1236 return 0;
1237
1238 free_message:
1239 free_page((unsigned long)msg);
1240 return err;
1241 }
1242
/* Attribute layout of the QSFP read response: the echoed request
 * parameters, the raw EEPROM data, and a status code.
 */
static const struct fbnic_tlv_index fbnic_qsfp_read_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_BANK),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_PAGE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_LENGTH),
	FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_QSFP_DATA),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_QSFP_ERROR),
	FBNIC_TLV_ATTR_LAST
};
1252
fbnic_fw_parse_qsfp_read_resp(void * opaque,struct fbnic_tlv_msg ** results)1253 static int fbnic_fw_parse_qsfp_read_resp(void *opaque,
1254 struct fbnic_tlv_msg **results)
1255 {
1256 struct fbnic_fw_completion *cmpl_data;
1257 struct fbnic_dev *fbd = opaque;
1258 struct fbnic_tlv_msg *data_hdr;
1259 u32 length, offset, page, bank;
1260 u8 *data;
1261 s32 err;
1262
1263 /* Verify we have a completion pointer to provide with data */
1264 cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
1265 FBNIC_TLV_MSG_ID_QSFP_READ_RESP);
1266 if (!cmpl_data)
1267 return -ENOSPC;
1268
1269 bank = fta_get_uint(results, FBNIC_FW_QSFP_BANK);
1270 if (bank != cmpl_data->u.qsfp.bank) {
1271 dev_warn(fbd->dev, "bank not equal to bank requested: %d vs %d\n",
1272 bank, cmpl_data->u.qsfp.bank);
1273 err = -EINVAL;
1274 goto msg_err;
1275 }
1276
1277 page = fta_get_uint(results, FBNIC_FW_QSFP_PAGE);
1278 if (page != cmpl_data->u.qsfp.page) {
1279 dev_warn(fbd->dev, "page not equal to page requested: %d vs %d\n",
1280 page, cmpl_data->u.qsfp.page);
1281 err = -EINVAL;
1282 goto msg_err;
1283 }
1284
1285 offset = fta_get_uint(results, FBNIC_FW_QSFP_OFFSET);
1286 length = fta_get_uint(results, FBNIC_FW_QSFP_LENGTH);
1287
1288 if (length != cmpl_data->u.qsfp.length ||
1289 offset != cmpl_data->u.qsfp.offset) {
1290 dev_warn(fbd->dev,
1291 "offset/length not equal to size requested: %d/%d vs %d/%d\n",
1292 offset, length,
1293 cmpl_data->u.qsfp.offset, cmpl_data->u.qsfp.length);
1294 err = -EINVAL;
1295 goto msg_err;
1296 }
1297
1298 err = fta_get_sint(results, FBNIC_FW_QSFP_ERROR);
1299 if (err)
1300 goto msg_err;
1301
1302 data_hdr = results[FBNIC_FW_QSFP_DATA];
1303 if (!data_hdr) {
1304 err = -ENODATA;
1305 goto msg_err;
1306 }
1307
1308 /* Copy data */
1309 data = fbnic_tlv_attr_get_value_ptr(data_hdr);
1310 memcpy(cmpl_data->u.qsfp.data, data, length);
1311 msg_err:
1312 cmpl_data->result = err;
1313 complete(&cmpl_data->done);
1314 fbnic_fw_put_cmpl(cmpl_data);
1315
1316 return err;
1317 }
1318
1319 /**
1320 * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
1321 * @fbd: FBNIC device structure
1322 * @cmpl_data: Completion data structure to store sensor response
1323 *
1324 * Asks the firmware to provide an update with the latest sensor data.
1325 * The response will contain temperature and voltage readings.
1326 *
1327 * Return: 0 on success, negative error value on failure
1328 */
fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)1329 int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
1330 struct fbnic_fw_completion *cmpl_data)
1331 {
1332 struct fbnic_tlv_msg *msg;
1333 int err;
1334
1335 if (!fbnic_fw_present(fbd))
1336 return -ENODEV;
1337
1338 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
1339 if (!msg)
1340 return -ENOMEM;
1341
1342 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
1343 if (err)
1344 goto free_message;
1345
1346 return 0;
1347
1348 free_message:
1349 free_page((unsigned long)msg);
1350 return err;
1351 }
1352
/* Attribute layout of the TSENE read response: temperature and voltage
 * readings plus a status code.
 */
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};
1359
/* Handle a TSENE read response: record temperature and voltage readings
 * into the matching completion and wake the waiter. The completion's
 * result carries the FW-reported error if any, otherwise the local parse
 * status; the function's return value is only the local status.
 */
static int fbnic_fw_parse_tsene_read_resp(void *opaque,
					  struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	s32 err_resp;
	int err = 0;

	/* Verify we have a completion pointer to provide with data */
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl_data)
		return -ENOSPC;

	/* FW-reported failure: skip reading the sensor attributes */
	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
	if (err_resp)
		goto msg_err;

	/* Both readings must be present to report either */
	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
		err = -EINVAL;
		goto msg_err;
	}

	cmpl_data->u.tsene.millidegrees =
		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
	cmpl_data->u.tsene.millivolts =
		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);

msg_err:
	cmpl_data->result = err_resp ? : err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}
1395
/* Attribute layout of a FW log message: scalar msec/index/msg attributes
 * for the current entry plus optional arrays carrying log history.
 */
static const struct fbnic_tlv_index fbnic_fw_log_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_MSEC),
	FBNIC_TLV_ATTR_U64(FBNIC_FW_LOG_INDEX),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_LOG_MSG, FBNIC_FW_LOG_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_LENGTH),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSEC_ARRAY),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_INDEX_ARRAY),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSG_ARRAY),
	FBNIC_TLV_ATTR_LAST
};
1406
/* Collect the TLV pointers for one log attribute: slot 0 gets the scalar
 * attribute for the current entry, slots 1..length get the entries of
 * the matching history array (when length is nonzero).
 */
static int fbnic_fw_process_log_array(struct fbnic_tlv_msg **results,
				      u16 length, u16 arr_type_idx,
				      u16 attr_type_idx,
				      struct fbnic_tlv_msg **tlv_array_out)
{
	struct fbnic_tlv_msg *arr;
	int arr_len;

	/* The scalar attribute for the current entry must be present */
	if (!results[attr_type_idx])
		return -EINVAL;

	tlv_array_out[0] = results[attr_type_idx];

	/* No history requested -- the single entry above is all there is */
	if (!length)
		return 0;

	arr = results[arr_type_idx];
	if (!arr)
		return -EINVAL;

	/* Payload word count, excluding the array's own attribute header */
	arr_len = le16_to_cpu(arr->hdr.len) / sizeof(u32) - 1;
	return fbnic_tlv_attr_parse_array(&arr[1], arr_len, &tlv_array_out[1],
					  fbnic_fw_log_req_index,
					  attr_type_idx, length);
}
1438
fbnic_fw_parse_logs(struct fbnic_dev * fbd,struct fbnic_tlv_msg ** msec_tlv,struct fbnic_tlv_msg ** index_tlv,struct fbnic_tlv_msg ** log_tlv,int count)1439 static int fbnic_fw_parse_logs(struct fbnic_dev *fbd,
1440 struct fbnic_tlv_msg **msec_tlv,
1441 struct fbnic_tlv_msg **index_tlv,
1442 struct fbnic_tlv_msg **log_tlv,
1443 int count)
1444 {
1445 int i;
1446
1447 for (i = 0; i < count; i++) {
1448 char log[FBNIC_FW_LOG_MAX_SIZE];
1449 ssize_t len;
1450 u64 index;
1451 u32 msec;
1452 int err;
1453
1454 if (!msec_tlv[i] || !index_tlv[i] || !log_tlv[i]) {
1455 dev_warn(fbd->dev, "Received log message with missing attributes!\n");
1456 return -EINVAL;
1457 }
1458
1459 index = fbnic_tlv_attr_get_signed(index_tlv[i], 0);
1460 msec = fbnic_tlv_attr_get_signed(msec_tlv[i], 0);
1461 len = fbnic_tlv_attr_get_string(log_tlv[i], log,
1462 FBNIC_FW_LOG_MAX_SIZE);
1463 if (len < 0)
1464 return len;
1465
1466 err = fbnic_fw_log_write(fbd, index, msec, log);
1467 if (err)
1468 return err;
1469 }
1470
1471 return 0;
1472 }
1473
/* Handle an incoming FW log message. 'length' counts history entries;
 * the current entry occupies slot 0, so fbnic_fw_parse_logs is invoked
 * with length + 1 total entries (bounded by FBNIC_FW_MAX_LOG_HISTORY).
 */
static int fbnic_fw_parse_log_req(void *opaque,
				  struct fbnic_tlv_msg **results)
{
	struct fbnic_tlv_msg *index_tlv[FBNIC_FW_MAX_LOG_HISTORY];
	struct fbnic_tlv_msg *msec_tlv[FBNIC_FW_MAX_LOG_HISTORY];
	struct fbnic_tlv_msg *log_tlv[FBNIC_FW_MAX_LOG_HISTORY];
	struct fbnic_dev *fbd = opaque;
	u16 length;
	int err;

	/* length + 1 entries must fit in the stack arrays above */
	length = fta_get_uint(results, FBNIC_FW_LOG_LENGTH);
	if (length >= FBNIC_FW_MAX_LOG_HISTORY)
		return -E2BIG;

	/* Gather msec/index/msg TLV pointers for current + history */
	err = fbnic_fw_process_log_array(results, length,
					 FBNIC_FW_LOG_MSEC_ARRAY,
					 FBNIC_FW_LOG_MSEC, msec_tlv);
	if (err)
		return err;

	err = fbnic_fw_process_log_array(results, length,
					 FBNIC_FW_LOG_INDEX_ARRAY,
					 FBNIC_FW_LOG_INDEX, index_tlv);
	if (err)
		return err;

	err = fbnic_fw_process_log_array(results, length,
					 FBNIC_FW_LOG_MSG_ARRAY,
					 FBNIC_FW_LOG_MSG, log_tlv);
	if (err)
		return err;

	err = fbnic_fw_parse_logs(fbd, msec_tlv, index_tlv, log_tlv,
				  length + 1);
	if (err)
		return err;

	return 0;
}
1513
/* Ask the FW to start or stop streaming logs over the mailbox. When
 * enabling, the request also carries the log format version and,
 * optionally, a flag asking for the buffered log history.
 */
int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
			    bool send_log_history)
{
	struct fbnic_tlv_msg *req;
	int err = 0;

	/* Mailbox log streaming requires a sufficiently new FW */
	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_LOG) {
		dev_warn(fbd->dev, "Firmware version is too old to support firmware logs!\n");
		return -EOPNOTSUPP;
	}

	req = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ);
	if (!req)
		return -ENOMEM;

	/* An empty request (no attributes) disables log streaming */
	if (enable) {
		err = fbnic_tlv_attr_put_flag(req, FBNIC_SEND_LOGS);

		/* Report request for version 1 of logs */
		if (!err)
			err = fbnic_tlv_attr_put_int(req,
						     FBNIC_SEND_LOGS_VERSION,
						     FBNIC_FW_LOG_VERSION);

		if (!err && send_log_history)
			err = fbnic_tlv_attr_put_flag(req,
						      FBNIC_SEND_LOGS_HISTORY);
	}

	if (!err)
		err = fbnic_mbx_map_tlv_msg(fbd, req);

	/* The mailbox owns the page once mapped successfully */
	if (err)
		free_page((unsigned long)req);

	return err;
}
1558
/* Dispatch table mapping incoming FW message types to their attribute
 * layouts and handler functions, with FBNIC_TLV_MSG_ERROR as the final
 * entry.
 */
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(COREDUMP_GET_INFO_RESP,
			 fbnic_coredump_info_resp_index,
			 fbnic_fw_parse_coredump_info_resp),
	FBNIC_TLV_PARSER(COREDUMP_READ_RESP, fbnic_coredump_resp_index,
			 fbnic_fw_parse_coredump_resp),
	FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
			 fbnic_fw_start_upgrade_resp_index,
			 fbnic_fw_parse_fw_start_upgrade_resp),
	FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
			 fbnic_fw_write_chunk_req_index,
			 fbnic_fw_parse_fw_write_chunk_req),
	FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
			 fbnic_fw_finish_upgrade_req_index,
			 fbnic_fw_parse_fw_finish_upgrade_req),
	FBNIC_TLV_PARSER(QSFP_READ_RESP,
			 fbnic_qsfp_read_resp_index,
			 fbnic_fw_parse_qsfp_read_resp),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_PARSER(LOG_MSG_REQ,
			 fbnic_fw_log_req_index,
			 fbnic_fw_parse_log_req),
	FBNIC_TLV_MSG_ERROR
};
1591
/* Drain completed descriptors from the Rx mailbox: unmap each page the
 * FW wrote, parse the TLV message it contains, free the page, and
 * advance head. Finishes by replenishing Rx pages for the FW.
 */
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		/* Stop at the first descriptor the FW hasn't completed */
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		/* Page is done being written by the device; reclaim it */
		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		/* TLV header length is in 32b words; it should not claim
		 * more data than the descriptor says was transferred.
		 */
		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}
1654
/* Service the mailbox from a polling context: pick up ring events, then
 * drain Tx completions and process any pending Rx messages.
 */
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}
1662
/* Bring the mailbox up by polling: poke the FW until it shows signs of
 * life, initialize both descriptor rings, then request capabilities and
 * poll until a supported management FW version is reported. Gives up
 * after roughly 10 seconds; on any failure the mailbox is cleaned up.
 */
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	unsigned long timeout = jiffies + 10 * HZ + 1;
	int err, i;

	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Poll until we get a current management firmware version, use "1"
	 * to indicate we entered the polling state waiting for a response
	 */
	for (fbd->fw_cap.running.mgmt.version = 1;
	     fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE;) {
		/* Tx ring dropping out of ready means the FW went away */
		if (!tx_mbx->ready)
			err = -ENODEV;
		if (err)
			goto clean_mbx;

		msleep(20);
		fbnic_mbx_poll(fbd);

		/* set err, but wait till mgmt.version check to report it */
		if (!time_is_after_jiffies(timeout))
			err = -ETIMEDOUT;
	}

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}
1722
/* Fail a pending completion with -EPIPE and wake its waiter; used when
 * the mailbox is going away and no response can arrive.
 */
static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
{
	cmpl_data->result = -EPIPE;
	complete(&cmpl_data->done);
}
1728
fbnic_mbx_evict_all_cmpl(struct fbnic_dev * fbd)1729 static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
1730 {
1731 int i;
1732
1733 for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
1734 struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
1735
1736 if (cmpl_data)
1737 __fbnic_fw_evict_cmpl(cmpl_data);
1738 }
1739
1740 memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
1741 }
1742
/* Quiesce the Tx mailbox: mark it not ready, fail all outstanding
 * completions, then wait (up to ~10s) for the FW to consume whatever
 * messages are already posted to the ring.
 */
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Look up the Tx mailbox we are about to quiesce */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	/* Fail pending completions since no further responses will be
	 * processed
	 */
	fbnic_mbx_evict_all_cmpl(fbd);

	spin_unlock_irq(&fbd->fw_tx_lock);

	/* Give firmware time to process packet,
	 * we will wait up to 10 seconds which is 500 waits of 20ms.
	 */
	do {
		u8 head = tx_mbx->head;

		/* Tx ring is empty once head == tail */
		if (head == tail)
			break;

		msleep(20);
		fbnic_mbx_process_tx_msgs(fbd);
	} while (time_is_after_jiffies(timeout));
}
1779
/* Report the current MAC DA TCAM contents to the FW: a nested array of
 * unicast addresses, a nested array of multicast addresses, and a flags
 * word (PROMISC/ALLMULTI/BROADCAST). Overflowing either array degrades
 * to the corresponding catch-all flag instead.
 */
int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd)
{
	struct fbnic_tlv_msg *mac_array;
	int i, addr_count = 0, err;
	struct fbnic_tlv_msg *msg;
	u32 rx_flags = 0;

	/* Nothing to do if there is no FW to sync with */
	if (!fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready)
		return 0;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ);
	if (!msg)
		return -ENOMEM;

	mac_array = fbnic_tlv_attr_nest_start(msg,
					      FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY);
	if (!mac_array)
		goto free_message_nospc;

	/* Populate the unicast MAC addrs and capture PROMISC/ALLMULTI flags.
	 * Walks downward from the promiscuous index to the unicast boundary.
	 */
	for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_PROMISC_IDX;
	     i >= fbd->mac_addr_boundary; i--) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (mac_addr->state != FBNIC_TCAM_S_VALID)
			continue;
		if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
		if (test_bit(FBNIC_MAC_ADDR_T_PROMISC, mac_addr->act_tcam))
			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
		if (!test_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam))
			continue;
		/* Array full: fall back to promiscuous instead of dropping */
		if (addr_count == FW_RPC_MAC_SYNC_UC_ARRAY_SIZE) {
			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
			continue;
		}

		err = fbnic_tlv_attr_put_value(mac_array,
					       FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
					       mac_addr->value.addr8,
					       ETH_ALEN);
		if (err)
			goto free_message;
		addr_count++;
	}

	/* Close array */
	fbnic_tlv_attr_nest_stop(msg);

	mac_array = fbnic_tlv_attr_nest_start(msg,
					      FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY);
	if (!mac_array)
		goto free_message_nospc;

	/* Repeat for multicast addrs, record BROADCAST/ALLMULTI flags.
	 * Walks upward from the broadcast index to the unicast boundary.
	 */
	for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;
	     i < fbd->mac_addr_boundary; i++) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (mac_addr->state != FBNIC_TCAM_S_VALID)
			continue;
		if (test_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam))
			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST;
		if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
		if (!test_bit(FBNIC_MAC_ADDR_T_MULTICAST, mac_addr->act_tcam))
			continue;
		/* Array full: fall back to all-multicast instead of dropping */
		if (addr_count == FW_RPC_MAC_SYNC_MC_ARRAY_SIZE) {
			rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
			continue;
		}

		err = fbnic_tlv_attr_put_value(mac_array,
					       FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
					       mac_addr->value.addr8,
					       ETH_ALEN);
		if (err)
			goto free_message;
		addr_count++;
	}

	/* Close array */
	fbnic_tlv_attr_nest_stop(msg);

	/* Report flags at end of list */
	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS,
				     rx_flags);
	if (err)
		goto free_message;

	/* Send message off to FW notifying it of current RPC config */
	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;
	return 0;
free_message_nospc:
	err = -ENOSPC;
free_message:
	free_page((unsigned long)msg);
	return err;
}
1882
fbnic_get_fw_ver_commit_str(struct fbnic_dev * fbd,char * fw_version,const size_t str_sz)1883 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
1884 const size_t str_sz)
1885 {
1886 struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
1887 const char *delim = "";
1888
1889 if (mgmt->commit[0])
1890 delim = "_";
1891
1892 fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
1893 fw_version, str_sz);
1894 }
1895
/* Allocate and initialize a FW completion for the given message type,
 * with priv_size extra bytes of zeroed trailing storage. Returned with
 * a single reference; callers drop it via fbnic_fw_put_cmpl.
 */
struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
						  size_t priv_size)
{
	struct fbnic_fw_completion *fw_cmpl;

	fw_cmpl = kzalloc(sizeof(*fw_cmpl) + priv_size, GFP_KERNEL);
	if (!fw_cmpl)
		return NULL;

	fw_cmpl->msg_type = msg_type;
	init_completion(&fw_cmpl->done);
	kref_init(&fw_cmpl->ref_count);

	return fw_cmpl;
}
1911
/* Allocate a FW completion with no private trailing data */
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
{
	return __fbnic_fw_alloc_cmpl(msg_type, 0);
}
1916
/* Drop a reference on a FW completion; the last put releases it via
 * fbnic_fw_release_cmpl_data.
 */
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
}
1921