// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

#include "fbnic.h"
#include "fbnic_tlv.h"

/* Write a 64b mailbox descriptor as two 32b register writes, upper word
 * first, with a flush in between so the two halves are ordered on the bus.
 */
static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* Write the upper 32b and then the lower 32b. Doing this the
	 * FW can then read lower, upper, lower to verify that the state
	 * of the descriptor wasn't changed mid-transaction.
	 */
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}

/* Overwrite a descriptor with an "invalid" value. Note the write order is
 * the reverse of __fbnic_mbx_wr_desc(): lower word (carrying the state
 * bits in @desc) first, then the upper word is zeroed.
 */
static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* For initialization we write the lower 32b of the descriptor first.
	 * This way we can set the state to mark it invalid before we clear the
	 * upper 32b.
	 */
	fw_wr32(fbd, desc_offset, desc);
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset + 1, 0);
}

/* Read back a 64b descriptor as two 32b register reads, lower word first. */
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
	u64 desc;

	desc = fw_rd32(fbd, desc_offset);
	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;

	return desc;
}

/* Quiesce device DMA for the given mailbox direction and rewrite the whole
 * descriptor ring to its idle state (descriptor 0 zeroed, the rest marked
 * as skip descriptors).
 */
static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Disable DMA transactions from the device,
	 * and flush any transactions triggered during cleaning
	 */
	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
		break;
	}

	wrfl(fbd);

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);

	/* We then fill the rest of the ring starting at the end and moving
	 * back toward descriptor 0 with skip descriptors that have no
	 * length nor address, and tell the firmware that they can skip
	 * them and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
					    FBNIC_IPC_MBX_DESC_FW_CMPL |
					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
}

/* One-time (per probe/reset) mailbox setup: reset software state, program
 * the doorbell interrupt behavior, and reset both descriptor rings.
 */
void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reset FW Capabilities */
	memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_reset_desc_ring(fbd, i);
}

/* DMA-map a page-backed message and post it at the ring tail.
 *
 * Returns 0 on success, -ENODEV if the mailbox isn't up (or BAR gone),
 * -EBUSY if the ring is full, or -ENOSPC if the DMA mapping failed.
 * On success ownership of @msg passes to the ring (freed on completion).
 * Caller is responsible for serialization on the Tx ring (fw_tx_lock).
 */
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

	/* Ring is full when advancing tail would catch up with head */
	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	/* Clear the next descriptor's lower word before publishing ours so
	 * the FW sees a clean stop after the new tail.
	 */
	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}

/* Undo fbnic_mbx_map_msg() for one slot: unmap the DMA buffer and free the
 * backing page. Safe to call on an empty slot (msg == NULL is a no-op).
 */
static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
					 int desc_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	int direction;

	if (!mbx->buf_info[desc_idx].msg)
		return;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;
	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
			 PAGE_SIZE, direction);

	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
	mbx->buf_info[desc_idx].msg = NULL;
}

/* Reset a ring and release every buffer still attached to it. */
static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int i;

	fbnic_mbx_reset_desc_ring(fbd, mbx_idx);

	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
}

/* Tear down both mailbox rings, freeing all outstanding buffers. */
void fbnic_mbx_clean(struct fbnic_dev *fbd)
{
	int i;

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_clean_desc_ring(fbd, i);
}

/* Largest length the descriptor LEN field can encode; Rx buffers are
 * clamped to the smaller of that and the system page size.
 */
#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)

/* Top up the Rx ring with empty pages for the firmware to write into. */
static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if mailbox is not ready, or we already have pages on
	 * the ring that can be used by the firmware
	 */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but 1 unused descriptors in the Rx queue.
	 */
	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			free_page((unsigned long)msg);
	}

	return err;
}

/* Post a single complete TLV message (EOM set) on the Tx ring, taking the
 * Tx lock. Message length is hdr.len in 32b words, converted to bytes.
 * On success the ring owns @msg; on failure the caller must free it.
 */
static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
				 struct fbnic_tlv_msg *msg)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

/* Reserve a completion slot for @cmpl_data. Caller holds fw_tx_lock.
 * Returns -ENODEV if the Tx mailbox is down, -EEXIST if a completion for
 * the same msg_type is already pending, -EXFULL if all slots are taken.
 */
static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
				   struct fbnic_fw_completion *cmpl_data)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	int free = -EXFULL;
	int i;

	if (!tx_mbx->ready)
		return -ENODEV;

	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
		if (!fbd->cmpl_data[i])
			free = i;
		else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
			return -EEXIST;
	}

	if (free == -EXFULL)
		return -EXFULL;

	fbd->cmpl_data[free] = cmpl_data;

	return 0;
}

/* Release the slot holding @cmpl_data, if any. Caller holds fw_tx_lock. */
static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
				      struct fbnic_fw_completion *cmpl_data)
{
	int i;

	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
		if (fbd->cmpl_data[i] == cmpl_data) {
			fbd->cmpl_data[i] = NULL;
			break;
		}
	}
}

/* Reclaim Tx descriptors the firmware has marked complete, freeing their
 * buffers and advancing head. Stops at the first not-yet-completed entry.
 */
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}

/* Locked wrapper around fbnic_mbx_set_cmpl_slot() for external callers. */
int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
		       struct fbnic_fw_completion *cmpl_data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

/* Atomically reserve a completion slot (if @cmpl_data is non-NULL) and post
 * the request on the Tx ring; the slot is released again if the post fails.
 */
static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
				    struct fbnic_tlv_msg *msg,
				    struct fbnic_fw_completion *cmpl_data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	if (cmpl_data) {
		err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
		if (err)
			goto unlock_mbx;
	}

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	/* If we successfully reserved a completion and msg failed
	 * then clear completion data for next caller
	 */
	if (err && cmpl_data)
		fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);

unlock_mbx:
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

/* Locked wrapper around fbnic_mbx_clear_cmpl_slot() for external callers. */
void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
			  struct fbnic_fw_completion *fw_cmpl)
{
	unsigned long flags;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
}

/* kref release callback: frees the completion structure itself. */
static void fbnic_fw_release_cmpl_data(struct kref *kref)
{
	struct fbnic_fw_completion *cmpl_data;

	cmpl_data = container_of(kref, struct fbnic_fw_completion,
				 ref_count);
	kfree(cmpl_data);
}

/* Look up the pending completion registered for @msg_type and take a
 * reference on it. Returns NULL if none is pending. Caller must drop the
 * reference (fbnic_fw_put_cmpl) when done.
 */
static struct fbnic_fw_completion *
fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_fw_completion *cmpl_data = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
		if (fbd->cmpl_data[i] &&
		    fbd->cmpl_data[i]->msg_type == msg_type) {
			cmpl_data = fbd->cmpl_data[i];
			kref_get(&cmpl_data->ref_count);
			break;
		}
	}

	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return cmpl_data;
}

/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 * One the following values:
 *   -EOPNOTSUPP: Is not ASIC so mailbox is not supported
 *   -ENODEV: Device I/O error
 *   -ENOMEM: Failed to allocate message
 *   -EBUSY: No space in mailbox
 *   -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However there are no other side effects which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		free_page((unsigned long)msg);

	return err;
}

/* Mark a ring ready and enable the matching device DMA direction; the Rx
 * ring is additionally primed with buffers for the firmware.
 */
static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Enable DMA writes from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);

		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Enable DMA reads from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
		break;
	}
}

/* Check-and-clear the FW doorbell cause bit; returns true if it was set. */
static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
	/* We only need to do this on the first interrupt following reset.
	 * this primes the mailbox so that we will have cleared all the
	 * skip descriptors.
	 */
	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
		return false;

	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	return true;
}

/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
 * to FW mailbox
 *
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}

/* TLV attribute schema for the firmware capabilities response message. */
static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
	FBNIC_TLV_ATTR_LAST
};

/* Extract up to @len BMC MAC addresses from a TLV array attribute into
 * @bmc_mac_addr, zeroing any entries the firmware did not supply.
 * Returns 0 on success or a negative error from parsing.
 */
static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses */
	if (len > 8)
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}

/* Parser callback for the capabilities response: records firmware/bootloader
 * /UEFI versions and commits, link parameters, and BMC info into fbd->fw_cap.
 * Rejects (and disables the Tx mailbox for) firmware older than
 * MIN_FW_VERSION_CODE.
 */
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	/* Only overwrite all_multi if the attribute was present or there is
	 * no BMC (in which case all_multi is the 0 default).
	 */
	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	fbd->fw_cap.anti_rollback_version =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);

	return 0;
}

/* Ownership response carries no attributes. */
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_ownership_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	/* Count the ownership response as a heartbeat reply */
	fbd->last_heartbeat_response = jiffies;

	return 0;
}

/* Heartbeat response carries no attributes. */
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_heartbeat_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	fbd->last_heartbeat_response = jiffies;

	return 0;
}

/* Send a heartbeat request and record its timestamp so a missing response
 * can later be detected by fbnic_fw_heartbeat_current().
 */
static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	fbd->last_heartbeat_request = req_time;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}

/* True if the firmware has responded to the most recent heartbeat request. */
static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
{
	unsigned long last_response = fbd->last_heartbeat_response;
	unsigned long last_request = fbd->last_heartbeat_request;

	return !time_before(last_response, last_request);
}

/* Wait (up to 50 x 200ms) for the first heartbeat response, optionally
 * polling the mailbox ourselves, then queue a fresh heartbeat request.
 * Returns 0 on success, -ENODEV if firmware absent, -ETIMEDOUT otherwise.
 */
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}

/* Periodic heartbeat check: warn and disable heartbeat tracking if the
 * firmware missed the last period, then issue the next request.
 */
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response long time ago?
	 */
	if (!fbnic_fw_heartbeat_current(fbd)) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place new message on mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}

/* Ask the firmware to begin an upgrade of component @id of @len bytes.
 * @cmpl_data (if non-NULL) is registered so the response handler can signal
 * the waiter. Returns 0 on success or a negative error.
 */
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
				   struct fbnic_fw_completion *cmpl_data,
				   unsigned int id, unsigned int len)
{
	struct fbnic_tlv_msg *msg;
	int err;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	if (!len)
		return -EINVAL;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
	if (err)
		goto free_message;

	err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
				     len);
	if (err)
		goto free_message;

	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};

/* Handle the start-upgrade response: propagate the firmware's error code to
 * the waiting completion (registered under FW_START_UPGRADE_REQ).
 */
static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
						struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer */
	msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Check for errors */
	err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);

	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}

/* Reply to a firmware write-chunk request: either send the requested chunk
 * of @data ([offset, offset+length)), or report @cancel_error to abort the
 * upgrade. Note the message ID is the *response* to the FW's request.
 */
int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
				 const u8 *data, u32 offset, u16 length,
				 int cancel_error)
{
	struct fbnic_tlv_msg *msg;
	int err;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
	if (!msg)
		return -ENOMEM;

	/* Report error to FW to cancel upgrade */
	if (cancel_error) {
		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
					     cancel_error);
		if (err)
			goto free_message;
	}

	if (data) {
		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
					     offset);
		if (err)
			goto free_message;

		err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
					     length);
		if (err)
			goto free_message;

		err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
					       data + offset, length);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
	FBNIC_TLV_ATTR_LAST
};

/* Handle a firmware request for the next chunk: stash the offset/length pair
 * in the upgrade completion and wake the waiter, which sends the chunk.
 */
static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
					     struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	u32 offset;
	u32 length;

	/* Verify we have a completion pointer */
	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Pull length/offset pair and mark it as complete */
	offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
	length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
	cmpl_data->u.fw_update.offset = offset;
	cmpl_data->u.fw_update.length = length;

	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}

static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
	FBNIC_TLV_ATTR_LAST
};

/* Handle the firmware's finish-upgrade notification.
 * NOTE: intentionally looks up the FW_WRITE_CHUNK_REQ completion slot — the
 * upgrade flow keys its completion on the chunk-request type.
 */
static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
						struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	u32 msg_type;
	s32 err;

	/* Verify we have a completion pointer */
	msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
	if (!cmpl_data)
		return -ENOSPC;

	/* Check for errors */
	err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);

	/* Close out update by incrementing offset by length which should
	 * match the total size of the component. Set length to 0 since no
	 * new chunks will be requested.
	 */
	cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
	cmpl_data->u.fw_update.length = 0;

	cmpl_data->result = err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return 0;
}

/**
 * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
 * @fbd: FBNIC device structure
 * @cmpl_data: Completion data structure to store sensor response
 *
 * Asks the firmware to provide an update with the latest sensor data.
 * The response will contain temperature and voltage readings.
 *
 * Return: 0 on success, negative error value on failure
 */
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
				 struct fbnic_fw_completion *cmpl_data)
{
	struct fbnic_tlv_msg *msg;
	int err;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
	if (err)
		goto free_message;

	return 0;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};

/* Handle the sensor response: copy temperature/voltage into the completion
 * (registered under TSENE_READ_RESP) and wake the waiter. The completion
 * result is the firmware's error if it set one, otherwise the local error.
 */
static int fbnic_fw_parse_tsene_read_resp(void *opaque,
					  struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	s32 err_resp;
	int err = 0;

	/* Verify we have a completion pointer to provide with data */
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl_data)
		return -ENOSPC;

	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
	if (err_resp)
		goto msg_err;

	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
		err = -EINVAL;
		goto msg_err;
	}

	cmpl_data->u.tsene.millidegrees =
		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
	cmpl_data->u.tsene.millivolts =
		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);

msg_err:
	cmpl_data->result = err_resp ? : err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}

/* Dispatch table mapping firmware message types to attribute schemas and
 * handler callbacks for fbnic_tlv_msg_parse().
 */
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
			 fbnic_fw_start_upgrade_resp_index,
			 fbnic_fw_parse_fw_start_upgrade_resp),
	FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
			 fbnic_fw_write_chunk_req_index,
			 fbnic_fw_parse_fw_write_chunk_req),
	FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
			 fbnic_fw_finish_upgrade_req_index,
			 fbnic_fw_parse_fw_finish_upgrade_req),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_MSG_ERROR
};

/* Drain the Rx ring: for each firmware-completed descriptor, unmap the page,
 * validate the reported length, parse the TLV message, then free the page
 * and advance head. Finally the ring is replenished with fresh pages.
 */
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}

/* Poll-mode service routine: ack the doorbell and run both directions. */
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}

/* Bring up the firmware mailbox: repeatedly reset the Tx ring until the
 * firmware signals via the doorbell (up to ~10s), enable both rings, then
 * request capabilities and poll until a valid firmware version is reported.
 * Returns 0 on success; on failure the mailbox is cleaned up.
 */
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	unsigned long timeout = jiffies + 10 * HZ + 1;
	int err, i;

	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Poll until we get a current management firmware version, use "1"
	 * to indicate we entered the polling state waiting for a response
	 */
	for (fbd->fw_cap.running.mgmt.version = 1;
	     fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE;) {
		if (!tx_mbx->ready)
			err = -ENODEV;
		if (err)
			goto clean_mbx;

		msleep(20);
		fbnic_mbx_poll(fbd);

		/* set err, but wait till mgmt.version check to report it */
		if (!time_is_after_jiffies(timeout))
			err = -ETIMEDOUT;
	}

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}

/* Fail a single pending completion with -EPIPE and wake its waiter. */
static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
{
	cmpl_data->result = -EPIPE;
	complete(&cmpl_data->done);
}

/* Fail every pending completion and clear all slots (mailbox going away). */
static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
{
	int i;

	for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
		struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];

		if (cmpl_data)
			__fbnic_fw_evict_cmpl(cmpl_data);
	}

	memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
}

void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Record current Rx stats */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	/* Flush any completions as we are no longer processing Rx
*/ 1229 fbnic_mbx_evict_all_cmpl(fbd); 1230 1231 spin_unlock_irq(&fbd->fw_tx_lock); 1232 1233 /* Give firmware time to process packet, 1234 * we will wait up to 10 seconds which is 500 waits of 20ms. 1235 */ 1236 do { 1237 u8 head = tx_mbx->head; 1238 1239 /* Tx ring is empty once head == tail */ 1240 if (head == tail) 1241 break; 1242 1243 msleep(20); 1244 fbnic_mbx_process_tx_msgs(fbd); 1245 } while (time_is_after_jiffies(timeout)); 1246 } 1247 1248 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, 1249 const size_t str_sz) 1250 { 1251 struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt; 1252 const char *delim = ""; 1253 1254 if (mgmt->commit[0]) 1255 delim = "_"; 1256 1257 fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit, 1258 fw_version, str_sz); 1259 } 1260 1261 struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type) 1262 { 1263 struct fbnic_fw_completion *cmpl; 1264 1265 cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); 1266 if (!cmpl) 1267 return NULL; 1268 1269 cmpl->msg_type = msg_type; 1270 init_completion(&cmpl->done); 1271 kref_init(&cmpl->ref_count); 1272 1273 return cmpl; 1274 } 1275 1276 void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl) 1277 { 1278 kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data); 1279 } 1280