1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright 2018-2020 Broadcom. 4 */ 5 6 #include <linux/delay.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/firmware.h> 9 #include <linux/fs.h> 10 #include <linux/idr.h> 11 #include <linux/kref.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 #include <linux/pci_regs.h> 15 #include <uapi/linux/misc/bcm_vk.h> 16 17 #include "bcm_vk.h" 18 19 #define PCI_DEVICE_ID_VALKYRIE 0x5e87 20 #define PCI_DEVICE_ID_VIPER 0x5e88 21 22 static DEFINE_IDA(bcm_vk_ida); 23 24 enum soc_idx { 25 VALKYRIE_A0 = 0, 26 VALKYRIE_B0, 27 VIPER, 28 VK_IDX_INVALID 29 }; 30 31 enum img_idx { 32 IMG_PRI = 0, 33 IMG_SEC, 34 IMG_PER_TYPE_MAX 35 }; 36 37 struct load_image_entry { 38 const u32 image_type; 39 const char *image_name[IMG_PER_TYPE_MAX]; 40 }; 41 42 #define NUM_BOOT_STAGES 2 43 /* default firmware images names */ 44 static const struct load_image_entry image_tab[][NUM_BOOT_STAGES] = { 45 [VALKYRIE_A0] = { 46 {VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}}, 47 {VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}} 48 }, 49 [VALKYRIE_B0] = { 50 {VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}}, 51 {VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}} 52 }, 53 54 [VIPER] = { 55 {VK_IMAGE_TYPE_BOOT1, {"vp-boot1.bin", ""}}, 56 {VK_IMAGE_TYPE_BOOT2, {"vp-boot2.bin", ""}} 57 }, 58 }; 59 60 /* Location of memory base addresses of interest in BAR1 */ 61 /* Load Boot1 to start of ITCM */ 62 #define BAR1_CODEPUSH_BASE_BOOT1 0x100000 63 64 /* Allow minimum 1s for Load Image timeout responses */ 65 #define LOAD_IMAGE_TIMEOUT_MS (1 * MSEC_PER_SEC) 66 67 /* Image startup timeouts */ 68 #define BOOT1_STARTUP_TIMEOUT_MS (5 * MSEC_PER_SEC) 69 #define BOOT2_STARTUP_TIMEOUT_MS (10 * MSEC_PER_SEC) 70 71 /* 1ms wait for checking the transfer complete status */ 72 #define TXFR_COMPLETE_TIMEOUT_MS 1 73 74 /* MSIX usages */ 75 #define VK_MSIX_MSGQ_MAX 3 76 #define VK_MSIX_NOTF_MAX 1 77 #define VK_MSIX_TTY_MAX 
BCM_VK_NUM_TTY
#define VK_MSIX_IRQ_MAX		(VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX + \
				 VK_MSIX_TTY_MAX)
#define VK_MSIX_IRQ_MIN_REQ	(VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX)

/* Number of bits set in DMA mask*/
#define BCM_VK_DMA_BITS 64

/* Ucode boot wait time */
#define BCM_VK_UCODE_BOOT_US (100 * USEC_PER_MSEC)
/* 50% margin */
#define BCM_VK_UCODE_BOOT_MAX_US ((BCM_VK_UCODE_BOOT_US * 3) >> 1)

/* deinit time for the card os after receiving doorbell */
#define BCM_VK_DEINIT_TIME_MS (2 * MSEC_PER_SEC)

/*
 * module parameters
 */
static bool auto_load = true;
module_param(auto_load, bool, 0444);
MODULE_PARM_DESC(auto_load,
		 "Load images automatically at PCIe probe time.\n");
static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES;
module_param(nr_scratch_pages, uint, 0444);
MODULE_PARM_DESC(nr_scratch_pages,
		 "Number of pre allocated DMAable coherent pages.\n");

/*
 * Check the card's advertised interface (semantic) version in BAR_0
 * against the driver's SEMANTIC_MAJOR/SEMANTIC_MINOR.
 *
 * Return: 0 if major is 0 (pre-release, allowed with a warning) or the
 * major numbers match; -EPFNOSUPPORT on a major-number mismatch.
 */
static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	u32 reg;
	u16 major, minor;
	int ret = 0;

	/* read interface register */
	reg = vkread32(vk, BAR_0, BAR_INTF_VER);
	major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK;
	minor = reg & BAR_INTF_VER_MASK;

	/*
	 * if major number is 0, it is pre-release and it would be allowed
	 * to continue, else, check versions accordingly
	 */
	if (!major) {
		dev_warn(dev, "Pre-release major.minor=%d.%d - drv %d.%d\n",
			 major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
	} else if (major != SEMANTIC_MAJOR) {
		dev_err(dev,
			"Intf major.minor=%d.%d rejected - drv %d.%d\n",
			major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
		ret = -EPFNOSUPPORT;
	} else {
		dev_dbg(dev,
			"Intf major.minor=%d.%d passed - drv %d.%d\n",
			major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
	}
	return ret;
}

/*
 * Poll a 32-bit register in @bar at @offset until (reg & @mask) == @value.
 *
 * While polling, BAR_BOOT_STATUS is also checked each iteration so a card
 * boot failure aborts the wait immediately rather than timing out.
 *
 * Return: 0 on match, -EFAULT if the card reports a boot error,
 * -ETIMEDOUT after @timeout_ms without a match.
 */
static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
			      u64 offset, u32 mask, u32 value,
			      unsigned long timeout_ms)
{
	struct device *dev = &vk->pdev->dev;
	unsigned long start_time;
	unsigned long timeout;
	u32 rd_val, boot_status;

	start_time = jiffies;
	timeout = start_time + msecs_to_jiffies(timeout_ms);

	do {
		rd_val = vkread32(vk, bar, offset);
		dev_dbg(dev, "BAR%d Offset=0x%llx: 0x%x\n",
			bar, offset, rd_val);

		/* check for any boot err condition */
		boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
		if (boot_status & BOOT_ERR_MASK) {
			dev_err(dev, "Boot Err 0x%x, progress 0x%x after %d ms\n",
				(boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT,
				boot_status & BOOT_PROG_MASK,
				jiffies_to_msecs(jiffies - start_time));
			return -EFAULT;
		}

		/* timeout is checked before the final re-read of the value */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		cpu_relax();
		cond_resched();
	} while ((rd_val & mask) != value);

	return 0;
}

/*
 * Push host-side info the card needs (the scratch DMA area address/size)
 * through BAR_1.  If the card's message-queue marker is not valid, only a
 * card showing the diagnostics-ready marker is accepted.
 *
 * Return: 0 on success or when in diags mode, -EINVAL otherwise.
 */
static int bcm_vk_sync_card_info(struct bcm_vk *vk)
{
	u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);

	/* check for marker, but allow diags mode to skip sync */
	if (!bcm_vk_msgq_marker_valid(vk))
		return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL);

	/*
	 * Write down scratch addr which is used for DMA. For
	 * signed part, BAR1 is accessible only after boot2 has come
	 * up
	 */
	if (vk->tdma_addr) {
		vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
			  VK_BAR1_SCRATCH_OFF_HI);
		vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
			  VK_BAR1_SCRATCH_OFF_LO);
		vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
			  VK_BAR1_SCRATCH_SZ_ADDR);
	}
	return 0;
}

/*
 * Tell the card the bus address and size of the host staging buffer used
 * for firmware download.  @bufp is the kernel virtual address of the same
 * buffer (currently unused here; kept for interface symmetry).
 */
static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
			      dma_addr_t host_buf_addr, u32 buf_size)
{
	/* update the dma address to the card */
	vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
		  VK_BAR1_DMA_BUF_OFF_HI);
	vkwrite32(vk, (u32)host_buf_addr, BAR_1,
		  VK_BAR1_DMA_BUF_OFF_LO);
	vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
}

/*
 * Download one firmware image (@filename) of the given @load_type to the
 * card and wait for it to come up.
 *
 * BOOT1: the image is staged in a 256K coherent buffer, copied into the
 * card's ITCM through BAR_1 and started via the SBL codepush register.
 * BOOT2: the image is streamed in 4M chunks through a DMA buffer; each
 * chunk is signalled via the SBI codepush register and the loop continues
 * until the card acks receipt of all data, after which the function waits
 * for the firmware-ready status and syncs card info (unless the card is
 * running standalone).
 *
 * Return: 0 on success, negative errno on any handshake/timeout/load error.
 */
static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
				     const char *filename)
{
	struct device *dev = &vk->pdev->dev;
	const struct firmware *fw = NULL;
	void *bufp = NULL;
	size_t max_buf, offset;
	int ret;
	u64 offset_codepush;
	u32 codepush;
	u32 value;
	dma_addr_t boot_dma_addr;
	bool is_stdalone;

	if (load_type == VK_IMAGE_TYPE_BOOT1) {
		/*
		 * After POR, enable VK soft BOOTSRC so bootrom do not clear
		 * the pushed image (the TCM memories).
		 */
		value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
		value |= BOOTSRC_SOFT_ENABLE;
		vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);

		codepush = CODEPUSH_BOOTSTART + CODEPUSH_BOOT1_ENTRY;
		offset_codepush = BAR_CODEPUSH_SBL;

		/* Write a 1 to request SRAM open bit */
		vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush);

		/* Wait for VK to respond */
		ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
				  SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS);
		if (ret < 0) {
			dev_err(dev, "boot1 wait SRAM err - ret(%d)\n", ret);
			goto err_buf_out;
		}

		max_buf = SZ_256K;
		bufp = dma_alloc_coherent(dev,
					  max_buf,
					  &boot_dma_addr, GFP_KERNEL);
		if (!bufp) {
			dev_err(dev, "Error allocating 0x%zx\n", max_buf);
			ret = -ENOMEM;
			goto err_buf_out;
		}
	} else if (load_type == VK_IMAGE_TYPE_BOOT2) {
		codepush = CODEPUSH_BOOT2_ENTRY;
		offset_codepush = BAR_CODEPUSH_SBI;

		/* Wait for VK to respond */
		ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
				  DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS);
		if (ret < 0) {
			dev_err(dev, "boot2 wait DDR open error - ret(%d)\n",
				ret);
			goto err_buf_out;
		}

		max_buf = SZ_4M;
		bufp = dma_alloc_coherent(dev,
					  max_buf,
					  &boot_dma_addr, GFP_KERNEL);
		if (!bufp) {
			dev_err(dev, "Error allocating 0x%zx\n", max_buf);
			ret = -ENOMEM;
			goto err_buf_out;
		}

		/* boot2 is DMA'ed by the card; hand it the buffer address */
		bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf);
	} else {
		dev_err(dev, "Error invalid image type 0x%x\n", load_type);
		ret = -EINVAL;
		goto err_buf_out;
	}

	offset = 0;
	ret = request_partial_firmware_into_buf(&fw, filename, dev,
						bufp, max_buf, offset);
	if (ret) {
		dev_err(dev, "Error %d requesting firmware file: %s\n",
			ret, filename);
		goto err_firmware_out;
	}
	dev_dbg(dev, "size=0x%zx\n", fw->size);
	/* boot1 is pushed by the host directly into ITCM via BAR_1 */
	if (load_type == VK_IMAGE_TYPE_BOOT1)
		memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1,
			    bufp,
			    fw->size);

	dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", codepush, offset_codepush);
	vkwrite32(vk, codepush, BAR_0, offset_codepush);

	if (load_type == VK_IMAGE_TYPE_BOOT1) {
		u32 boot_status;

		/* wait until done */
		ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
				  BOOT1_RUNNING,
				  BOOT1_RUNNING,
				  BOOT1_STARTUP_TIMEOUT_MS);

		/*
		 * a standalone (self-test) boot1 never reaches BOOT1_RUNNING,
		 * so a wait failure is tolerated in that case and success is
		 * judged from the standalone progress register instead
		 */
		boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
		is_stdalone = !BCM_VK_INTF_IS_DOWN(boot_status) &&
			      (boot_status & BOOT_STDALONE_RUNNING);
		if (ret && !is_stdalone) {
			dev_err(dev,
				"Timeout %ld ms waiting for boot1 to come up - ret(%d)\n",
				BOOT1_STARTUP_TIMEOUT_MS, ret);
			goto err_firmware_out;
		} else if (is_stdalone) {
			u32 reg;

			reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS);
			if ((reg & BOOT1_STDALONE_PROGRESS_MASK) ==
			     BOOT1_STDALONE_SUCCESS) {
				dev_info(dev, "Boot1 standalone success\n");
				ret = 0;
			} else {
				dev_err(dev, "Timeout %ld ms - Boot1 standalone failure\n",
					BOOT1_STARTUP_TIMEOUT_MS);
				ret = -EINVAL;
				goto err_firmware_out;
			}
		}
	} else if (load_type == VK_IMAGE_TYPE_BOOT2) {
		unsigned long timeout;

		timeout = jiffies + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);

		/* To send more data to VK than max_buf allowed at a time */
		do {
			/*
			 * Check for ack from card. when Ack is received,
			 * it means all the data is received by card.
			 * Exit the loop after ack is received.
			 */
			ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
					  FW_LOADER_ACK_RCVD_ALL_DATA,
					  FW_LOADER_ACK_RCVD_ALL_DATA,
					  TXFR_COMPLETE_TIMEOUT_MS);
			if (ret == 0) {
				dev_dbg(dev, "Exit boot2 download\n");
				break;
			} else if (ret == -EFAULT) {
				/* NOTE(review): message lacks a trailing \n */
				dev_err(dev, "Error detected during ACK waiting");
				goto err_firmware_out;
			}

			/* exit the loop, if there is no response from card */
			if (time_after(jiffies, timeout)) {
				dev_err(dev, "Error. No reply from card\n");
				ret = -ETIMEDOUT;
				goto err_firmware_out;
			}

			/* Wait for VK to open BAR space to copy new data */
			ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
					  codepush, 0,
					  TXFR_COMPLETE_TIMEOUT_MS);
			if (ret == 0) {
				/* card consumed the chunk; load the next one */
				offset += max_buf;
				ret = request_partial_firmware_into_buf
						(&fw,
						 filename,
						 dev, bufp,
						 max_buf,
						 offset);
				if (ret) {
					dev_err(dev,
						"Error %d requesting firmware file: %s offset: 0x%zx\n",
						ret, filename, offset);
					goto err_firmware_out;
				}
				dev_dbg(dev, "size=0x%zx\n", fw->size);
				dev_dbg(dev, "Signaling 0x%x to 0x%llx\n",
					codepush, offset_codepush);
				vkwrite32(vk, codepush, BAR_0, offset_codepush);
				/* reload timeout after every codepush */
				timeout = jiffies +
				    msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
			} else if (ret == -EFAULT) {
				dev_err(dev, "Error detected waiting for transfer\n");
				goto err_firmware_out;
			}
		} while (1);

		/* wait for fw status bits to indicate app ready */
		ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
				  VK_FWSTS_READY,
				  VK_FWSTS_READY,
				  BOOT2_STARTUP_TIMEOUT_MS);
		if (ret < 0) {
			dev_err(dev, "Boot2 not ready - ret(%d)\n", ret);
			goto err_firmware_out;
		}

		is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
			      BOOT_STDALONE_RUNNING;
		if (!is_stdalone) {
			ret = bcm_vk_intf_ver_chk(vk);
			if (ret) {
				dev_err(dev, "failure in intf version check\n");
				goto err_firmware_out;
			}

			/* sync & channel other info */
			ret = bcm_vk_sync_card_info(vk);
			if (ret) {
				dev_err(dev, "Syncing Card Info failure\n");
				goto err_firmware_out;
			}
		}
	}

err_firmware_out:
	release_firmware(fw);

err_buf_out:
	if (bufp)
		dma_free_coherent(dev, max_buf, bufp, boot_dma_addr);

	return ret;
}

/*
 * Inspect BAR_BOOT_STATUS to decide which image the card expects next:
 * SRAM open -> boot1, boot1 running -> boot2, anything else -> 0 (none).
 */
static u32 bcm_vk_next_boot_image(struct bcm_vk *vk)
{
	u32 boot_status;
	u32 fw_status;
	u32 load_type = 0;  /* default for unknown */

	boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
	fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);

	if (!BCM_VK_INTF_IS_DOWN(boot_status) && (boot_status & SRAM_OPEN))
		load_type = VK_IMAGE_TYPE_BOOT1;
	else if (boot_status == BOOT1_RUNNING)
		load_type = VK_IMAGE_TYPE_BOOT2;

	/* Log status so that we know different stages */
	dev_info(&vk->pdev->dev,
		 "boot-status value for next image: 0x%x : fw-status 0x%x\n",
		 boot_status, fw_status);

	return load_type;
}

/*
 * Map the PCI device id (and for Valkyrie the chip major revision) to an
 * index into image_tab.  Unknown revisions fall back to the A0 images;
 * unknown device ids return VK_IDX_INVALID.
 */
static enum soc_idx get_soc_idx(struct bcm_vk *vk)
{
	struct pci_dev *pdev = vk->pdev;
	enum soc_idx idx = VK_IDX_INVALID;
	u32 rev;
	static enum soc_idx const vk_soc_tab[] = { VALKYRIE_A0, VALKYRIE_B0 };

	switch (pdev->device) {
	case PCI_DEVICE_ID_VALKYRIE:
		/* get the chip id to decide sub-class */
		rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID));
		if (rev < ARRAY_SIZE(vk_soc_tab)) {
			idx = vk_soc_tab[rev];
		} else {
			/* Default to A0 firmware for all other chip revs */
			idx = VALKYRIE_A0;
			dev_warn(&pdev->dev,
				 "Rev %d not in image lookup table, default to idx=%d\n",
				 rev, idx);
		}
		break;

	case PCI_DEVICE_ID_VIPER:
		idx = VIPER;
		break;

	default:
		dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device);
	}
	return idx;
}

/*
 * Return the first firmware name in @entry that exists on the system, or
 * NULL if none does.  Existence is probed with a tiny (sizeof(dummy))
 * partial read so the full image is not loaded here.
 */
static const char *get_load_fw_name(struct bcm_vk *vk,
				    const struct load_image_entry *entry)
{
	const struct firmware *fw;
	struct device *dev = &vk->pdev->dev;
	int ret;
	unsigned long dummy;
	int i;

	for (i = 0; i < IMG_PER_TYPE_MAX; i++) {
		fw = NULL;
		ret = request_partial_firmware_into_buf(&fw,
							entry->image_name[i],
							dev, &dummy,
							sizeof(dummy),
							0);
		release_firmware(fw);
		if (!ret)
			return entry->image_name[i];
	}
	return NULL;
}

/*
 * Load every boot stage the card currently expects (boot1 then boot2),
 * picking the firmware file per SoC index.
 *
 * Return: 0 on success; -ENOENT if no suitable file exists; a load error
 * from bcm_vk_load_image_by_type; or -1 if the SoC index is invalid or no
 * stage matched.  NOTE(review): -1 is not a proper errno - callers only
 * check zero/non-zero, but -EINVAL would be cleaner; confirm before use.
 */
int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
{
	int i, ret = -1;
	enum soc_idx idx;
	struct device *dev = &vk->pdev->dev;
	u32 curr_type;
	const char *curr_name;

	idx = get_soc_idx(vk);
	if (idx == VK_IDX_INVALID)
		goto auto_load_all_exit;

	/* log a message to know the relative loading order */
	dev_dbg(dev, "Load All for device %d\n", vk->devid);

	for (i = 0; i < NUM_BOOT_STAGES; i++) {
		curr_type = image_tab[idx][i].image_type;
		if (bcm_vk_next_boot_image(vk) == curr_type) {
			curr_name = get_load_fw_name(vk, &image_tab[idx][i]);
			if (!curr_name) {
				dev_err(dev, "No suitable firmware exists for type %d",
					curr_type);
				ret = -ENOENT;
				goto auto_load_all_exit;
			}
			ret = bcm_vk_load_image_by_type(vk, curr_type,
							curr_name);
			dev_info(dev, "Auto load %s, ret %d\n",
				 curr_name, ret);

			if (ret) {
				dev_err(dev, "Error loading default %s\n",
					curr_name);
				goto auto_load_all_exit;
			}
		}
	}

auto_load_all_exit:
	return ret;
}

/*
 * Kick off the deferred auto-download on the driver workqueue.
 *
 * Return: 0 if queued, -EPERM if a download is already pending
 * (BCM_VK_WQ_DWNLD_PEND acts as a busy flag).
 */
static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
{
	if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
		return -EPERM;

	set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
	queue_work(vk->wq_thread, &vk->wq_work);

	return 0;
}

/*
 * deferred work queue for auto download.
566 */ 567 static void bcm_vk_wq_handler(struct work_struct *work) 568 { 569 struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work); 570 571 if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) { 572 bcm_vk_auto_load_all_images(vk); 573 574 /* 575 * at the end of operation, clear AUTO bit and pending 576 * bit 577 */ 578 clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload); 579 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); 580 } 581 } 582 583 static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val) 584 { 585 vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE); 586 } 587 588 static int bcm_vk_trigger_reset(struct bcm_vk *vk) 589 { 590 u32 i; 591 u32 value, boot_status; 592 static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME, 593 BAR_INTF_VER, 594 BAR_CARD_VOLTAGE, 595 BAR_CARD_TEMPERATURE, 596 BAR_CARD_PWR_AND_THRE }; 597 598 /* make tag '\0' terminated */ 599 vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG); 600 601 for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) { 602 vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i)); 603 vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i)); 604 } 605 for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++) 606 vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i)); 607 608 /* 609 * When boot request fails, the CODE_PUSH_OFFSET stays persistent. 610 * Allowing us to debug the failure. When we call reset, 611 * we should clear CODE_PUSH_OFFSET so ROM does not execute 612 * boot again (and fails again) and instead waits for a new 613 * codepush. 
And, if previous boot has encountered error, need 614 * to clear the entry values 615 */ 616 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); 617 if (boot_status & BOOT_ERR_MASK) { 618 dev_info(&vk->pdev->dev, 619 "Card in boot error 0x%x, clear CODEPUSH val\n", 620 boot_status); 621 value = 0; 622 } else { 623 value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL); 624 value &= CODEPUSH_MASK; 625 } 626 vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL); 627 628 /* reset fw_status with proper reason, and press db */ 629 vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS); 630 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT); 631 632 /* clear other necessary registers records */ 633 for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++) 634 vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]); 635 636 return 0; 637 } 638 639 static const struct file_operations bcm_vk_fops = { 640 .owner = THIS_MODULE, 641 .open = bcm_vk_open, 642 .release = bcm_vk_release, 643 }; 644 645 static int bcm_vk_on_panic(struct notifier_block *nb, 646 unsigned long e, void *p) 647 { 648 struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb); 649 650 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD); 651 652 return 0; 653 } 654 655 static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 656 { 657 int err; 658 int i; 659 int id; 660 int irq; 661 char name[20]; 662 struct bcm_vk *vk; 663 struct device *dev = &pdev->dev; 664 struct miscdevice *misc_device; 665 u32 boot_status; 666 667 /* allocate vk structure which is tied to kref for freeing */ 668 vk = kzalloc(sizeof(*vk), GFP_KERNEL); 669 if (!vk) 670 return -ENOMEM; 671 672 kref_init(&vk->kref); 673 674 err = pci_enable_device(pdev); 675 if (err) { 676 dev_err(dev, "Cannot enable PCI device\n"); 677 goto err_free_exit; 678 } 679 vk->pdev = pci_dev_get(pdev); 680 681 err = pci_request_regions(pdev, DRV_MODULE_NAME); 682 if (err) { 683 dev_err(dev, "Cannot obtain PCI resources\n"); 684 goto err_disable_pdev; 685 } 686 687 
/* make sure DMA is good */ 688 err = dma_set_mask_and_coherent(&pdev->dev, 689 DMA_BIT_MASK(BCM_VK_DMA_BITS)); 690 if (err) { 691 dev_err(dev, "failed to set DMA mask\n"); 692 goto err_disable_pdev; 693 } 694 695 /* The tdma is a scratch area for some DMA testings. */ 696 if (nr_scratch_pages) { 697 vk->tdma_vaddr = dma_alloc_coherent 698 (dev, 699 nr_scratch_pages * PAGE_SIZE, 700 &vk->tdma_addr, GFP_KERNEL); 701 if (!vk->tdma_vaddr) { 702 err = -ENOMEM; 703 goto err_disable_pdev; 704 } 705 } 706 707 pci_set_master(pdev); 708 pci_set_drvdata(pdev, vk); 709 710 irq = pci_alloc_irq_vectors(pdev, 711 1, 712 VK_MSIX_IRQ_MAX, 713 PCI_IRQ_MSI | PCI_IRQ_MSIX); 714 715 if (irq < VK_MSIX_IRQ_MIN_REQ) { 716 dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n", 717 VK_MSIX_IRQ_MIN_REQ, irq); 718 err = (irq >= 0) ? -EINVAL : irq; 719 goto err_disable_pdev; 720 } 721 722 if (irq != VK_MSIX_IRQ_MAX) 723 dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n", 724 irq, VK_MSIX_IRQ_MAX); 725 726 for (i = 0; i < MAX_BAR; i++) { 727 /* multiple by 2 for 64 bit BAR mapping */ 728 vk->bar[i] = pci_ioremap_bar(pdev, i * 2); 729 if (!vk->bar[i]) { 730 dev_err(dev, "failed to remap BAR%d\n", i); 731 goto err_iounmap; 732 } 733 } 734 735 id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL); 736 if (id < 0) { 737 err = id; 738 dev_err(dev, "unable to get id\n"); 739 goto err_iounmap; 740 } 741 742 vk->devid = id; 743 snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); 744 misc_device = &vk->miscdev; 745 misc_device->minor = MISC_DYNAMIC_MINOR; 746 misc_device->name = kstrdup(name, GFP_KERNEL); 747 if (!misc_device->name) { 748 err = -ENOMEM; 749 goto err_ida_remove; 750 } 751 misc_device->fops = &bcm_vk_fops, 752 753 err = misc_register(misc_device); 754 if (err) { 755 dev_err(dev, "failed to register device\n"); 756 goto err_kfree_name; 757 } 758 759 INIT_WORK(&vk->wq_work, bcm_vk_wq_handler); 760 761 /* create dedicated workqueue */ 762 vk->wq_thread = 
create_singlethread_workqueue(name); 763 if (!vk->wq_thread) { 764 dev_err(dev, "Fail to create workqueue thread\n"); 765 err = -ENOMEM; 766 goto err_misc_deregister; 767 } 768 769 /* sync other info */ 770 bcm_vk_sync_card_info(vk); 771 772 /* register for panic notifier */ 773 vk->panic_nb.notifier_call = bcm_vk_on_panic; 774 err = atomic_notifier_chain_register(&panic_notifier_list, 775 &vk->panic_nb); 776 if (err) { 777 dev_err(dev, "Fail to register panic notifier\n"); 778 goto err_destroy_workqueue; 779 } 780 781 /* 782 * lets trigger an auto download. We don't want to do it serially here 783 * because at probing time, it is not supposed to block for a long time. 784 */ 785 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); 786 if (auto_load) { 787 if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) { 788 if (bcm_vk_trigger_autoload(vk)) 789 goto err_unregister_panic_notifier; 790 } else { 791 dev_err(dev, 792 "Auto-load skipped - BROM not in proper state (0x%x)\n", 793 boot_status); 794 } 795 } 796 797 dev_dbg(dev, "BCM-VK:%u created\n", id); 798 799 return 0; 800 801 err_unregister_panic_notifier: 802 atomic_notifier_chain_unregister(&panic_notifier_list, 803 &vk->panic_nb); 804 805 err_destroy_workqueue: 806 destroy_workqueue(vk->wq_thread); 807 808 err_misc_deregister: 809 misc_deregister(misc_device); 810 811 err_kfree_name: 812 kfree(misc_device->name); 813 misc_device->name = NULL; 814 815 err_ida_remove: 816 ida_simple_remove(&bcm_vk_ida, id); 817 818 err_iounmap: 819 for (i = 0; i < MAX_BAR; i++) { 820 if (vk->bar[i]) 821 pci_iounmap(pdev, vk->bar[i]); 822 } 823 pci_release_regions(pdev); 824 825 err_disable_pdev: 826 if (vk->tdma_vaddr) 827 dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, 828 vk->tdma_vaddr, vk->tdma_addr); 829 830 pci_free_irq_vectors(pdev); 831 pci_disable_device(pdev); 832 pci_dev_put(pdev); 833 834 err_free_exit: 835 kfree(vk); 836 837 return err; 838 } 839 840 void bcm_vk_release_data(struct kref *kref) 841 { 842 
struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref); 843 struct pci_dev *pdev = vk->pdev; 844 845 dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk); 846 pci_dev_put(pdev); 847 kfree(vk); 848 } 849 850 static void bcm_vk_remove(struct pci_dev *pdev) 851 { 852 int i; 853 struct bcm_vk *vk = pci_get_drvdata(pdev); 854 struct miscdevice *misc_device = &vk->miscdev; 855 856 /* 857 * Trigger a reset to card and wait enough time for UCODE to rerun, 858 * which re-initialize the card into its default state. 859 * This ensures when driver is re-enumerated it will start from 860 * a completely clean state. 861 */ 862 bcm_vk_trigger_reset(vk); 863 usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US); 864 865 /* unregister panic notifier */ 866 atomic_notifier_chain_unregister(&panic_notifier_list, 867 &vk->panic_nb); 868 869 if (vk->tdma_vaddr) 870 dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, 871 vk->tdma_vaddr, vk->tdma_addr); 872 873 /* remove if name is set which means misc dev registered */ 874 if (misc_device->name) { 875 misc_deregister(misc_device); 876 kfree(misc_device->name); 877 ida_simple_remove(&bcm_vk_ida, vk->devid); 878 } 879 880 cancel_work_sync(&vk->wq_work); 881 destroy_workqueue(vk->wq_thread); 882 883 for (i = 0; i < MAX_BAR; i++) { 884 if (vk->bar[i]) 885 pci_iounmap(pdev, vk->bar[i]); 886 } 887 888 dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid); 889 890 pci_release_regions(pdev); 891 pci_free_irq_vectors(pdev); 892 pci_disable_device(pdev); 893 894 kref_put(&vk->kref, bcm_vk_release_data); 895 } 896 897 static void bcm_vk_shutdown(struct pci_dev *pdev) 898 { 899 struct bcm_vk *vk = pci_get_drvdata(pdev); 900 u32 reg, boot_stat; 901 902 reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS); 903 boot_stat = reg & BOOT_STATE_MASK; 904 905 if (boot_stat == BOOT1_RUNNING) { 906 /* simply trigger a reset interrupt to park it */ 907 bcm_vk_trigger_reset(vk); 908 } else if (boot_stat == BROM_NOT_RUN) { 909 int err; 
910 u16 lnksta; 911 912 /* 913 * The boot status only reflects boot condition since last reset 914 * As ucode will run only once to configure pcie, if multiple 915 * resets happen, we lost track if ucode has run or not. 916 * Here, read the current link speed and use that to 917 * sync up the bootstatus properly so that on reboot-back-up, 918 * it has the proper state to start with autoload 919 */ 920 err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); 921 if (!err && 922 (lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) { 923 reg |= BROM_STATUS_COMPLETE; 924 vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS); 925 } 926 } 927 } 928 929 static const struct pci_device_id bcm_vk_ids[] = { 930 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), }, 931 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VIPER), }, 932 { } 933 }; 934 MODULE_DEVICE_TABLE(pci, bcm_vk_ids); 935 936 static struct pci_driver pci_driver = { 937 .name = DRV_MODULE_NAME, 938 .id_table = bcm_vk_ids, 939 .probe = bcm_vk_probe, 940 .remove = bcm_vk_remove, 941 .shutdown = bcm_vk_shutdown, 942 }; 943 module_pci_driver(pci_driver); 944 945 MODULE_DESCRIPTION("Broadcom VK Host Driver"); 946 MODULE_AUTHOR("Scott Branden <scott.branden@broadcom.com>"); 947 MODULE_LICENSE("GPL v2"); 948 MODULE_VERSION("1.0"); 949