// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/vmalloc.h>

#include <drm/drm_accel.h>
#include <drm/drm_drv.h>

#include <trace/events/habanalabs.h>

#define HL_RESET_DELAY_USEC			10000	/* 10ms */

#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC	30

enum dma_alloc_type {
	DMA_ALLOC_COHERENT,
	DMA_ALLOC_POOL,
};

#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788

static void hl_device_heartbeat(struct work_struct *work);

/*
 * hl_set_dram_bar - sets the bar to allow later access to address
 *
 * @hdev: pointer to habanalabs device structure.
 * @addr: the address the caller wants to access.
 * @region: the PCI region.
 * @new_bar_region_base: the new BAR region base address.
 *
 * @return: the old BAR base address on success, U64_MAX for failure.
 *	    The caller should set it back to the old address after use.
 *
 * In case the bar space does not cover the whole address space,
 * the bar base address should be set to allow access to a given address.
 * This function can be called also if the bar doesn't need to be set,
 * in that case it just won't change the base.
 */
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
				u64 *new_bar_region_base)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_base_addr, old_base;

	if (is_power_of_2(prop->dram_pci_bar_size))
		bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
	else
		bar_base_addr = region->region_base +
				div64_u64((addr - region->region_base), prop->dram_pci_bar_size) *
				prop->dram_pci_bar_size;

	old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);

	/* in case of success we need to update the new BAR base */
	if ((old_base != U64_MAX) && new_bar_region_base)
		*new_bar_region_base = bar_base_addr;

	return old_base;
}

int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
{
	struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
	u64 old_base = 0, rc, bar_region_base = region->region_base;
	void __iomem *acc_addr;

	if (set_dram_bar) {
		old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
		if (old_base == U64_MAX)
			return -EIO;
	}

	acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
			(addr - bar_region_base);

	switch (acc_type) {
	case DEBUGFS_READ8:
		*val = readb(acc_addr);
		break;
	case DEBUGFS_WRITE8:
		writeb(*val, acc_addr);
		break;
	case DEBUGFS_READ32:
		*val = readl(acc_addr);
		break;
	case DEBUGFS_WRITE32:
		writel(*val, acc_addr);
		break;
	case DEBUGFS_READ64:
		*val = readq(acc_addr);
		break;
	case DEBUGFS_WRITE64:
		writeq(*val, acc_addr);
		break;
	}

	if (set_dram_bar) {
		rc = hl_set_dram_bar(hdev, old_base, region, NULL);
		if (rc == U64_MAX)
			return -EIO;
	}

	return 0;
}

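/*
 * Example (illustrative sketch, not a driver code path): any caller that
 * moves the DRAM BAR with hl_set_dram_bar() is expected to follow the
 * save/restore pattern that hl_access_sram_dram_region() implements above:
 *
 *	u64 old_base = hl_set_dram_bar(hdev, addr, region, NULL);
 *
 *	if (old_base == U64_MAX)
 *		return -EIO;
 *	... readX()/writeX() through the remapped BAR window ...
 *	if (hl_set_dram_bar(hdev, old_base, region, NULL) == U64_MAX)
 *		return -EIO;
 */
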
static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
					gfp_t flag, enum dma_alloc_type alloc_type,
					const char *caller)
{
	void *ptr = NULL;

	switch (alloc_type) {
	case DMA_ALLOC_COHERENT:
		ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
		break;
	case DMA_ALLOC_POOL:
		ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
		break;
	}

	if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
		trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,
						size, caller);

	return ptr;
}

static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
					dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
					const char *caller)
{
	/* this is needed to avoid warning on using freed pointer */
	u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;

	switch (alloc_type) {
	case DMA_ALLOC_COHERENT:
		hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
		break;
	case DMA_ALLOC_POOL:
		hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
		break;
	}

	trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);
}

void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
					gfp_t flag, const char *caller)
{
	return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
}

void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
					dma_addr_t dma_handle, const char *caller)
{
	hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}

void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
					dma_addr_t *dma_handle, const char *caller)
{
	return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
}

void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
					const char *caller)
{
	hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}

void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
{
	return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}

void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
{
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
}

int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir, const char *caller)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int rc, i;

	rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
	if (rc)
		return rc;

	if (!trace_habanalabs_dma_map_page_enabled())
		return 0;

	for_each_sgtable_dma_sg(sgt, sg, i)
		trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,
				page_to_phys(sg_page(sg)),
				sg->dma_address - prop->device_dma_offset_for_host_access,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
				sg->dma_length,
#else
				sg->length,
#endif
				dir, caller);

	return 0;
}

int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int rc, i;

	rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
	if (rc)
		return rc;

	/* Shift to the device's base physical address of host memory if necessary */
	if (prop->device_dma_offset_for_host_access)
		for_each_sgtable_dma_sg(sgt, sg, i)
			sg->dma_address += prop->device_dma_offset_for_host_access;

	return 0;
}

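/*
 * Example (illustrative only): a typical round trip through the sgtable
 * helpers. The offset shift is transparent to the caller - the addresses
 * stored in the sgtable are already device-visible after mapping:
 *
 *	rc = hl_asic_dma_map_sgtable(hdev, sgt, DMA_BIDIRECTIONAL);
 *	if (rc)
 *		return rc;
 *	... program the device with sg_dma_address(sgt->sgl) ...
 *	hl_asic_dma_unmap_sgtable(hdev, sgt, DMA_BIDIRECTIONAL);
 *
 * Note that hl_asic_dma_unmap_sgtable() below must subtract the same offset
 * before calling dma_unmap_sgtable(), otherwise the DMA core would be handed
 * addresses it never produced.
 */
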
void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
					enum dma_data_direction dir, const char *caller)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int i;

	hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);

	if (trace_habanalabs_dma_unmap_page_enabled()) {
		for_each_sgtable_dma_sg(sgt, sg, i)
			trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,
					page_to_phys(sg_page(sg)),
					sg->dma_address - prop->device_dma_offset_for_host_access,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
					sg->dma_length,
#else
					sg->length,
#endif
					dir, caller);
	}
}

void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int i;

	/* Cancel the device's base physical address of host memory if necessary */
	if (prop->device_dma_offset_for_host_access)
		for_each_sgtable_dma_sg(sgt, sg, i)
			sg->dma_address -= prop->device_dma_offset_for_host_access;

	dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
}

/*
 * hl_access_cfg_region - access the config region
 *
 * @hdev: pointer to habanalabs device structure
 * @addr: the address to access
 * @val: the value to write from or read to
 * @acc_type: the type of access (read/write 64/32)
 */
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type)
{
	struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
	u32 val_h, val_l;

	if (!IS_ALIGNED(addr, sizeof(u32))) {
		dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
		return -EINVAL;
	}

	switch (acc_type) {
	case DEBUGFS_READ32:
		*val = RREG32(addr - cfg_region->region_base);
		break;
	case DEBUGFS_WRITE32:
		WREG32(addr - cfg_region->region_base, *val);
		break;
	case DEBUGFS_READ64:
		val_l = RREG32(addr - cfg_region->region_base);
		val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);

		*val = (((u64) val_h) << 32) | val_l;
		break;
	case DEBUGFS_WRITE64:
		WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
		WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
		break;
	default:
		dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * hl_access_dev_mem - access device memory
 *
 * @hdev: pointer to habanalabs device structure
 * @region_type: the type of the region the address belongs to
 * @addr: the address to access
 * @val: the value to write from or read to
 * @acc_type: the type of access (r/w, 32/64)
 */
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
			u64 addr, u64 *val, enum debugfs_access_type acc_type)
{
	switch (region_type) {
	case PCI_REGION_CFG:
		return hl_access_cfg_region(hdev, addr, val, acc_type);
	case PCI_REGION_SRAM:
	case PCI_REGION_DRAM:
		return hl_access_sram_dram_region(hdev, addr, val, acc_type,
				region_type, (region_type == PCI_REGION_DRAM));
	default:
		return -EFAULT;
	}
}

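/*
 * Example (illustrative only): a debugfs handler reading a 32-bit register
 * through the dispatcher above. cfg_region_base stands for the config
 * region's base address and 0x1000 is an arbitrary offset, both chosen just
 * for this sketch:
 *
 *	u64 val = 0;
 *	int rc = hl_access_dev_mem(hdev, PCI_REGION_CFG,
 *				   cfg_region_base + 0x1000, &val, DEBUGFS_READ32);
 *
 * DRAM accesses take the same path but with set_dram_bar = true, so the BAR
 * is moved to cover the requested address and restored afterwards.
 */
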
void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
{
	va_list args;
	int str_size;

	va_start(args, fmt);
	/* Calculate formatted string length. Assuming each string is null terminated, hence
	 * increment result by 1
	 */
	str_size = vsnprintf(NULL, 0, fmt, args) + 1;
	va_end(args);

	if ((e->actual_size + str_size) < e->allocated_buf_size) {
		va_start(args, fmt);
		vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
		va_end(args);
	}

	/* Need to update the size even when not updating destination buffer to get the exact size
	 * of all input strings
	 */
	e->actual_size += str_size;
}

enum hl_device_status hl_device_status(struct hl_device *hdev)
{
	enum hl_device_status status;

	if (hdev->device_fini_pending) {
		status = HL_DEVICE_STATUS_MALFUNCTION;
	} else if (hdev->reset_info.in_reset) {
		if (hdev->reset_info.in_compute_reset)
			status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
		else
			status = HL_DEVICE_STATUS_IN_RESET;
	} else if (hdev->reset_info.needs_reset) {
		status = HL_DEVICE_STATUS_NEEDS_RESET;
	} else if (hdev->disabled) {
		status = HL_DEVICE_STATUS_MALFUNCTION;
	} else if (!hdev->init_done) {
		status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
	} else {
		status = HL_DEVICE_STATUS_OPERATIONAL;
	}

	return status;
}

bool hl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
{
	enum hl_device_status current_status;

	current_status = hl_device_status(hdev);
	if (status)
		*status = current_status;

	switch (current_status) {
	case HL_DEVICE_STATUS_MALFUNCTION:
	case HL_DEVICE_STATUS_IN_RESET:
	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
	case HL_DEVICE_STATUS_NEEDS_RESET:
		return false;
	case HL_DEVICE_STATUS_OPERATIONAL:
	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
	default:
		return true;
	}
}

bool hl_ctrl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
{
	enum hl_device_status current_status;

	current_status = hl_device_status(hdev);
	if (status)
		*status = current_status;

	switch (current_status) {
	case HL_DEVICE_STATUS_MALFUNCTION:
		return false;
	case HL_DEVICE_STATUS_IN_RESET:
	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
	case HL_DEVICE_STATUS_NEEDS_RESET:
	case HL_DEVICE_STATUS_OPERATIONAL:
	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
	default:
		return true;
	}
}

static void print_idle_status_mask(struct hl_device *hdev, const char *message,
					u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
	if (idle_mask[3])
		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",
			dev_name(&hdev->pdev->dev), message,
			idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
	else if (idle_mask[2])
		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",
			dev_name(&hdev->pdev->dev), message,
			idle_mask[2], idle_mask[1], idle_mask[0]);
	else if (idle_mask[1])
		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",
			dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);
	else
		dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,
			idle_mask[0]);
}

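/*
 * Example (illustrative only): because hl_engine_data_sprintf() keeps
 * accumulating actual_size even when the destination buffer is full, a
 * caller can detect truncation after the fact and retry with a larger
 * allocation, the same convention as snprintf()-style return values:
 *
 *	hl_engine_data_sprintf(&e, "engine %u is busy\n", id);
 *	if (e.actual_size >= e.allocated_buf_size)
 *		... buffer was too small, actual_size is the required size ...
 */
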
static void hpriv_release(struct kref *ref)
{
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	bool reset_device, device_is_idle = true;
	struct hl_fpriv *hpriv;
	struct hl_device *hdev;

	hpriv = container_of(ref, struct hl_fpriv, refcount);

	hdev = hpriv->hdev;

	hdev->asic_funcs->send_device_activity(hdev, false);

	hl_debugfs_remove_file(hpriv);

	mutex_destroy(&hpriv->ctx_lock);
	mutex_destroy(&hpriv->restore_phase_mutex);

	/* There should be no memory buffers at this point and handles IDR can be destroyed */
	hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);

	/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
	 * reset that waits for device release.
	 */
	reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;

	/* Check the device idle status and reset if not idle.
	 * Skip it if already in reset, or if device is going to be reset in any case.
	 */
	if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)
		device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
							HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	if (!device_is_idle) {
		print_idle_status_mask(hdev, "device is not idle after user context is closed",
					idle_mask);
		reset_device = true;
	}

	/* We need to remove the user from the list to make sure the reset process won't
	 * try to kill the user process. Because, if we got here, it means there are no
	 * more driver/device resources that the user process is occupying, so there is
	 * no need to kill it.
	 *
	 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
	 * a race between the release and opening the device again. We don't want to let
	 * a user open the device while a reset is about to happen.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_list_lock);

	put_pid(hpriv->taskpid);

	if (reset_device) {
		hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
	} else {
		/* Scrubbing is handled within hl_device_reset(), so here we need to do it directly */
		int rc = hdev->asic_funcs->scrub_device_mem(hdev);

		if (rc) {
			dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
			hl_device_reset(hdev, HL_DRV_RESET_HARD);
		}
	}

	/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
	 * thread, we don't care because in_reset is marked, so if a user tries to open
	 * the device it will fail on that, even if compute_ctx is false.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_compute_ctx_active = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	hdev->compute_ctx_in_release = 0;

	/* release the eventfd */
	if (hpriv->notifier_event.eventfd)
		eventfd_ctx_put(hpriv->notifier_event.eventfd);

	mutex_destroy(&hpriv->notifier_event.lock);

	kfree(hpriv);
}

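/*
 * hpriv_release() above is only ever invoked through kref_put(); the
 * matching get/put helpers are defined right below. Example (illustrative
 * only) of the usual pattern for a code path that needs the fpriv to stay
 * alive across an asynchronous operation:
 *
 *	hl_hpriv_get(hpriv);
 *	... async work that dereferences hpriv ...
 *	hl_hpriv_put(hpriv);	(returns 1 if this put released the object)
 */
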
void hl_hpriv_get(struct hl_fpriv *hpriv)
{
	kref_get(&hpriv->refcount);
}

int hl_hpriv_put(struct hl_fpriv *hpriv)
{
	return kref_put(&hpriv->refcount, hpriv_release);
}

static void print_device_in_use_info(struct hl_device *hdev,
		struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)
{
	u32 active_cs_num, dmabuf_export_cnt;
	bool unknown_reason = true;
	char buf[128];
	size_t size;
	int offset;

	size = sizeof(buf);
	offset = 0;

	active_cs_num = hl_get_active_cs_num(hdev);
	if (active_cs_num) {
		unknown_reason = false;
		offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
	}

	dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
	if (dmabuf_export_cnt) {
		unknown_reason = false;
		offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
					dmabuf_export_cnt);
	}

	if (mm_fini_stats->n_busy_cb) {
		unknown_reason = false;
		offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",
					mm_fini_stats->n_busy_cb);
	}

	if (unknown_reason)
		scnprintf(buf + offset, size - offset, " [unknown reason]");

	dev_notice(hdev->dev, "%s%s\n", message, buf);
}

/*
 * hl_device_release() - release function for habanalabs device.
 * @ddev: pointer to DRM device structure.
 * @file_priv: pointer to DRM file private data structure.
 *
 * Called when process closes a habanalabs device
 */
void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;
	struct hl_device *hdev = to_hl_device(ddev);
	struct hl_mem_mgr_fini_stats mm_fini_stats;

	if (!hdev) {
		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
		put_pid(hpriv->taskpid);
	}

	hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);

	/* Memory buffers might still be in use at this point and thus the handles IDR destruction
	 * is postponed to hpriv_release().
	 */
	hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);

	hdev->compute_ctx_in_release = 1;

	if (!hl_hpriv_put(hpriv)) {
		print_device_in_use_info(hdev, &mm_fini_stats,
				"User process closed FD but device still in use");
		hl_device_reset(hdev, HL_DRV_RESET_HARD);
	}

	hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
}

static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
	struct hl_fpriv *hpriv = filp->private_data;
	struct hl_device *hdev = hpriv->hdev;

	filp->private_data = NULL;

	if (!hdev) {
		pr_err("Closing FD after device was removed\n");
		goto out;
	}

	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
	put_pid(hpriv->taskpid);

	kfree(hpriv);

	return 0;
}

static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	unsigned long vm_pgoff;

	if (!hdev) {
		pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
		return -ENODEV;
	}

	vm_pgoff = vma->vm_pgoff;

	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
	case HL_MMAP_TYPE_BLOCK:
		vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
		return hl_hw_block_mmap(hpriv, vma);

	case HL_MMAP_TYPE_CB:
	case HL_MMAP_TYPE_TS_BUFF:
		return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
	}

	return -EINVAL;
}

/*
 * hl_mmap - mmap function for habanalabs device
 *
 * @*filp: pointer to file structure
 * @*vma: pointer to vm_area_struct of the process
 *
 * Called when process does an mmap on habanalabs device. Call the relevant mmap
 * function at the end of the common code.
 */
int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct hl_fpriv *hpriv = file_priv->driver_priv;

	return __hl_mmap(hpriv, vma);
}

static const struct file_operations hl_ctrl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open_ctrl,
	.release = hl_device_release_ctrl,
	.unlocked_ioctl = hl_ioctl_control,
	.compat_ioctl = hl_ioctl_control
};

static void device_release_func(struct device *dev)
{
	kfree(dev);
}

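/*
 * Example (illustrative only): the mmap offset encodes both the buffer type
 * and its handle. Userspace passes back the offset it got from the driver,
 * and __hl_mmap() above demultiplexes it:
 *
 *	vm_pgoff layout: [ type bits (HL_MMAP_TYPE_MASK) | offset value ]
 *
 *	type  = vm_pgoff & HL_MMAP_TYPE_MASK;
 *	value = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
 *
 * HL_MMAP_TYPE_BLOCK is routed to hl_hw_block_mmap(), while CB and
 * timestamp buffers share the generic memory-manager path.
 */
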
/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @class: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for habanalabs's device.
 */
static int device_init_cdev(struct hl_device *hdev, const struct class *class,
				int minor, const struct file_operations *fops,
				char *name, struct cdev *cdev,
				struct device **dev)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	*dev = kzalloc_obj(**dev);
	if (!*dev)
		return -ENOMEM;

	device_initialize(*dev);
	(*dev)->devt = MKDEV(hdev->major, minor);
	(*dev)->class = class;
	(*dev)->release = device_release_func;
	dev_set_drvdata(*dev, hdev);
	dev_set_name(*dev, "%s", name);

	return 0;
}

static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
{
	const struct class *accel_class = hdev->drm.accel->kdev->class;
	char name[32];
	int rc;

	hdev->cdev_idx = hdev->drm.accel->index;

	/* Initialize cdev and device structures for the control device */
	snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);
	rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,
				&hdev->cdev_ctrl, &hdev->dev_ctrl);
	if (rc)
		return rc;

	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
	if (rc) {
		dev_err(hdev->dev_ctrl,
			"failed to add an accel control char device to the system\n");
		goto free_ctrl_device;
	}

	rc = hl_sysfs_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize sysfs\n");
		goto delete_ctrl_cdev_device;
	}

	hl_debugfs_add_device(hdev);

	hdev->cdev_sysfs_debugfs_created = true;

	return 0;

delete_ctrl_cdev_device:
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
free_ctrl_device:
	put_device(hdev->dev_ctrl);
	return rc;
}

static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
{
	if (!hdev->cdev_sysfs_debugfs_created)
		return;

	hl_sysfs_fini(hdev);

	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
	put_device(hdev->dev_ctrl);
}

static void device_hard_reset_pending(struct work_struct *work)
{
	struct hl_device_reset_work *device_reset_work =
		container_of(work, struct hl_device_reset_work, reset_work.work);
	struct hl_device *hdev = device_reset_work->hdev;
	u32 flags;
	int rc;

	flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;

	rc = hl_device_reset(hdev, flags);

	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
		struct hl_ctx *ctx = hl_get_compute_ctx(hdev);

		if (ctx) {
			/* The read refcount value should be subtracted by one, because the read is
			 * protected with hl_get_compute_ctx().
			 */
			dev_info(hdev->dev,
				"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
				kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
			hl_ctx_put(ctx);
		} else {
			dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
				HL_PENDING_RESET_PER_SEC);
		}

		queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
					secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
	}
}

static void device_release_watchdog_func(struct work_struct *work)
{
	struct hl_device_reset_work *watchdog_work =
			container_of(work, struct hl_device_reset_work, reset_work.work);
	struct hl_device *hdev = watchdog_work->hdev;
	u32 flags;

	dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");

	flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;

	hl_device_reset(hdev, flags);
}

/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists
 */
static int device_early_init(struct hl_device *hdev)
{
	int i, rc;
	char workq_name[32];

	switch (hdev->asic_type) {
	case ASIC_GOYA:
		goya_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI:
		gaudi_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI_SEC:
		gaudi_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2B:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2C:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2D:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EINVAL;
	}

	rc = hdev->asic_funcs->early_init(hdev);
	if (rc)
		return rc;

	rc = hl_asid_init(hdev);
	if (rc)
		goto early_fini;

	if (hdev->asic_prop.completion_queues_count) {
		hdev->cq_wq = kzalloc_objs(struct workqueue_struct *,
						hdev->asic_prop.completion_queues_count);
		if (!hdev->cq_wq) {
			rc = -ENOMEM;
			goto asid_fini;
		}
	}

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
		snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
		if (hdev->cq_wq[i] == NULL) {
			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
			rc = -ENOMEM;
			goto free_cq_wq;
		}
	}

	snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
	hdev->eq_wq = create_singlethread_workqueue(workq_name);
	if (hdev->eq_wq == NULL) {
		dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
		rc = -ENOMEM;
		goto free_cq_wq;
	}

	snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
	hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
	if (!hdev->cs_cmplt_wq) {
		dev_err(hdev->dev,
			"Failed to allocate CS completions workqueue\n");
		rc = -ENOMEM;
		goto free_eq_wq;
	}

	snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
	hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
	if (!hdev->ts_free_obj_wq) {
		dev_err(hdev->dev,
			"Failed to allocate Timestamp registration free workqueue\n");
		rc = -ENOMEM;
		goto free_cs_cmplt_wq;
	}

	snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
	hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
	if (!hdev->prefetch_wq) {
		dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
		rc = -ENOMEM;
		goto free_ts_free_wq;
	}

	hdev->hl_chip_info = kzalloc_obj(struct hwmon_chip_info);
	if (!hdev->hl_chip_info) {
		rc = -ENOMEM;
		goto free_prefetch_wq;
	}

	rc = hl_mmu_if_set_funcs(hdev);
	if (rc)
		goto free_chip_info;

	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);

	snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
	hdev->reset_wq = create_singlethread_workqueue(workq_name);
	if (!hdev->reset_wq) {
		rc = -ENOMEM;
		dev_err(hdev->dev, "Failed to create device reset WQ\n");
		goto free_cb_mgr;
	}

	INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);

	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
	hdev->device_reset_work.hdev = hdev;
	hdev->device_fini_pending = 0;

	INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
				device_release_watchdog_func);
	hdev->device_release_watchdog_work.hdev = hdev;

	mutex_init(&hdev->send_cpu_message_lock);
	mutex_init(&hdev->debug_lock);
	INIT_LIST_HEAD(&hdev->cs_mirror_list);
	spin_lock_init(&hdev->cs_mirror_lock);
	spin_lock_init(&hdev->reset_info.lock);
	INIT_LIST_HEAD(&hdev->fpriv_list);
	INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
	mutex_init(&hdev->fpriv_list_lock);
	mutex_init(&hdev->fpriv_ctrl_list_lock);
	mutex_init(&hdev->clk_throttling.lock);

	return 0;

free_cb_mgr:
	hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
	kfree(hdev->hl_chip_info);
free_prefetch_wq:
	destroy_workqueue(hdev->prefetch_wq);
free_ts_free_wq:
	destroy_workqueue(hdev->ts_free_obj_wq);
free_cs_cmplt_wq:
	destroy_workqueue(hdev->cs_cmplt_wq);
free_eq_wq:
	destroy_workqueue(hdev->eq_wq);
free_cq_wq:
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		if (hdev->cq_wq[i])
			destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);
asid_fini:
	hl_asid_fini(hdev);
early_fini:
	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);

	return rc;
}

/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_early_fini(struct hl_device *hdev)
{
	int i;

	mutex_destroy(&hdev->debug_lock);
	mutex_destroy(&hdev->send_cpu_message_lock);

	mutex_destroy(&hdev->fpriv_list_lock);
	mutex_destroy(&hdev->fpriv_ctrl_list_lock);

	mutex_destroy(&hdev->clk_throttling.lock);

	hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);

	kfree(hdev->hl_chip_info);

	destroy_workqueue(hdev->prefetch_wq);
	destroy_workqueue(hdev->ts_free_obj_wq);
	destroy_workqueue(hdev->cs_cmplt_wq);
	destroy_workqueue(hdev->eq_wq);
	destroy_workqueue(hdev->reset_wq);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);

	hl_asid_fini(hdev);

	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);
}

static bool is_pci_link_healthy(struct hl_device *hdev)
{
	u16 device_id;

	if (!hdev->pdev)
		return false;

	pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id);

	return (device_id == hdev->pdev->device);
}

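/*
 * is_pci_link_healthy() relies on the convention that configuration reads on
 * a dead or disconnected PCIe link complete with all-ones (0xffff for a
 * word), which can never match the real device ID. A sketch of the check,
 * assuming that convention:
 *
 *	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
 *	link is healthy  <=>  device_id == pdev->device
 */
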
static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
{
	struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
	u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (!prop->cpucp_info.eq_health_check_supported)
		return true;

	if (!hdev->eq_heartbeat_received) {
		dev_err(hdev->dev, "EQ heartbeat event was not received!\n");

		dev_err(hdev->dev,
			"EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",
			hdev->event_queue.ci,
			heartbeat_debug_info->heartbeat_event_counter,
			&hdev->heartbeat_debug_info.last_eq_heartbeat_ts,
			hdev->kernel_queues[cpu_q_id].pi,
			atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
			atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
			&hdev->heartbeat_debug_info.last_pq_heartbeat_ts);

		hl_eq_dump(hdev, &hdev->event_queue);

		return false;
	}

	hdev->eq_heartbeat_received = false;

	return true;
}

static void hl_device_heartbeat(struct work_struct *work)
{
	struct hl_device *hdev = container_of(work, struct hl_device,
						work_heartbeat.work);
	struct hl_info_fw_err_info info = {0};
	u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;

	/* Start heartbeat checks only after driver has enabled events from FW */
	if (!hl_device_operational(hdev, NULL) || !hdev->init_done)
		goto reschedule;

	/*
	 * For the EQ health check we need to check whether the driver received the
	 * heartbeat EQ event, in order to validate that the EQ is working.
	 * Reschedule only if both the EQ is healthy and we managed to send the next
	 * heartbeat.
	 */
	if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))
		goto reschedule;

	if (hl_device_operational(hdev, NULL))
		dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
			is_pci_link_healthy(hdev) ? "healthy" : "broken");

	info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
	info.event_mask = &event_mask;
	hl_handle_fw_err(hdev, &info);
	hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);

	return;

reschedule:
	/*
	 * prev_reset_trigger tracks consecutive fatal h/w errors until the first
	 * heartbeat immediately post reset.
	 * If control reached here, then at least one heartbeat work has been
	 * scheduled since the last reset/init cycle.
	 * So if the device is not already in a reset cycle, reset prev_reset_trigger,
	 * as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR status for at least one
	 * heartbeat. From this point the driver restarts tracking future consecutive
	 * fatal errors.
	 */
	if (!hdev->reset_info.in_reset)
		hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	schedule_delayed_work(&hdev->work_heartbeat,
				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}

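/*
 * The heartbeat is effectively a two-way handshake: the driver sends a
 * heartbeat packet to the firmware and, independently, expects the firmware
 * to post a heartbeat event on the EQ. eq_heartbeat_received is set when
 * that event arrives (in the EQ event handling code outside this file - an
 * assumption based on how the flag is used here) and is consumed by
 * hl_device_eq_heartbeat_received() above, so each scheduling period checks
 * a fresh indication rather than a stale one.
 */
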
/*
 * device_late_init - do late stuff initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Do stuff that either needs the device H/W queues to be active or needs
 * to happen after all the rest of the initialization is finished
 */
static int device_late_init(struct hl_device *hdev)
{
	int rc;

	if (hdev->asic_funcs->late_init) {
		rc = hdev->asic_funcs->late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"failed late initialization for the H/W\n");
			return rc;
		}
	}

	hdev->high_pll = hdev->asic_prop.high_pll;
	hdev->late_init_done = true;

	return 0;
}

/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_late_fini(struct hl_device *hdev)
{
	if (!hdev->late_init_done)
		return;

	if (hdev->asic_funcs->late_fini)
		hdev->asic_funcs->late_fini(hdev);

	hdev->late_init_done = false;
}

int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{
	u64 max_power, curr_power, dc_power, dividend, divisor;
	int rc;

	max_power = hdev->max_power;
	dc_power = hdev->asic_prop.dc_power_default;
	divisor = max_power - dc_power;
	if (!divisor) {
		dev_warn(hdev->dev, "device utilization is not supported\n");
		return -EOPNOTSUPP;
	}
	rc = hl_fw_cpucp_power_get(hdev, &curr_power);
	if (rc)
		return rc;

	curr_power = clamp(curr_power, dc_power, max_power);

	dividend = (curr_power - dc_power) * 100;
	*utilization = (u32) div_u64(dividend, divisor);

	return 0;
}

int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
	int rc = 0;

	mutex_lock(&hdev->debug_lock);

	if (!enable) {
		if (!hdev->in_debug) {
			dev_err(hdev->dev,
				"Failed to disable debug mode because device was not in debug mode\n");
			rc = -EFAULT;
			goto out;
		}

		if (!hdev->reset_info.hard_reset_pending)
			hdev->asic_funcs->halt_coresight(hdev, ctx);

		hdev->in_debug = 0;

		goto out;
	}

	if (hdev->in_debug) {
		dev_err(hdev->dev,
			"Failed to enable debug mode because device is already in debug mode\n");
		rc = -EFAULT;
		goto out;
	}

	hdev->in_debug = 1;

out:
	mutex_unlock(&hdev->debug_lock);

	return rc;
}

static void take_release_locks(struct hl_device *hdev)
{
	/* Flush anyone that is inside the critical section of enqueue
	 * jobs to the H/W
	 */
	hdev->asic_funcs->hw_queues_lock(hdev);
	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Flush processes that are sending message to CPU */
	mutex_lock(&hdev->send_cpu_message_lock);
	mutex_unlock(&hdev->send_cpu_message_lock);

	/* Flush anyone that is inside device open */
	mutex_lock(&hdev->fpriv_list_lock);
	mutex_unlock(&hdev->fpriv_list_lock);
	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}

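/*
 * Worked example (illustrative numbers) for hl_device_utilization() above:
 * with dc_power_default = 75 W, max_power = 350 W and a current reading of
 * 212 W, the divisor is 275 and the utilization is
 *
 *	(212 - 75) * 100 / 275 = 49 %
 *
 * i.e. utilization is the position of the current power between the idle
 * (DC) floor and the configured maximum, not a fraction of absolute power.
 */
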
static void hl_abort_waiting_for_completions(struct hl_device *hdev)
{
	hl_abort_waiting_for_cs_completions(hdev);

	/* Release all pending user interrupts, each pending user interrupt
	 * holds a reference to a user context.
	 */
	hl_release_pending_user_interrupts(hdev);
}

static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
				bool skip_wq_flush)
{
	if (hard_reset) {
		if (hdev->heartbeat)
			cancel_delayed_work_sync(&hdev->work_heartbeat);

		device_late_fini(hdev);
	}

	/*
	 * Halt the engines and disable interrupts so we won't get any more
	 * completions from H/W and we won't have any accesses from the
	 * H/W to the host machine
	 */
	hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);

	/* Go over all the queues, release all CS and their jobs */
	hl_cs_rollback_all(hdev, skip_wq_flush);

	/* flush the MMU prefetch workqueue */
	flush_workqueue(hdev->prefetch_wq);

	hl_abort_waiting_for_completions(hdev);
}

/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
	int rc;

	pci_save_state(hdev->pdev);

	/* Block future CS/VM/JOB completion operations */
	spin_lock(&hdev->reset_info.lock);
	if (hdev->reset_info.in_reset) {
		spin_unlock(&hdev->reset_info.lock);
		dev_err(hdev->dev, "Can't suspend while in reset\n");
		return -EIO;
	}
	hdev->reset_info.in_reset = 1;
	spin_unlock(&hdev->reset_info.lock);

	/* This blocks all other stuff that is not blocked by in_reset */
	hdev->disabled = true;

	take_release_locks(hdev);

	rc = hdev->asic_funcs->suspend(hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to disable PCI access of device CPU\n");

	/* Shut down the device */
	pci_disable_device(hdev->pdev);
	pci_set_power_state(hdev->pdev, PCI_D3hot);

	return 0;
}

/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
	int rc;

	pci_set_power_state(hdev->pdev, PCI_D0);
	pci_restore_state(hdev->pdev);
	rc = pci_enable_device_mem(hdev->pdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI device in resume\n");
		return rc;
	}

	pci_set_master(hdev->pdev);

	rc = hdev->asic_funcs->resume(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to resume device after suspend\n");
		goto disable_device;
	}

	/* 'in_reset' was set to true during suspend, now we must clear it in order
	 * for hard reset to be performed
	 */
	spin_lock(&hdev->reset_info.lock);
	hdev->reset_info.in_reset = 0;
	spin_unlock(&hdev->reset_info.lock);

	rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
	if (rc) {
		dev_err(hdev->dev, "Failed to reset device during resume\n");
		goto disable_device;
	}

	return 0;

disable_device:
	pci_disable_device(hdev->pdev);

	return rc;
}

static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
	struct task_struct *task = NULL;
	struct list_head *hpriv_list;
	struct hl_fpriv *hpriv;
	struct mutex *hpriv_lock;
	u32 pending_cnt;

	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	/* Giving time for user to close FD, and for processes that are inside
	 * hl_device_open to finish
	 */
	if (!list_empty(hpriv_list))
		ssleep(1);

	if (timeout) {
		pending_cnt = timeout;
	} else {
		if (hdev->process_kill_trial_cnt) {
			/* Processes have already been killed */
			pending_cnt = 1;
			goto wait_for_processes;
		} else {
			/* Wait a small period after process kill */
			pending_cnt = HL_PENDING_RESET_PER_SEC;
		}
	}

	mutex_lock(hpriv_lock);

	/* This section must be protected because we are dereferencing
	 * pointers that are freed if the process exits
	 */
	list_for_each_entry(hpriv, hpriv_list, dev_node) {
		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
		if (task) {
			dev_info(hdev->dev, "Killing user process pid=%d\n",
				task_pid_nr(task));
			send_sig(SIGKILL, task, 1);
			usleep_range(1000, 10000);

			put_task_struct(task);
		} else {
			dev_dbg(hdev->dev,
				"Can't get task struct for user process %d, process was killed from outside the driver\n",
				pid_nr(hpriv->taskpid));
		}
	}

	mutex_unlock(hpriv_lock);

	/*
	 * We killed the open users, but that doesn't mean they are closed.
	 * It could be that they are running a long cleanup phase in the driver
	 * e.g. MMU unmappings, or running other long teardown flow even before
	 * our cleanup.
	 * Therefore we need to wait again to make sure they are closed before
	 * continuing with the reset.
	 */

wait_for_processes:
	while ((!list_empty(hpriv_list)) && (pending_cnt)) {
		dev_dbg(hdev->dev,
			"Waiting for all unmap operations to finish before hard reset\n");

		pending_cnt--;

		ssleep(1);
	}

	/* All processes exited successfully */
	if (list_empty(hpriv_list))
		return 0;

	/* Give up waiting for processes to exit */
	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
		return -ETIME;

	hdev->process_kill_trial_cnt++;

	return -EBUSY;
}

static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
	struct list_head *hpriv_list;
	struct hl_fpriv *hpriv;
	struct mutex *hpriv_lock;

	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	mutex_lock(hpriv_lock);
	list_for_each_entry(hpriv, hpriv_list, dev_node)
		hpriv->hdev = NULL;
	mutex_unlock(hpriv_lock);
}

static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
{
	/* If the reset is due to a heartbeat failure, the device CPU is not responsive,
	 * in which case there is no point sending a PCI disable message to it.
	 */
	if ((flags & HL_DRV_RESET_HARD) &&
			!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
		/* Disable PCI access from device F/W so it won't send
		 * us additional interrupts. We disable MSI/MSI-X at
		 * the halt_engines function and we can't have the F/W
		 * sending us interrupts after that. We need to disable
		 * the access here because if the device is marked
		 * disabled, the message won't be sent. Also, in case
		 * of heartbeat, the device CPU is marked as disabled
		 * so this message won't be sent.
		 */
		if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
			return;

		/* disable_irq also generates a sync irq, which verifies that the last EQs are
		 * handled before disabled is set. The IRQ will be enabled again in the
		 * request_irq call.
		 */
		if (hdev->cpu_queues_enable)
			disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));
	}
}

static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{
	u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	/* No consecutive mechanism when user context exists */
	if (hdev->is_compute_ctx_active)
		return;

	/*
	 * 'reset cause' is being updated here, because getting here
	 * means that it's the 1st time and the last time we're here
	 * ('in_reset' makes sure of it). This makes sure that
	 * 'reset_cause' will continue holding its 1st recorded reason!
	 */
	if (flags & HL_DRV_RESET_HEARTBEAT) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
		cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
	} else if (flags & HL_DRV_RESET_TDR) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
		cur_reset_trigger = HL_DRV_RESET_TDR;
	} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
		cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
	} else {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
	}

	/*
	 * If the reset cause is the same twice in a row, reset_trigger_repeated
	 * is set, and if this reset is due to a fatal FW error the device is
	 * put in an unstable state.
	 */
	if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
		hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
		hdev->reset_info.reset_trigger_repeated = 0;
	} else {
		hdev->reset_info.reset_trigger_repeated = 1;
	}
}

static void reset_heartbeat_debug_info(struct hl_device *hdev)
{
	hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;
	hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;
	hdev->heartbeat_debug_info.heartbeat_event_counter = 0;
}

static inline void device_heartbeat_schedule(struct hl_device *hdev)
{
	if (!hdev->heartbeat)
		return;

	reset_heartbeat_debug_info(hdev);

	/*
	 * Before scheduling the heartbeat, the driver checks whether the EQ heartbeat
	 * event has been received. For the first schedule we need to set the indication
	 * to true; for the following ones it will be true only if the EQ event was
	 * actually sent by the FW.
	 */
	hdev->eq_heartbeat_received = true;

	schedule_delayed_work(&hdev->work_heartbeat,
				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}

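/*
 * Example (illustrative only) of the consecutive-trigger bookkeeping above:
 * a first heartbeat-triggered reset records HL_DRV_RESET_HEARTBEAT in
 * prev_reset_trigger and clears reset_trigger_repeated. If the very next
 * reset is again a heartbeat reset, reset_trigger_repeated becomes 1, and
 * hl_device_reset() below will then refuse to bring the device back up
 * (two back-to-back fatal resets put it in an unusable state).
 */
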
/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @flags: reset flags.
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
	bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
		schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	struct hl_ctx *ctx;
	int i, rc, hw_fini_rc;

	if (!hdev->init_done) {
		dev_err(hdev->dev, "Can't reset before initialization is done\n");
		return 0;
	}

	hard_reset = !!(flags & HL_DRV_RESET_HARD);
	from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
	fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
	from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
	delay_reset = !!(flags & HL_DRV_RESET_DELAY);
	from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
	reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;

	if (hdev->cpld_shutdown) {
		dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");
		return -EIO;
	}

	if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
		dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
		return 0;
	}

	if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
		dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
		hard_reset = true;
	}

	if (reset_upon_device_release) {
		if (hard_reset) {
			dev_crit(hdev->dev,
				"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
			return -EINVAL;
		}

		goto do_reset;
	}

	if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
		dev_dbg(hdev->dev,
			"asic doesn't allow inference soft reset - do hard-reset instead\n");
		hard_reset = true;
	}

do_reset:
	/* Re-entry of reset thread */
	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
		goto kill_processes;

	/*
	 * Prevent concurrency in this function - only one reset should be
	 * done at any given time. We need to perform this only if we didn't
	 * get here from a dedicated hard reset thread.
	 */
	if (!from_hard_reset_thread) {
		/* Block future CS/VM/JOB completion operations */
		spin_lock(&hdev->reset_info.lock);
		if (hdev->reset_info.in_reset) {
			/* We allow scheduling of a hard reset only during a compute reset */
			if (hard_reset && hdev->reset_info.in_compute_reset)
				hdev->reset_info.hard_reset_schedule_flags = flags;
			spin_unlock(&hdev->reset_info.lock);
			return 0;
		}

		/* This still allows the completion of some KDMA ops
		 * Update this before in_reset because in_compute_reset implies we are in reset
		 */
		hdev->reset_info.in_compute_reset = !hard_reset;

		hdev->reset_info.in_reset = 1;

		spin_unlock(&hdev->reset_info.lock);

		/* Cancel the device release watchdog work if required.
		 * In case of reset-upon-device-release while the release watchdog work is
		 * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
		 */
		if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
			struct hl_device_reset_work *watchdog_work =
					&hdev->device_release_watchdog_work;

			hdev->reset_info.watchdog_active = 0;
			if (!from_watchdog_thread)
				cancel_delayed_work_sync(&watchdog_work->reset_work);

			if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
				hdev->reset_info.in_compute_reset = 0;
				flags |= HL_DRV_RESET_HARD;
				flags &= ~HL_DRV_RESET_DEV_RELEASE;
				hard_reset = true;
			}
		}

		if (delay_reset)
			usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);

escalate_reset_flow:
		handle_reset_trigger(hdev, flags);
		send_disable_pci_access(hdev, flags);

		/* This also blocks future CS/VM/JOB completion operations */
		hdev->disabled = true;

		take_release_locks(hdev);

		if (hard_reset)
			dev_info(hdev->dev, "Going to reset device\n");
		else if (reset_upon_device_release)
			dev_dbg(hdev->dev, "Going to reset device after release by user\n");
		else
			dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
	}

	if ((hard_reset) && (!from_hard_reset_thread)) {
		hdev->reset_info.hard_reset_pending = true;

		hdev->process_kill_trial_cnt = 0;

		hdev->device_reset_work.flags = flags;

		/*
		 * Because the reset function can't run from heartbeat work,
		 * we need to call the reset function from a dedicated work.
		 */
		queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);

		return 0;
	}

	cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);

kill_processes:
	if (hard_reset) {
		/* Kill processes here after CS rollback. This is because the
		 * process can't really exit until all its CSs are done, which
		 * is what we do in cs rollback
		 */
		rc = device_kill_open_processes(hdev, 0, false);

		if (rc == -EBUSY) {
			if (hdev->device_fini_pending) {
				dev_crit(hdev->dev,
					"%s Failed to kill all open processes, stopping hard reset\n",
					dev_name(&(hdev)->pdev->dev));
				goto out_err;
			}

			/* signal reset thread to reschedule */
			return rc;
		}

		if (rc) {
			dev_crit(hdev->dev,
				"%s Failed to kill all open processes, stopping hard reset\n",
				dev_name(&(hdev)->pdev->dev));
			goto out_err;
		}

		/* Flush the Event queue workers to make sure no other thread is
		 * reading or writing to registers during the reset
		 */
		flush_workqueue(hdev->eq_wq);
	}

	/* Reset the H/W. It will be in idle state after this returns */
	hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);

	if (hard_reset) {
		hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;

		/* Release kernel context */
		if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
			hdev->kernel_ctx = NULL;

		hl_vm_fini(hdev);
		hl_mmu_fini(hdev);
		hl_eq_reset(hdev, &hdev->event_queue);
	}

	/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
	hl_hw_queue_reset(hdev, hard_reset);
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_reset(hdev, &hdev->completion_queue[i]);

	/* Make sure the context switch phase will run again */
	ctx = hl_get_compute_ctx(hdev);
	if (ctx) {
		atomic_set(&ctx->thread_ctx_switch_token, 1);
		ctx->thread_ctx_switch_wait_token = 0;
		hl_ctx_put(ctx);
	}

	if (hw_fini_rc) {
		rc = hw_fini_rc;
		goto out_err;
	}

	/* Finished tear-down, starting to re-initialize */

	if (hard_reset) {
		hdev->device_cpu_disabled = false;
		hdev->reset_info.hard_reset_pending = false;

		/*
		 * Put the device in an unusable state if there are 2 back to back resets due to
		 * fatal errors.
		 */
		if (hdev->reset_info.reset_trigger_repeated &&
				(hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR ||
					hdev->reset_info.prev_reset_trigger ==
							HL_DRV_RESET_HEARTBEAT)) {
			dev_crit(hdev->dev,
				"%s Consecutive fatal errors, stopping hard reset\n",
				dev_name(&(hdev)->pdev->dev));
			rc = -EIO;
			goto out_err;
		}

		if (hdev->kernel_ctx) {
			dev_crit(hdev->dev,
				"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
				dev_name(&(hdev)->pdev->dev));
			rc = -EBUSY;
			goto out_err;
		}

		rc = hl_mmu_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to initialize MMU S/W after hard reset\n");
			goto out_err;
		}

		/* Allocate the kernel context */
		hdev->kernel_ctx = kzalloc_obj(*hdev->kernel_ctx);
		if (!hdev->kernel_ctx) {
			rc = -ENOMEM;
			hl_mmu_fini(hdev);
			goto out_err;
		}

		hdev->is_compute_ctx_active = false;

		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init kernel ctx in hard reset\n");
			kfree(hdev->kernel_ctx);
			hdev->kernel_ctx = NULL;
			hl_mmu_fini(hdev);
			goto out_err;
		}
	}

	/* Device is now enabled as part of the initialization requires
	 * communication with the device firmware to get information that
	 * is required for the initialization itself
	 */
	hdev->disabled = false;

	/* F/W security enabled indication might be updated after hard-reset */
	if (hard_reset) {
		rc = hl_fw_read_preboot_status(hdev);
		if (rc)
			goto out_err;
	}

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
		goto out_err;
	}

	/* If device is not idle fail the reset process */
	if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
			HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
		print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
		rc = -EIO;
		goto out_err;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
dev_err(hdev->dev, "Failed to detect if device is alive after reset\n"); 1904 goto out_err; 1905 } 1906 1907 if (hard_reset) { 1908 rc = device_late_init(hdev); 1909 if (rc) { 1910 dev_err(hdev->dev, "Failed late init after hard reset\n"); 1911 goto out_err; 1912 } 1913 1914 rc = hl_vm_init(hdev); 1915 if (rc) { 1916 dev_err(hdev->dev, "Failed to init memory module after hard reset\n"); 1917 goto out_err; 1918 } 1919 1920 if (!hdev->asic_prop.fw_security_enabled) 1921 hl_fw_set_max_power(hdev); 1922 } else { 1923 rc = hdev->asic_funcs->compute_reset_late_init(hdev); 1924 if (rc) { 1925 if (reset_upon_device_release) 1926 dev_err(hdev->dev, 1927 "Failed late init in reset after device release\n"); 1928 else 1929 dev_err(hdev->dev, "Failed late init after compute reset\n"); 1930 goto out_err; 1931 } 1932 } 1933 1934 rc = hdev->asic_funcs->scrub_device_mem(hdev); 1935 if (rc) { 1936 dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc); 1937 goto out_err; 1938 } 1939 1940 spin_lock(&hdev->reset_info.lock); 1941 hdev->reset_info.in_compute_reset = 0; 1942 1943 /* Schedule hard reset only if requested and if not already in hard reset. 1944 * We keep 'in_reset' enabled, so no other reset can go in during the hard 1945 * reset schedule 1946 */ 1947 if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags) 1948 schedule_hard_reset = true; 1949 else 1950 hdev->reset_info.in_reset = 0; 1951 1952 spin_unlock(&hdev->reset_info.lock); 1953 1954 hdev->reset_info.needs_reset = false; 1955 1956 if (hard_reset) 1957 dev_info(hdev->dev, 1958 "Successfully finished resetting the %s device\n", 1959 dev_name(&(hdev)->pdev->dev)); 1960 else 1961 dev_dbg(hdev->dev, 1962 "Successfully finished resetting the %s device\n", 1963 dev_name(&(hdev)->pdev->dev)); 1964 1965 if (hard_reset) { 1966 hdev->reset_info.hard_reset_cnt++; 1967 1968 device_heartbeat_schedule(hdev); 1969 1970 /* After reset is done, we are ready to receive events from 1971 * the F/W. We can't do it before because we will ignore events 1972 * and if those events are fatal, we won't know about it and 1973 * the device will be operational although it shouldn't be 1974 */ 1975 hdev->asic_funcs->enable_events_from_fw(hdev); 1976 } else { 1977 if (!reset_upon_device_release) 1978 hdev->reset_info.compute_reset_cnt++; 1979 1980 if (schedule_hard_reset) { 1981 dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n"); 1982 flags = hdev->reset_info.hard_reset_schedule_flags; 1983 hdev->reset_info.hard_reset_schedule_flags = 0; 1984 hard_reset = true; 1985 goto escalate_reset_flow; 1986 } 1987 } 1988 1989 return 0; 1990 1991 out_err: 1992 hdev->disabled = true; 1993 1994 spin_lock(&hdev->reset_info.lock); 1995 hdev->reset_info.in_compute_reset = 0; 1996 1997 if (hard_reset) { 1998 dev_err(hdev->dev, 1999 "%s Failed to reset! 
Device is NOT usable\n", 2000 dev_name(&(hdev)->pdev->dev)); 2001 hdev->reset_info.hard_reset_cnt++; 2002 } else { 2003 if (reset_upon_device_release) { 2004 dev_err(hdev->dev, "Failed to reset device after user release\n"); 2005 flags &= ~HL_DRV_RESET_DEV_RELEASE; 2006 } else { 2007 dev_err(hdev->dev, "Failed to do compute reset\n"); 2008 hdev->reset_info.compute_reset_cnt++; 2009 } 2010 2011 spin_unlock(&hdev->reset_info.lock); 2012 flags |= HL_DRV_RESET_HARD; 2013 hard_reset = true; 2014 goto escalate_reset_flow; 2015 } 2016 2017 hdev->reset_info.in_reset = 0; 2018 2019 spin_unlock(&hdev->reset_info.lock); 2020 2021 return rc; 2022 } 2023 2024 /* 2025 * hl_device_cond_reset() - conditionally reset the device. 2026 * @hdev: pointer to habanalabs device structure. 2027 * @flags: reset flags. 2028 * @event_mask: events to notify user about. 2029 * 2030 * Conditionally reset the device, or alternatively schedule a watchdog work to reset the device 2031 * unless another reset precedes it. 2032 */ 2033 int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask) 2034 { 2035 struct hl_ctx *ctx = NULL; 2036 2037 /* F/W reset cannot be postponed */ 2038 if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW) 2039 goto device_reset; 2040 2041 /* Device release watchdog is relevant only if user exists and gets a reset notification */ 2042 if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) { 2043 dev_err(hdev->dev, "Resetting device without a reset indication to user\n"); 2044 goto device_reset; 2045 } 2046 2047 ctx = hl_get_compute_ctx(hdev); 2048 if (!ctx) 2049 goto device_reset; 2050 2051 /* 2052 * There is no point in postponing the reset if user is not registered for events. 2053 * However if no eventfd_ctx exists but the device release watchdog is already scheduled, it 2054 * just implies that user has unregistered as part of handling a previous event. In this 2055 * case an immediate reset is not required. 2056 */ 2057 if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active) 2058 goto device_reset; 2059 2060 /* Schedule the device release watchdog work unless reset is already in progress or if the 2061 * work is already scheduled.
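* If the work is already scheduled, the new reset flags are only accumulated into the pending work.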
2062 */ 2063 spin_lock(&hdev->reset_info.lock); 2064 if (hdev->reset_info.in_reset) { 2065 spin_unlock(&hdev->reset_info.lock); 2066 goto device_reset; 2067 } 2068 2069 if (hdev->reset_info.watchdog_active) { 2070 hdev->device_release_watchdog_work.flags |= flags; 2071 goto out; 2072 } 2073 2074 hdev->device_release_watchdog_work.flags = flags; 2075 dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n", 2076 hdev->device_release_watchdog_timeout_sec); 2077 schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work, 2078 secs_to_jiffies(hdev->device_release_watchdog_timeout_sec)); 2079 hdev->reset_info.watchdog_active = 1; 2080 out: 2081 spin_unlock(&hdev->reset_info.lock); 2082 2083 hl_notifier_event_send_all(hdev, event_mask); 2084 2085 hl_ctx_put(ctx); 2086 2087 hl_abort_waiting_for_completions(hdev); 2088 2089 return 0; 2090 2091 device_reset: 2092 if (event_mask) 2093 hl_notifier_event_send_all(hdev, event_mask); 2094 if (ctx) 2095 hl_ctx_put(ctx); 2096 2097 return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD); 2098 } 2099 2100 static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask) 2101 { 2102 mutex_lock(&notifier_event->lock); 2103 notifier_event->events_mask |= event_mask; 2104 2105 if (notifier_event->eventfd) 2106 eventfd_signal(notifier_event->eventfd); 2107 2108 mutex_unlock(&notifier_event->lock); 2109 } 2110 2111 /* 2112 * hl_notifier_event_send_all - notify all user processes via eventfd 2113 * 2114 * @hdev: pointer to habanalabs device structure 2115 * @event_mask: the occurred event/s 2117 */ 2118 void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask) 2119 { 2120 struct hl_fpriv *hpriv; 2121 2122 if (!event_mask) { 2123 dev_warn(hdev->dev, "Skip sending zero event\n"); 2124 return; 2125 } 2126 2127 mutex_lock(&hdev->fpriv_list_lock); 2128 2129 list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) 2130 hl_notifier_event_send(&hpriv->notifier_event, event_mask); 2131 2132 mutex_unlock(&hdev->fpriv_list_lock); 2133 } 2134 2135 /* 2136 * hl_device_init - main initialization function for habanalabs device 2137 * 2138 * @hdev: pointer to habanalabs device structure 2139 * 2140 * Allocate an id for the device, do early initialization and then call the 2141 * ASIC specific initialization functions. Finally, create the cdev and the
Finally, create the cdev and the 2142 * Linux device to expose it to the user 2143 */ 2144 int hl_device_init(struct hl_device *hdev) 2145 { 2146 int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt; 2147 struct hl_ts_free_jobs *free_jobs_data; 2148 bool expose_interfaces_on_err = false; 2149 void *p; 2150 2151 /* Initialize ASIC function pointers and perform early init */ 2152 rc = device_early_init(hdev); 2153 if (rc) 2154 goto out_disabled; 2155 2156 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count + 2157 hdev->asic_prop.user_interrupt_count; 2158 2159 if (user_interrupt_cnt) { 2160 hdev->user_interrupt = kzalloc_objs(*hdev->user_interrupt, 2161 user_interrupt_cnt); 2162 if (!hdev->user_interrupt) { 2163 rc = -ENOMEM; 2164 goto early_fini; 2165 } 2166 2167 /* Timestamp records supported only if CQ supported in device */ 2168 if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) { 2169 for (i = 0 ; i < user_interrupt_cnt ; i++) { 2170 p = vzalloc(TIMESTAMP_FREE_NODES_NUM * 2171 sizeof(struct timestamp_reg_free_node)); 2172 if (!p) { 2173 rc = -ENOMEM; 2174 goto free_usr_intr_mem; 2175 } 2176 free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data; 2177 free_jobs_data->free_nodes_pool = p; 2178 free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM; 2179 free_jobs_data->next_avail_free_node_idx = 0; 2180 } 2181 } 2182 } 2183 2184 free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data; 2185 p = vzalloc(TIMESTAMP_FREE_NODES_NUM * 2186 sizeof(struct timestamp_reg_free_node)); 2187 if (!p) { 2188 rc = -ENOMEM; 2189 goto free_usr_intr_mem; 2190 } 2191 2192 free_jobs_data->free_nodes_pool = p; 2193 free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM; 2194 free_jobs_data->next_avail_free_node_idx = 0; 2195 2196 /* 2197 * Start calling ASIC initialization. First S/W then H/W and finally 2198 * late init 2199 */ 2200 rc = hdev->asic_funcs->sw_init(hdev); 2201 if (rc) 2202 goto free_common_usr_intr_mem; 2203 2204 2205 /* initialize completion structure for multi CS wait */ 2206 hl_multi_cs_completion_init(hdev); 2207 2208 /* 2209 * Initialize the H/W queues. Must be done before hw_init, because 2210 * there the addresses of the kernel queue are being written to the 2211 * registers of the device 2212 */ 2213 rc = hl_hw_queues_create(hdev); 2214 if (rc) { 2215 dev_err(hdev->dev, "failed to initialize kernel queues\n"); 2216 goto sw_fini; 2217 } 2218 2219 cq_cnt = hdev->asic_prop.completion_queues_count; 2220 2221 /* 2222 * Initialize the completion queues. Must be done before hw_init, 2223 * because there the addresses of the completion queues are being 2224 * passed as arguments to request_irq 2225 */ 2226 if (cq_cnt) { 2227 hdev->completion_queue = kzalloc_objs(*hdev->completion_queue, 2228 cq_cnt); 2229 2230 if (!hdev->completion_queue) { 2231 dev_err(hdev->dev, 2232 "failed to allocate completion queues\n"); 2233 rc = -ENOMEM; 2234 goto hw_queues_destroy; 2235 } 2236 } 2237 2238 for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) { 2239 rc = hl_cq_init(hdev, &hdev->completion_queue[i], 2240 hdev->asic_funcs->get_queue_id_for_cq(hdev, i)); 2241 if (rc) { 2242 dev_err(hdev->dev, 2243 "failed to initialize completion queue\n"); 2244 goto cq_fini; 2245 } 2246 hdev->completion_queue[i].cq_idx = i; 2247 } 2248 2249 hdev->shadow_cs_queue = kzalloc_objs(struct hl_cs *, 2250 hdev->asic_prop.max_pending_cs); 2251 if (!hdev->shadow_cs_queue) { 2252 rc = -ENOMEM; 2253 goto cq_fini; 2254 } 2255 2256 /* 2257 * Initialize the event queue. 
Must be done before hw_init, 2258 * because there the address of the event queue is being 2259 * passed as argument to request_irq 2260 */ 2261 rc = hl_eq_init(hdev, &hdev->event_queue); 2262 if (rc) { 2263 dev_err(hdev->dev, "failed to initialize event queue\n"); 2264 goto free_shadow_cs_queue; 2265 } 2266 2267 /* MMU S/W must be initialized before kernel context is created */ 2268 rc = hl_mmu_init(hdev); 2269 if (rc) { 2270 dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n"); 2271 goto eq_fini; 2272 } 2273 2274 /* Allocate the kernel context */ 2275 hdev->kernel_ctx = kzalloc_obj(*hdev->kernel_ctx); 2276 if (!hdev->kernel_ctx) { 2277 rc = -ENOMEM; 2278 goto mmu_fini; 2279 } 2280 2281 hdev->is_compute_ctx_active = false; 2282 2283 hdev->asic_funcs->state_dump_init(hdev); 2284 2285 hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC; 2286 2287 hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL; 2288 2289 rc = hl_debugfs_device_init(hdev); 2290 if (rc) { 2291 dev_err(hdev->dev, "failed to initialize debugfs entry structure\n"); 2292 kfree(hdev->kernel_ctx); 2293 goto mmu_fini; 2294 } 2295 2296 /* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after 2297 * hl_debugfs_device_init(). 2298 */ 2299 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); 2300 if (rc) { 2301 dev_err(hdev->dev, "failed to initialize kernel context\n"); 2302 kfree(hdev->kernel_ctx); 2303 goto debugfs_device_fini; 2304 } 2305 2306 rc = hl_cb_pool_init(hdev); 2307 if (rc) { 2308 dev_err(hdev->dev, "failed to initialize CB pool\n"); 2309 goto release_ctx; 2310 } 2311 2312 rc = hl_dec_init(hdev); 2313 if (rc) { 2314 dev_err(hdev->dev, "Failed to initialize the decoder module\n"); 2315 goto cb_pool_fini; 2316 } 2317 2318 /* 2319 * From this point, override rc (=0) in case of an error to allow debugging 2320 * (by adding char devices and creating sysfs/debugfs files as part of the error flow). 2321 */ 2322 expose_interfaces_on_err = true; 2323 2324 /* Device is now enabled as part of the initialization requires 2325 * communication with the device firmware to get information that 2326 * is required for the initialization itself 2327 */ 2328 hdev->disabled = false; 2329 2330 rc = hdev->asic_funcs->hw_init(hdev); 2331 if (rc) { 2332 dev_err(hdev->dev, "failed to initialize the H/W\n"); 2333 rc = 0; 2334 goto out_disabled; 2335 } 2336 2337 /* Check that the communication with the device is working */ 2338 rc = hdev->asic_funcs->test_queues(hdev); 2339 if (rc) { 2340 dev_err(hdev->dev, "Failed to detect if device is alive\n"); 2341 rc = 0; 2342 goto out_disabled; 2343 } 2344 2345 rc = device_late_init(hdev); 2346 if (rc) { 2347 dev_err(hdev->dev, "Failed late initialization\n"); 2348 rc = 0; 2349 goto out_disabled; 2350 } 2351 2352 dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n", 2353 hdev->asic_name, 2354 hdev->asic_prop.dram_size / SZ_1G); 2355 2356 rc = hl_vm_init(hdev); 2357 if (rc) { 2358 dev_err(hdev->dev, "Failed to initialize memory module\n"); 2359 rc = 0; 2360 goto out_disabled; 2361 } 2362 2363 /* 2364 * Expose devices and sysfs/debugfs files to user. 2365 * From here there is no need to expose them in case of an error. 
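* Failures past this point therefore keep the interfaces registered and override rc to 0, so the device stays visible for debugging.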
2366 */ 2367 expose_interfaces_on_err = false; 2368 2369 rc = drm_dev_register(&hdev->drm, 0); 2370 if (rc) { 2371 dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc); 2372 rc = 0; 2373 goto out_disabled; 2374 } 2375 2376 rc = cdev_sysfs_debugfs_add(hdev); 2377 if (rc) { 2378 dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n"); 2379 rc = 0; 2380 goto out_disabled; 2381 } 2382 2383 /* Need to call this again because the max power might change, 2384 * depending on card type for certain ASICs 2385 */ 2386 if (hdev->asic_prop.set_max_power_on_device_init && 2387 !hdev->asic_prop.fw_security_enabled) 2388 hl_fw_set_max_power(hdev); 2389 2390 /* 2391 * hl_hwmon_init() must be called after device_late_init(), because only 2392 * there we get the information from the device about which 2393 * hwmon-related sensors the device supports. 2394 * Furthermore, it must be done after adding the device to the system. 2395 */ 2396 rc = hl_hwmon_init(hdev); 2397 if (rc) { 2398 dev_err(hdev->dev, "Failed to initialize hwmon\n"); 2399 rc = 0; 2400 goto out_disabled; 2401 } 2402 2403 /* Scheduling the EQ heartbeat thread must come after driver is done with all 2404 * initializations, as we want to make sure the FW gets enough time to be prepared 2405 * to respond to heartbeat packets. 2406 */ 2407 device_heartbeat_schedule(hdev); 2408 2409 dev_notice(hdev->dev, 2410 "Successfully added device %s to habanalabs driver\n", 2411 dev_name(&(hdev)->pdev->dev)); 2412 2413 /* After initialization is done, we are ready to receive events from 2414 * the F/W. We can't do it before because we will ignore events and if 2415 * those events are fatal, we won't know about it and the device will 2416 * be operational although it shouldn't be 2417 */ 2418 hdev->asic_funcs->enable_events_from_fw(hdev); 2419 2420 hdev->init_done = true; 2421 2422 return 0; 2423 2424 cb_pool_fini: 2425 hl_cb_pool_fini(hdev); 2426 release_ctx: 2427 if (hl_ctx_put(hdev->kernel_ctx) != 1) 2428 dev_err(hdev->dev, 2429 "kernel ctx is still alive on initialization failure\n"); 2430 debugfs_device_fini: 2431 hl_debugfs_device_fini(hdev); 2432 mmu_fini: 2433 hl_mmu_fini(hdev); 2434 eq_fini: 2435 hl_eq_fini(hdev, &hdev->event_queue); 2436 free_shadow_cs_queue: 2437 kfree(hdev->shadow_cs_queue); 2438 cq_fini: 2439 for (i = 0 ; i < cq_ready_cnt ; i++) 2440 hl_cq_fini(hdev, &hdev->completion_queue[i]); 2441 kfree(hdev->completion_queue); 2442 hw_queues_destroy: 2443 hl_hw_queues_destroy(hdev); 2444 sw_fini: 2445 hdev->asic_funcs->sw_fini(hdev); 2446 free_common_usr_intr_mem: 2447 vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool); 2448 free_usr_intr_mem: 2449 if (user_interrupt_cnt) { 2450 for (i = 0 ; i < user_interrupt_cnt ; i++) { 2451 if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool) 2452 break; 2453 vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool); 2454 } 2455 kfree(hdev->user_interrupt); 2456 } 2457 early_fini: 2458 device_early_fini(hdev); 2459 out_disabled: 2460 hdev->disabled = true; 2461 if (expose_interfaces_on_err) { 2462 drm_dev_register(&hdev->drm, 0); 2463 cdev_sysfs_debugfs_add(hdev); 2464 } 2465 2466 pr_err("Failed to initialize accel%d. 
Device %s is NOT usable!\n", 2467 hdev->cdev_idx, dev_name(&hdev->pdev->dev)); 2468 2469 return rc; 2470 } 2471 2472 /* 2473 * hl_device_fini - main tear-down function for habanalabs device 2474 * 2475 * @hdev: pointer to habanalabs device structure 2476 * 2477 * Destroy the device, call ASIC fini functions and release the id 2478 */ 2479 void hl_device_fini(struct hl_device *hdev) 2480 { 2481 u32 user_interrupt_cnt; 2482 bool device_in_reset; 2483 ktime_t timeout; 2484 u64 reset_sec; 2485 int i, rc; 2486 2487 dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev)); 2488 2489 hdev->device_fini_pending = 1; 2490 flush_delayed_work(&hdev->device_reset_work.reset_work); 2491 2492 if (hdev->pldm) 2493 reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT; 2494 else 2495 reset_sec = HL_HARD_RESET_MAX_TIMEOUT; 2496 2497 /* 2498 * This function is competing with the reset function, so try to 2499 * take the reset atomic and if we are already in the middle of reset, 2500 * wait until reset function is finished. Reset function is designed 2501 * to always finish. However, in Gaudi, because of all the network 2502 * ports, the hard reset could take between 10-30 seconds 2503 */ 2504 2505 timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000); 2506 2507 spin_lock(&hdev->reset_info.lock); 2508 device_in_reset = !!hdev->reset_info.in_reset; 2509 if (!device_in_reset) 2510 hdev->reset_info.in_reset = 1; 2511 spin_unlock(&hdev->reset_info.lock); 2512 2513 while (device_in_reset) { 2514 usleep_range(50, 200); 2515 2516 spin_lock(&hdev->reset_info.lock); 2517 device_in_reset = !!hdev->reset_info.in_reset; 2518 if (!device_in_reset) 2519 hdev->reset_info.in_reset = 1; 2520 spin_unlock(&hdev->reset_info.lock); 2521 2522 if (ktime_compare(ktime_get(), timeout) > 0) { 2523 dev_crit(hdev->dev, 2524 "%s Failed to remove device because reset function did not finish\n", 2525 dev_name(&(hdev)->pdev->dev)); 2526 return; 2527 } 2528 } 2529 2530 cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work); 2531 2532 /* Disable PCI access from device F/W so it won't send us additional 2533 * interrupts. We disable MSI/MSI-X at the halt_engines function and we 2534 * can't have the F/W sending us interrupts after that. We need to 2535 * disable the access here because if the device is marked disabled, the 2536 * message won't be sent. Also, in case of heartbeat, the device CPU is 2537 * marked as disabled so this message won't be sent 2538 */ 2539 hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0); 2540 2541 /* Mark device as disabled */ 2542 hdev->disabled = true; 2543 2544 take_release_locks(hdev); 2545 2546 hdev->reset_info.hard_reset_pending = true; 2547 2548 hl_hwmon_fini(hdev); 2549 2550 cleanup_resources(hdev, true, false, false); 2551 2552 /* Kill processes here after CS rollback.
This is because the process 2553 * can't really exit until all its CSs are done, which is what we 2554 * do in cs rollback 2555 */ 2556 dev_info(hdev->dev, 2557 "Waiting for all processes to exit (timeout of %u seconds)\n", 2558 HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI); 2559 2560 hdev->process_kill_trial_cnt = 0; 2561 rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false); 2562 if (rc) { 2563 dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc); 2564 device_disable_open_processes(hdev, false); 2565 } 2566 2567 hdev->process_kill_trial_cnt = 0; 2568 rc = device_kill_open_processes(hdev, 0, true); 2569 if (rc) { 2570 dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc); 2571 device_disable_open_processes(hdev, true); 2572 } 2573 2574 hl_cb_pool_fini(hdev); 2575 2581 /* Reset the H/W (if it is accessible). It will be in idle state after this returns */ 2582 if (!hdev->cpld_shutdown) { 2583 rc = hdev->asic_funcs->hw_fini(hdev, true, false); 2584 if (rc) 2585 dev_err(hdev->dev, 2586 "hw_fini failed in device fini while removing device %d\n", rc); 2587 } 2588 2589 hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE; 2590 2591 /* Release kernel context */ 2592 if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) 2593 dev_err(hdev->dev, "kernel ctx is still alive\n"); 2594 2595 hl_dec_fini(hdev); 2596 2597 hl_vm_fini(hdev); 2598 2599 hl_mmu_fini(hdev); 2600 2601 vfree(hdev->captured_err_info.page_fault_info.user_mappings); 2602 2603 hl_eq_fini(hdev, &hdev->event_queue); 2604 2605 kfree(hdev->shadow_cs_queue); 2606 2607 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) 2608 hl_cq_fini(hdev, &hdev->completion_queue[i]); 2609 kfree(hdev->completion_queue); 2610 2611 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count + 2612 hdev->asic_prop.user_interrupt_count; 2613 2614 if (user_interrupt_cnt) { 2615 if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) { 2616 for (i = 0 ; i < user_interrupt_cnt ; i++) 2617 vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool); 2618 } 2619 2620 kfree(hdev->user_interrupt); 2621 } 2622 2623 vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool); 2624 2625 hl_hw_queues_destroy(hdev); 2626 2627 /* Call ASIC S/W finalize function */ 2628 hdev->asic_funcs->sw_fini(hdev); 2629 2630 device_early_fini(hdev); 2631 2632 /* Hide devices and sysfs/debugfs files from user */ 2633 cdev_sysfs_debugfs_remove(hdev); 2634 drm_dev_unregister(&hdev->drm); 2635 2636 hl_debugfs_device_fini(hdev); 2637 2638 pr_info("removed device successfully\n"); 2639 } 2640 2641 /* 2642 * MMIO register access helper functions.
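* They wrap readl()/writel() and emit the habanalabs rreg32/wreg32 tracepoints when tracing is enabled.
* A minimal usage sketch (mmEXAMPLE_STATUS is a hypothetical register offset, not one defined by this driver):
*
*   u32 sts = hl_rreg(hdev, mmEXAMPLE_STATUS);
*   hl_wreg(hdev, mmEXAMPLE_STATUS, sts | 0x1);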
2643 */ 2644 2645 /* 2646 * hl_rreg - Read an MMIO register 2647 * 2648 * @hdev: pointer to habanalabs device structure 2649 * @reg: MMIO register offset (in bytes) 2650 * 2651 * Returns the value of the MMIO register we are asked to read 2652 * 2653 */ 2654 inline u32 hl_rreg(struct hl_device *hdev, u32 reg) 2655 { 2656 u32 val = readl(hdev->rmmio + reg); 2657 2658 if (unlikely(trace_habanalabs_rreg32_enabled())) 2659 trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val); 2660 2661 return val; 2662 } 2663 2664 /* 2665 * hl_wreg - Write to an MMIO register 2666 * 2667 * @hdev: pointer to habanalabs device structure 2668 * @reg: MMIO register offset (in bytes) 2669 * @val: 32-bit value 2670 * 2671 * Writes the 32-bit value into the MMIO register 2672 * 2673 */ 2674 inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) 2675 { 2676 if (unlikely(trace_habanalabs_wreg32_enabled())) 2677 trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val); 2678 2679 writel(val, hdev->rmmio + reg); 2680 } 2681 2682 void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, 2683 u8 flags) 2684 { 2685 struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info; 2686 2687 if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) { 2688 dev_err(hdev->dev, 2689 "Number of possible razwi initiators (%u) exceeded limit (%u)\n", 2690 num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR); 2691 return; 2692 } 2693 2694 /* In case it's the first razwi since the device was opened, capture its parameters */ 2695 if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1)) 2696 return; 2697 2698 razwi_info->razwi.timestamp = ktime_to_ns(ktime_get()); 2699 razwi_info->razwi.addr = addr; 2700 razwi_info->razwi.num_of_possible_engines = num_of_engines; 2701 memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0], 2702 num_of_engines * sizeof(u16)); 2703 razwi_info->razwi.flags = flags; 2704 2705 razwi_info->razwi_info_available = true; 2706 } 2707 2708 void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, 2709 u8 flags, u64 *event_mask) 2710 { 2711 hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags); 2712 2713 if (event_mask) 2714 *event_mask |= HL_NOTIFIER_EVENT_RAZWI; 2715 } 2716 2717 static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu) 2718 { 2719 struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info; 2720 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; 2721 struct hl_vm_hash_node *hnode; 2722 struct hl_userptr *userptr; 2723 enum vm_type *vm_type; 2724 struct hl_ctx *ctx; 2725 u32 map_idx = 0; 2726 int i; 2727 2728 /* Reset previous session count*/ 2729 pgf_info->num_of_user_mappings = 0; 2730 2731 ctx = hl_get_compute_ctx(hdev); 2732 if (!ctx) { 2733 dev_err(hdev->dev, "Can't get user context for user mappings\n"); 2734 return; 2735 } 2736 2737 mutex_lock(&ctx->mem_hash_lock); 2738 hash_for_each(ctx->mem_hash, i, hnode, node) { 2739 vm_type = hnode->ptr; 2740 if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) || 2741 ((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu)) 2742 pgf_info->num_of_user_mappings++; 2743 2744 } 2745 2746 if (!pgf_info->num_of_user_mappings) 2747 goto finish; 2748 2749 /* In case we already allocated in previous session, need to release it before 2750 * allocating new buffer. 
2751 */ 2752 vfree(pgf_info->user_mappings); 2753 pgf_info->user_mappings = 2754 vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping)); 2755 if (!pgf_info->user_mappings) { 2756 pgf_info->num_of_user_mappings = 0; 2757 goto finish; 2758 } 2759 2760 hash_for_each(ctx->mem_hash, i, hnode, node) { 2761 vm_type = hnode->ptr; 2762 if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) { 2763 userptr = hnode->ptr; 2764 pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr; 2765 pgf_info->user_mappings[map_idx].size = userptr->size; 2766 map_idx++; 2767 } else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) { 2768 phys_pg_pack = hnode->ptr; 2769 pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr; 2770 pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size; 2771 map_idx++; 2772 } 2773 } 2774 finish: 2775 mutex_unlock(&ctx->mem_hash_lock); 2776 hl_ctx_put(ctx); 2777 } 2778 2779 void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu) 2780 { 2781 struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info; 2782 2783 /* Capture only the first page fault */ 2784 if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1)) 2785 return; 2786 2787 pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get()); 2788 pgf_info->page_fault.addr = addr; 2789 pgf_info->page_fault.engine_id = eng_id; 2790 hl_capture_user_mappings(hdev, is_pmmu); 2791 2792 pgf_info->page_fault_info_available = true; 2793 } 2794 2795 void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu, 2796 u64 *event_mask) 2797 { 2798 hl_capture_page_fault(hdev, addr, eng_id, is_pmmu); 2799 2800 if (event_mask) 2801 *event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT; 2802 } 2803 2804 static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id) 2805 { 2806 struct hw_err_info *info = &hdev->captured_err_info.hw_err; 2807 2808 /* Capture only the first HW err */ 2809 if (atomic_cmpxchg(&info->event_detected, 0, 1)) 2810 return; 2811 2812 info->event.timestamp = ktime_to_ns(ktime_get()); 2813 info->event.event_id = event_id; 2814 2815 info->event_info_available = true; 2816 } 2817 2818 void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask) 2819 { 2820 hl_capture_hw_err(hdev, event_id); 2821 2822 if (event_mask) 2823 *event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR; 2824 } 2825 2826 static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info) 2827 { 2828 struct fw_err_info *info = &hdev->captured_err_info.fw_err; 2829 2830 /* Capture only the first FW error */ 2831 if (atomic_cmpxchg(&info->event_detected, 0, 1)) 2832 return; 2833 2834 info->event.timestamp = ktime_to_ns(ktime_get()); 2835 info->event.err_type = fw_info->err_type; 2836 if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR) 2837 info->event.event_id = fw_info->event_id; 2838 2839 info->event_info_available = true; 2840 } 2841 2842 void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info) 2843 { 2844 hl_capture_fw_err(hdev, info); 2845 2846 if (info->event_mask) 2847 *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR; 2848 } 2849 2850 void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count) 2851 { 2852 struct engine_err_info *info = &hdev->captured_err_info.engine_err; 2853 2854 /* Capture only the first engine error */ 2855 if (atomic_cmpxchg(&info->event_detected, 0, 1)) 2856 return; 2857 2858 info->event.timestamp = ktime_to_ns(ktime_get()); 2859 info->event.engine_id = 
engine_id; 2860 info->event.error_count = error_count; 2861 info->event_info_available = true; 2862 } 2863 2864 void hl_enable_err_info_capture(struct hl_error_info *captured_err_info) 2865 { 2866 vfree(captured_err_info->page_fault_info.user_mappings); 2867 memset(captured_err_info, 0, sizeof(struct hl_error_info)); 2868 atomic_set(&captured_err_info->cs_timeout.write_enable, 1); 2869 captured_err_info->undef_opcode.write_enable = true; 2870 } 2871 2872 void hl_init_cpu_for_irq(struct hl_device *hdev) 2873 { 2874 #ifdef CONFIG_NUMA 2875 struct cpumask *available_mask = &hdev->irq_affinity_mask; 2876 int numa_node = hdev->pdev->dev.numa_node, i; 2877 static struct cpumask cpu_mask; 2878 2879 if (numa_node < 0) 2880 return; 2881 2882 if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) { 2883 dev_err(hdev->dev, "No available affinities in current numa node\n"); 2884 return; 2885 } 2886 2887 /* Remove HT siblings */ 2888 for_each_cpu(i, &cpu_mask) 2889 cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask); 2890 #endif 2891 } 2892 2893 void hl_set_irq_affinity(struct hl_device *hdev, int irq) 2894 { 2895 if (cpumask_empty(&hdev->irq_affinity_mask)) { 2896 dev_dbg(hdev->dev, "affinity mask is empty\n"); 2897 return; 2898 } 2899 2900 if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask)) 2901 dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq); 2902 } 2903 2904 void hl_eq_heartbeat_event_handle(struct hl_device *hdev) 2905 { 2906 hdev->heartbeat_debug_info.heartbeat_event_counter++; 2907 hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds(); 2908 hdev->eq_heartbeat_received = true; 2909 } 2910 2911 void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask) 2912 { 2913 struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling; 2914 ktime_t zero_time = ktime_set(0, 0); 2915 2916 mutex_lock(&clk_throttle->lock); 2917 2918 switch (event_type) { 2919 case EQ_EVENT_POWER_EVT_START: 2920 clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER; 2921 clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER; 2922 clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); 2923 clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; 2924 dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n"); 2925 break; 2926 2927 case EQ_EVENT_POWER_EVT_END: 2928 clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER; 2929 clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); 2930 dev_dbg_ratelimited(hdev->dev, "Power envelope is safe, back to optimal clock\n"); 2931 break; 2932 2933 case EQ_EVENT_THERMAL_EVT_START: 2934 clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL; 2935 clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL; 2936 clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); 2937 clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; 2938 *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; 2939 dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n"); 2940 break; 2941 2942 case EQ_EVENT_THERMAL_EVT_END: 2943 clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL; 2944 clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); 2945 *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; 2946 dev_info_ratelimited(hdev->dev, "Thermal envelope is safe, back to optimal clock\n"); 2947 break; 2948 2949 default: 2950 dev_err(hdev->dev, "Received invalid clock change event
%d\n", event_type); 2951 break; 2952 } 2953 2954 mutex_unlock(&clk_throttle->lock); 2955 } 2956 2957 void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask) 2958 { 2959 hl_handle_critical_hw_err(hdev, event_id, event_mask); 2960 *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE; 2961 2962 /* Avoid any new accesses to the H/W */ 2963 hdev->disabled = true; 2964 hdev->cpld_shutdown = true; 2965 } 2966