// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR)
#endif

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}
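
/*
 * Release runs on the final kref put. The device is resumed first
 * (pm_runtime_get_sync()) so the unbind path below can safely touch HW
 * state, and context_list_lock serializes this against
 * ivpu_bo_unbind_all_user_contexts().
 */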
static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 1;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}
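
/*
 * Userspace usage sketch (illustrative, not part of the driver): the
 * GET_PARAM ioctl above can be exercised with the uapi definitions from
 * <drm/ivpu_accel.h>, e.g.:
 *
 *	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("PCI device id: 0x%llx\n", args.value);
 *
 * where fd is an open accel node (typically /dev/accel/accelN).
 */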
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_xa_erase;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
};

static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}
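
/*
 * With sched_mode=1 the firmware-side HW scheduler (HWS) is used instead of
 * the default scheduler; its priority bands are configured via a JSM message
 * on cold boot, before job submission is enabled.
 */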
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable hw scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		ivpu_hw_diagnose_failure(vdev);
		ivpu_mmu_evtq_dump(vdev);
		ivpu_fw_log_dump(vdev);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (ivpu_fw_is_cold_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			return ret;

		return ivpu_hw_sched_init(vdev);
	}

	return 0;
}

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		if (!file_priv->has_mmu_faults || file_priv->aborted)
			continue;

		mutex_lock(&file_priv->lock);
		ivpu_context_abort_locked(file_priv);
		file_priv->aborted = true;
		mutex_unlock(&file_priv->lock);
	}

	mutex_unlock(&vdev->context_list_lock);
}

static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;
	u8 irq_src;

	if (kfifo_is_empty(&vdev->hw->irq.fifo))
		return IRQ_NONE;

	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
		switch (irq_src) {
		case IVPU_HW_IRQ_SRC_IPC:
			ivpu_ipc_irq_thread_handler(vdev);
			break;
		case IVPU_HW_IRQ_SRC_MMU_EVTQ:
			ivpu_context_abort_invalid(vdev);
			break;
		case IVPU_HW_IRQ_SRC_DCT:
			ivpu_pm_dct_irq_thread_handler(vdev);
			break;
		default:
			ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
			break;
		}
	}

	return IRQ_HANDLED;
}
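
/*
 * The hard IRQ handler (ivpu_hw_irq_handler) only queues source ids into
 * vdev->hw->irq.fifo; the thread handler above drains them. IRQF_NO_AUTOEN
 * keeps the line disabled until ivpu_boot() calls enable_irq(), so no
 * interrupt is serviced before the firmware boot handshake completes.
 */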
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the default 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;
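
	/*
	 * From this point on, each init step that can fail is unwound in
	 * reverse order through the error labels at the end of this function.
	 */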
	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}
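
/*
 * drm_dev_unplug() marks the device as gone before teardown, so ioctls that
 * race with removal fail their drm_dev_enter() check with -ENODEV instead of
 * touching state that ivpu_dev_fini() is about to free.
 */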
static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);
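
/*
 * Usage note (illustrative, not part of the driver): once this module binds,
 * the NPU is exposed through the DRM accel subsystem, typically as
 * /dev/accel/accelN. The module parameters defined at the top of this file
 * can be set at load time, e.g. "modprobe intel_vpu dbg_mask=0x1" (module
 * name assumed from KBUILD_MODNAME; adjust to the actual build).
 */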