// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
#endif

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 0;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_xa_erase;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};

static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev, NULL);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
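 *
 * Return: 0 on success, a negative error code on failure.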
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		ivpu_hw_diagnose_failure(vdev);
		ivpu_mmu_evtq_dump(vdev);
		ivpu_fw_log_dump(vdev);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);
	return 0;
}

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;

	return ivpu_ipc_irq_thread_handler(vdev);
}

static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

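	/* dma_bits is set in ivpu_dev_init() based on HW generation: 38 on 37xx, 48 on 40xx and newer */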
	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require 10m D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
		vdev->hw->ops = &ivpu_hw_40xx_ops;
		vdev->hw->dma_bits = 48;
	} else {
		vdev->hw->ops = &ivpu_hw_37xx_ops;
		vdev->hw->dma_bits = 38;
	}

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_info_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_jobs_abort_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);