// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

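	/*
	 * Resume the NPU for the duration of the unbind: tearing down the
	 * command queues and the MMU context touches device state, so the
	 * HW must stay powered until this is done.
	 */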
	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
	switch (capability) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		return true;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		return true;
	case DRM_IVPU_CAP_MANAGE_CMDQ:
		return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
	default:
		return false;
	}
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_dpu_max_freq_get(vdev);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		args->value = ivpu_is_capable(vdev, args->index);
		break;
	case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
		args->value = ivpu_fw_preempt_buf_size(vdev);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

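/*
 * Illustrative userspace usage of the param interface above (not part of
 * the driver; "fd" is a hypothetical handle to an open /dev/accel node):
 *
 *	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("device id: 0x%llx\n", args.value);
 */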
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};

static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
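		/*
		 * -ETIMEDOUT from a single receive only means "no message
		 * yet", so keep polling until a message (or a hard error)
		 * arrives or the overall boot timeout expires.
		 */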
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable HW scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 *
 * Return: 0 on success, negative errno on failure.
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));

	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (ivpu_fw_is_cold_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_disable_ipc;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_disable_ipc;
	}

	return 0;

err_disable_ipc:
	ivpu_ipc_disable(vdev);
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	flush_work(&vdev->irq_ipc_work);
	flush_work(&vdev->irq_dct_work);
	flush_work(&vdev->context_abort_work);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import = ivpu_gem_prime_import,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = 1,
};

static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate an MSI IRQ: %d\n", ret);
		return ret;
	}

	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

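	/*
	 * Request the IRQ disabled (IRQF_NO_AUTOEN); it is only enabled once
	 * the firmware is up, via enable_irq() in ivpu_boot().
	 */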
	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ: %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the default 10ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	atomic_set(&vdev->job_timeout_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	INIT_LIST_HEAD(&vdev->bo_list);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

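	/*
	 * drmm-managed mutexes are destroyed together with the drm_device,
	 * so no explicit mutex_destroy() is needed in ivpu_dev_fini().
	 */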
	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_disable_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

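	/* Register last: the device is visible to userspace from this point on */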
	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);