// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>
#include <linux/units.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>

#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vram.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"

#include <generated/xe_wa_oob.h>

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;
	struct task_struct *task = NULL;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	file->driver_priv = xef;
	kref_init(&xef->refcount);

	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
	if (task) {
		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
		xef->pid = task->pid;
		put_task_struct(task);
	}

	return 0;
}

static void xe_file_destroy(struct kref *ref)
{
	struct xe_file *xef = container_of(ref, struct xe_file, refcount);

	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	xe_drm_client_put(xef->client);
	kfree(xef->process_name);
	kfree(xef);
}

/**
 * xe_file_get() - Take a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Anyone with a pointer to xef must take a reference to the xe file
 * object using this call.
 *
 * Return: xe file pointer
 */
struct xe_file *xe_file_get(struct xe_file *xef)
{
	kref_get(&xef->refcount);
	return xef;
}

/**
 * xe_file_put() - Drop a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Used to drop a reference to the xef object.
 */
void xe_file_put(struct xe_file *xef)
{
	kref_put(&xef->refcount, xe_file_destroy);
}
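
/*
 * Illustrative usage sketch (not from this file): any code that stashes an
 * xef pointer beyond the lifetime of the IOCTL that produced it is expected
 * to pair the two helpers above, e.g.:
 *
 *	struct xe_file *xef = xe_file_get(file->driver_priv);
 *	...
 *	xe_file_put(xef);
 *
 * The final xe_file_put() invokes xe_file_destroy() via kref_put().
 */
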
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	xe_pm_runtime_get(xe);

	/*
	 * No need for exec_queue.lock here as there is no contention for it
	 * when FD is closing as IOCTLs presumably can't be modifying the
	 * xarray. Taking exec_queue.lock here causes undue dependency on
	 * vm->lock taken during xe_exec_queue_kill().
	 */
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		if (q->vm && q->hwe->hw_engine_group)
			xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);

	xe_file_put(xef);

	xe_pm_runtime_put(xe);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
};

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}
#else
/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif
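
/*
 * VMA helpers for the special XE_PCI_BARRIER_MMAP_OFFSET mapping below:
 * open/close pin and unpin the DRM device stored in vm_private_data, and the
 * fault handler maps either the LAST_DB_PAGE_OFFSET page of BAR 0 (device
 * alive) or a zeroed dummy page (device unplugged), so userspace keeps a
 * valid mapping either way.
 */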
static void barrier_open(struct vm_area_struct *vma)
{
	drm_dev_get(vma->vm_private_data);
}

static void barrier_close(struct vm_area_struct *vma)
{
	drm_dev_put(vma->vm_private_data);
}

static void barrier_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

static vm_fault_t barrier_fault(struct vm_fault *vmf)
{
	struct drm_device *dev = vmf->vma->vm_private_data;
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	pgprot_t prot;
	int idx;

	prot = vm_get_page_prot(vma->vm_flags);

	if (drm_dev_enter(dev, &idx)) {
		unsigned long pfn;

#define LAST_DB_PAGE_OFFSET 0x7ff001
		pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
			       LAST_DB_PAGE_OFFSET);
		ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
					  pgprot_noncached(prot));
		drm_dev_exit(idx);
	} else {
		struct page *page;

		/* Allocate a new dummy page and map the whole VA range of this VMA to it */
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;

		/* Set the page to be freed using a drmm release action */
		if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
			return VM_FAULT_OOM;

		ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
					  prot);
	}

	return ret;
}

static const struct vm_operations_struct vm_ops_barrier = {
	.open = barrier_open,
	.close = barrier_close,
	.fault = barrier_fault,
};
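
/*
 * Set up the PCI barrier mapping requested through XE_PCI_BARRIER_MMAP_OFFSET.
 * Only discrete devices support it, and only as a single write-only page;
 * anything larger, CoW, readable or executable is rejected with -EINVAL.
 */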
static int xe_pci_barrier_mmap(struct file *filp,
			       struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct xe_device *xe = to_xe_device(dev);

	if (!IS_DGFX(xe))
		return -EINVAL;

	if (vma->vm_end - vma->vm_start > SZ_4K)
		return -EINVAL;

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	if (vma->vm_flags & (VM_READ | VM_EXEC))
		return -EINVAL;

	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
	vma->vm_ops = &vm_ops_barrier;
	vma->vm_private_data = dev;
	drm_dev_get(vma->vm_private_data);

	return 0;
}

static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	switch (vma->vm_pgoff) {
	case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
		return xe_pci_barrier_mmap(filp, vma);
	}

	return drm_gem_mmap(filp, vma);
}

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = xe_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	xe_bo_dev_fini(&xe->bo_device);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	if (xe->destroy_wq)
		destroy_workqueue(xe->destroy_wq);

	ttm_device_fini(&xe->ttm);
}
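
/**
 * xe_device_create() - Allocate and minimally initialize an xe_device
 * @pdev: The backing PCI device
 * @ent: Matching PCI ID table entry
 *
 * Allocates the drm/xe device together with its TTM device and the
 * workqueues, locks and xarrays that do not yet depend on the hardware
 * being probed. Everything allocated here is torn down automatically via
 * the xe_device_destroy() drmm action registered below and the devm
 * allocation of the device itself.
 *
 * Return: pointer to the new xe device, or an ERR_PTR() on failure.
 */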
struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err;

	xe_bo_dev_init(&xe->bo_device);
	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;

	err = xe_irq_init(xe);
	if (err)
		goto err;

	init_waitqueue_head(&xe->ufence_wq);

	init_rwsem(&xe->usm.lock);

	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
						       WQ_MEM_RECLAIM);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq || !xe->destroy_wq) {
		/*
		 * Cleanup done in xe_device_destroy via the
		 * drmm_add_action_or_reset registered above
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
	if (err)
		goto err;

	err = xe_display_create(xe);
	if (WARN_ON(err))
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */

static bool xe_driver_flr_disabled(struct xe_device *xe)
{
	return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of the driver is bound to the device it will do a
 * full re-init anyway.
 */
static void __xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	int ret;

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
}

static void xe_driver_flr(struct xe_device *xe)
{
	if (xe_driver_flr_disabled(xe)) {
		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
		return;
	}

	__xe_driver_flr(xe);
}

static void xe_driver_flr_fini(void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static bool verify_lmem_ready(struct xe_device *xe)
{
	u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;

	return !!val;
}

static int wait_for_lmem_ready(struct xe_device *xe)
{
	unsigned long timeout, start;

	if (!IS_DGFX(xe))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (verify_lmem_ready(xe))
		return 0;

	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");

	start = jiffies;
	timeout = start + secs_to_jiffies(60); /* 60 sec! */

	do {
		if (signal_pending(current))
			return -EINTR;

		/*
		 * The boot firmware initializes local memory and assesses
		 * its health. If memory training fails, the punit will have
		 * been instructed to keep the GT powered down and we won't
		 * be able to communicate with it.
		 *
		 * If the status check is done before the punit updates the
		 * register, it can lead to the system being unusable.
		 * Use a timeout and defer the probe to prevent this.
		 */
		if (time_after(jiffies, timeout)) {
			drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
			return -EPROBE_DEFER;
		}

		msleep(20);

	} while (!verify_lmem_ready(xe));

	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));

	return 0;
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */

static void sriov_update_device_info(struct xe_device *xe)
{
	/* disable features that are not available/applicable to VFs */
	if (IS_SRIOV_VF(xe)) {
		xe->info.probe_display = 0;
		xe->info.has_heci_gscfi = 0;
		xe->info.skip_guc_pc = 1;
		xe->info.skip_pcode = 1;
	}
}

/**
 * xe_device_probe_early() - Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any
 * knowledge about tile count. Also initialize pcode and
 * check vram initialization on root tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	err = xe_mmio_probe_early(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	sriov_update_device_info(xe);

	err = xe_pcode_probe_early(xe);
	if (err)
		return err;

	err = wait_for_lmem_ready(xe);
	if (err)
		return err;

	xe->wedged.mode = xe_modparam.wedged_mode;

	return 0;
}
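
/*
 * Flat CCS can be disabled by the BIOS. On Xe2+ native (non-VF) devices,
 * read back XE2_FLAT_CCS_BASE_RANGE_LOWER to see whether it is actually
 * enabled and update xe->info.has_flat_ccs to match reality.
 */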
static int probe_has_flat_ccs(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int fw_ref;
	u32 reg;

	/* Always enabled/disabled, no runtime check to do */
	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
		return 0;

	gt = xe_root_mmio_gt(xe);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS, may lead to performance impact");

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}
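
/**
 * xe_device_probe() - Probe the device, after xe_device_probe_early()
 * @xe: xe device instance
 *
 * Runs the bulk of device bring-up: early GT and tile init, GGTT and VRAM
 * probing, display and interrupt setup, full GT init, and finally
 * registration of the DRM device and its auxiliary interfaces (OA, PMU,
 * debugfs, hwmon).
 *
 * Return: 0 on success, error code on failure
 */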
int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	err = xe_mmio_probe_tiles(xe);
	if (err)
		return err;

	err = xe_ttm_sys_mgr_init(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			return err;

		/*
		 * Only after this point can GT-specific MMIO operations
		 * (including things like communication with the GuC)
		 * be performed.
		 */
		xe_gt_mmio_init(gt);
	}

	for_each_tile(tile, xe, id) {
		if (IS_SRIOV_VF(xe)) {
			xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
			err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
			if (err)
				return err;
			err = xe_gt_sriov_vf_query_config(tile->primary_gt);
			if (err)
				return err;
		}
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
		err = xe_memirq_init(&tile->memirq);
		if (err)
			return err;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_hwconfig(gt);
		if (err)
			return err;
	}

	err = xe_devcoredump_init(xe);
	if (err)
		return err;

	/*
	 * From here on, if a step fails, make sure a Driver-FLR is triggered
	 */
	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
	if (err)
		return err;

	err = probe_has_flat_ccs(xe);
	if (err)
		return err;

	err = xe_vram_probe(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			return err;
	}

	/* Allocate and map stolen after potential VRAM resize */
	err = xe_ttm_stolen_mgr_init(xe);
	if (err)
		return err;

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_early(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init(tile);
		if (err)
			return err;
	}

	err = xe_irq_install(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
			return err;
	}

	err = xe_heci_gsc_init(xe);
	if (err)
		return err;

	err = xe_oa_init(xe);
	if (err)
		return err;

	err = xe_display_init(xe);
	if (err)
		return err;

	err = xe_pxp_init(xe);
	if (err)
		return err;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		return err;

	xe_display_register(xe);

	err = xe_oa_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_pmu_register(&xe->pmu);
	if (err)
		goto err_unregister_display;

	xe_debugfs_register(xe);

	err = xe_hwmon_register(xe);
	if (err)
		goto err_unregister_display;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize_freq(gt);

	xe_vsec_init(xe);

	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);

err_unregister_display:
	xe_display_unregister(xe);

	return err;
}

void xe_device_remove(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);
}

void xe_device_shutdown(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	drm_dbg(&xe->drm, "Shutting down device\n");

	if (xe_driver_flr_disabled(xe)) {
		xe_display_pm_shutdown(xe);

		xe_irq_suspend(xe);

		for_each_gt(gt, xe, id)
			xe_gt_shutdown(gt);

		xe_display_pm_shutdown_late(xe);
	} else {
		/* BOOM! */
		__xe_driver_flr(xe);
	}
}

/**
 * xe_device_wmb() - Device specific write memory barrier
 * @xe: the &xe_device
 *
 * While wmb() is sufficient for a barrier if we use system memory, on discrete
 * platforms with device memory we additionally need to issue a register write.
 * Since it doesn't matter which register we write to, use the read-only VF_CAP
 * register that is also marked as accessible by the VFs.
 */
void xe_device_wmb(struct xe_device *xe)
{
	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}

/**
 * xe_device_td_flush() - Flush transient L3 cache entries
 * @xe: The device
 *
 * Display engine has direct access to memory and is never coherent with L3/L4
 * caches (or CPU caches), however KMD is responsible for specifically flushing
 * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
 * can happen from such a surface without seeing corruption.
 *
 * Display surfaces can be tagged as transient by mapping them using one of the
 * various L3:XD PAT index modes on Xe2.
 *
 * Note: On non-discrete Xe2 platforms, like LNL, the entire L3 cache is flushed
 * at the end of each submission via PIPE_CONTROL for compute/render, since SA
 * Media is not coherent with L3 and we want to support render-vs-media
 * use cases. For other engines like copy/blt the HW internally forces uncached
 * behaviour, hence why we can skip the TDF on such platforms.
 */
void xe_device_td_flush(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int fw_ref;
	u8 id;

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
		return;

	if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
		xe_device_l2_flush(xe);
		return;
	}

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			continue;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref)
			return;

		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
		/*
		 * FIXME: We can likely do better here with our choice of
		 * timeout. Currently we just assume the worst case, i.e. 150us,
		 * which is believed to be sufficient to cover the worst case
		 * scenario on current platforms if all cache entries are
		 * transient and need to be flushed.
		 */
		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
				   150, NULL, false))
			xe_gt_err_once(gt, "TD flush timeout\n");

		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}
}
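
/**
 * xe_device_l2_flush() - Flush the root GT's L2 cache
 * @xe: The device
 *
 * No-op unless workaround 16023588340 applies; on affected platforms this
 * issues a global invalidation through XE2_GLOBAL_INVAL under force wake and
 * waits for it to complete.
 */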
void xe_device_l2_flush(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int fw_ref;

	gt = xe_root_mmio_gt(xe);

	if (!XE_WA(gt, 16023588340))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	spin_lock(&gt->global_invl_lock);
	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);

	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
		xe_gt_err_once(gt, "Global invalidation timeout\n");
	spin_unlock(&gt->global_invl_lock);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
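
/*
 * For reference: with flat CCS, each NUM_BYTES_PER_CCS_BYTE(xe) bytes of main
 * memory are covered by one byte of compression metadata, so e.g. a 256:1
 * ratio would turn a SZ_64K allocation into 256 CCS bytes (the exact ratio is
 * platform dependent).
 */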
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

/**
 * xe_device_assert_mem_access() - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tTile: %u\n", gt->tile->id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}
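
/**
 * xe_device_canonicalize_addr() - Sign-extend a GPU address to canonical form
 * @xe: The device, which supplies the VA width
 * @address: Address to canonicalize
 *
 * The hardware expects addresses in canonical form, i.e. with the bits above
 * the VA range copied from the top VA bit. For example, with 48 VA bits,
 * 0x0000800000000000 canonicalizes to 0xffff800000000000;
 * xe_device_uncanonicalize_addr() below masks the upper bits back off.
 *
 * Return: canonical form of @address
 */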
u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}

static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	xe_pm_runtime_put(xe);
}

/**
 * xe_device_declare_wedged() - Declare device wedged
 * @xe: xe device instance
 *
 * This is a final state that can only be cleared with a module
 * re-probe (unbind + bind).
 * In this state every IOCTL will be blocked so the GT cannot be used.
 * In general it will be called upon any critical error such as gt reset
 * failure or guc loading failure. Userspace will be notified of this state
 * through device wedged uevent.
 * If xe.wedged module parameter is set to 2, this function will be called
 * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
 * snapshot capture. In this mode, GT reset won't be attempted so the state of
 * the issue is preserved for further debugging.
 */
void xe_device_declare_wedged(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	if (xe->wedged.mode == 0) {
		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
		return;
	}

	xe_pm_runtime_get_noresume(xe);

	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Device is wedged regardless.\n");
		return;
	}

	if (!atomic_xchg(&xe->wedged.flag, 1)) {
		xe->needs_flr_on_fini = true;
		drm_err(&xe->drm,
			"CRITICAL: Xe has declared device %s as wedged.\n"
			"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
			dev_name(xe->drm.dev));

		/* Notify userspace of wedged device */
		drm_dev_wedged_event(&xe->drm,
				     DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
	}

	for_each_gt(gt, xe, id)
		xe_gt_declare_wedged(gt);
}