// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>
#include <linux/units.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>

#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_late_bind_fw.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
#include "xe_psmi.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
#include "xe_survivability_mode.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vm_madvise.h"
#include "xe_vram.h"
#include "xe_vram_types.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"

#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;
	struct task_struct *task = NULL;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	file->driver_priv = xef;
	kref_init(&xef->refcount);

	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
	if (task) {
		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
		xef->pid = task->pid;
		put_task_struct(task);
	}

	return 0;
}

static void xe_file_destroy(struct kref *ref)
{
	struct xe_file *xef = container_of(ref, struct xe_file, refcount);

	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	xe_drm_client_put(xef->client);
	kfree(xef->process_name);
	kfree(xef);
}

/**
 * xe_file_get() - Take a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Anyone with a pointer to xef must take a reference to the xe file
 * object using this call.
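 * The reference is dropped again with xe_file_put().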
 *
 * Return: xe file pointer
 */
struct xe_file *xe_file_get(struct xe_file *xef)
{
	kref_get(&xef->refcount);
	return xef;
}

/**
 * xe_file_put() - Drop a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Used to drop a reference to the xef object
 */
void xe_file_put(struct xe_file *xef)
{
	kref_put(&xef->refcount, xe_file_destroy);
}

static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	xe_pm_runtime_get(xe);

	/*
	 * No need for exec_queue.lock here as there is no contention for it
	 * when FD is closing as IOCTLs presumably can't be modifying the
	 * xarray. Taking exec_queue.lock here causes undue dependency on
	 * vm->lock taken during xe_exec_queue_kill().
	 */
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		if (q->vm && q->hwe->hw_engine_group)
			xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);

	xe_file_put(xef);

	xe_pm_runtime_put(xe);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
			  DRM_RENDER_ALLOW),
};

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}
#else
/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif

static void barrier_open(struct vm_area_struct *vma)
{
	drm_dev_get(vma->vm_private_data);
}

static void barrier_close(struct vm_area_struct *vma)
{
	drm_dev_put(vma->vm_private_data);
}

static void barrier_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

static vm_fault_t barrier_fault(struct vm_fault *vmf)
{
	struct drm_device *dev = vmf->vma->vm_private_data;
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	pgprot_t prot;
	int idx;

	prot = vm_get_page_prot(vma->vm_flags);

	if (drm_dev_enter(dev, &idx)) {
		unsigned long pfn;

#define LAST_DB_PAGE_OFFSET 0x7ff001
		pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
			       LAST_DB_PAGE_OFFSET);
		ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
					  pgprot_noncached(prot));
		drm_dev_exit(idx);
	} else {
		struct page *page;

		/* Allocate a new dummy page and map the whole VA range of this VMA to it */
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;

		/* Set the page to be freed using drmm release action */
		if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
			return VM_FAULT_OOM;

		ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
					  prot);
	}

	return ret;
}

static const struct vm_operations_struct vm_ops_barrier = {
	.open = barrier_open,
	.close = barrier_close,
	.fault = barrier_fault,
};

static int xe_pci_barrier_mmap(struct file *filp,
			       struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct xe_device *xe = to_xe_device(dev);

	if (!IS_DGFX(xe))
		return -EINVAL;

	if (vma->vm_end - vma->vm_start > SZ_4K)
		return -EINVAL;

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	if (vma->vm_flags & (VM_READ | VM_EXEC))
		return -EINVAL;

	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
	vma->vm_ops = &vm_ops_barrier;
	vma->vm_private_data = dev;
	drm_dev_get(vma->vm_private_data);

	return 0;
}

static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	switch (vma->vm_pgoff) {
	case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
		return xe_pci_barrier_mmap(filp, vma);
	}

	return drm_gem_mmap(filp, vma);
}

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = xe_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	xe_bo_dev_fini(&xe->bo_device);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	if (xe->destroy_wq)
		destroy_workqueue(xe->destroy_wq);

	ttm_device_fini(&xe->ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err;

	xe_bo_dev_init(&xe->bo_device);
	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	err = xe_shrinker_create(xe);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;
	xe->atomic_svm_timeslice_ms = 5;

	err = xe_irq_init(xe);
	if (err)
		goto err;

	xe_validation_device_init(&xe->val);

	init_waitqueue_head(&xe->ufence_wq);

	init_rwsem(&xe->usm.lock);

	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/*
		 * Trigger a large asid and an early asid wrap.
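		 * Allocate a dummy entry near the top of the ASID range and
		 * free it again right away, so that subsequent cyclic
		 * allocations start high and exercise the wrap-around path
		 * early.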
		 */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	err = xe_bo_pinned_init(xe);
	if (err)
		goto err;

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
						       WQ_MEM_RECLAIM);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq || !xe->destroy_wq) {
		/*
		 * Cleanup is done in xe_device_destroy via the
		 * drmm_add_action_or_reset registered above.
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
	if (err)
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */

static bool xe_driver_flr_disabled(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return true;

	if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
		return true;
	}

	return false;
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of Xe is bound to the device it will do a full
 * re-init anyway.
 */
static void __xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	int ret;

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
}

static void xe_driver_flr(struct xe_device *xe)
{
	if (xe_driver_flr_disabled(xe))
		return;

	__xe_driver_flr(xe);
}

static void xe_driver_flr_fini(void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static bool verify_lmem_ready(struct xe_device *xe)
{
	u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;

	return !!val;
}

static int wait_for_lmem_ready(struct xe_device *xe)
{
	unsigned long timeout, start;

	if (!IS_DGFX(xe))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (verify_lmem_ready(xe))
		return 0;

	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");

	start = jiffies;
	timeout = start + secs_to_jiffies(60); /* 60 sec! */

	do {
		if (signal_pending(current))
			return -EINTR;

		/*
		 * The boot firmware initializes local memory and
		 * assesses its health. If memory training fails, the punit
		 * will have been instructed to keep the GT powered down and
		 * we won't be able to communicate with it.
		 *
		 * If the status check is done before the punit updates the
		 * register, it can lead to the system being unusable.
		 * Use a timeout and defer the probe to prevent this.
		 */
		if (time_after(jiffies, timeout)) {
			drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
			return -EPROBE_DEFER;
		}

		msleep(20);

	} while (!verify_lmem_ready(xe));

	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));

	return 0;
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */

static void sriov_update_device_info(struct xe_device *xe)
{
	/* disable features that are not available/applicable to VFs */
	if (IS_SRIOV_VF(xe)) {
		xe->info.probe_display = 0;
		xe->info.has_heci_cscfi = 0;
		xe->info.has_heci_gscfi = 0;
		xe->info.skip_guc_pc = 1;
		xe->info.skip_pcode = 1;
	}
}

static int xe_device_vram_alloc(struct xe_device *xe)
{
	struct xe_vram_region *vram;

	if (!IS_DGFX(xe))
		return 0;

	vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	xe->mem.vram = vram;
	return 0;
}

/**
 * xe_device_probe_early() - Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any
 * knowledge about tile count. Also initialize pcode and
 * check vram initialization on root tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	xe_wa_device_init(xe);
	xe_wa_process_device_oob(xe);

	err = xe_mmio_probe_early(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	sriov_update_device_info(xe);

	err = xe_pcode_probe_early(xe);
	if (err || xe_survivability_mode_is_requested(xe)) {
		int save_err = err;

		/*
		 * Try to leave the device in survivability mode if possible,
		 * but still return the previous error for error propagation.
		 */
		err = xe_survivability_mode_boot_enable(xe);
		if (err)
			return err;

		return save_err;
	}

	err = wait_for_lmem_ready(xe);
	if (err)
		return err;

	xe->wedged.mode = xe_modparam.wedged_mode;

	err = xe_device_vram_alloc(xe);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */

static int probe_has_flat_ccs(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int fw_ref;
	u32 reg;

	/* Always enabled/disabled, no runtime check to do */
	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
		return 0;

	gt = xe_root_mmio_gt(xe);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS, may lead to performance impact");

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}

int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	err = xe_mmio_probe_tiles(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			return err;
	}

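	/* Early per-tile GGTT setup; the remaining tile init happens below */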
	for_each_tile(tile, xe, id) {
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
	}

	/*
	 * From here on, if a step fails, make sure a Driver-FLR is triggered
	 */
	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
	if (err)
		return err;

	err = probe_has_flat_ccs(xe);
	if (err)
		return err;

	err = xe_vram_probe(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			return err;
	}

	/*
	 * Allow allocations only now to ensure xe_display_init_early()
	 * is the first to allocate, always.
	 */
	err = xe_ttm_sys_mgr_init(xe);
	if (err)
		return err;

	/* Allocate and map stolen after potential VRAM resize */
	err = xe_ttm_stolen_mgr_init(xe);
	if (err)
		return err;

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_early(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init(tile);
		if (err)
			return err;
	}

	err = xe_irq_install(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
			return err;
	}

	if (xe->tiles->media_gt &&
	    XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
		XE_DEVICE_WA_DISABLE(xe, 15015404425);

	err = xe_devcoredump_init(xe);
	if (err)
		return err;

	xe_nvm_init(xe);

	err = xe_heci_gsc_init(xe);
	if (err)
		return err;

	err = xe_late_bind_init(&xe->late_bind);
	if (err)
		return err;

	err = xe_oa_init(xe);
	if (err)
		return err;

	err = xe_display_init(xe);
	if (err)
		return err;

	err = xe_pxp_init(xe);
	if (err)
		return err;

	err = xe_psmi_init(xe);
	if (err)
		return err;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		return err;

	xe_display_register(xe);

	err = xe_oa_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_pmu_register(&xe->pmu);
	if (err)
		goto err_unregister_display;

	err = xe_device_sysfs_init(xe);
	if (err)
		goto err_unregister_display;

	xe_debugfs_register(xe);

	err = xe_hwmon_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_i2c_probe(xe);
	if (err)
		goto err_unregister_display;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize_freq(gt);

	xe_vsec_init(xe);

	err = xe_sriov_init_late(xe);
	if (err)
		goto err_unregister_display;

	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);

err_unregister_display:
	xe_display_unregister(xe);

	return err;
}

void xe_device_remove(struct xe_device *xe)
{
	xe_display_unregister(xe);

	xe_nvm_fini(xe);

	drm_dev_unplug(&xe->drm);

	xe_bo_pci_dev_remove_all(xe);
}

void xe_device_shutdown(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	drm_dbg(&xe->drm, "Shutting down device\n");

	if (xe_driver_flr_disabled(xe)) {
		xe_display_pm_shutdown(xe);

		xe_irq_suspend(xe);

		for_each_gt(gt, xe, id)
			xe_gt_shutdown(gt);

		xe_display_pm_shutdown_late(xe);
	} else {
		/*
		 * BOOM!
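		 *
		 * The driver-initiated FLR resets GT and display and wipes
		 * local and stolen memory; see the comment above
		 * __xe_driver_flr().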
		 */
		__xe_driver_flr(xe);
	}
}

/**
 * xe_device_wmb() - Device specific write memory barrier
 * @xe: the &xe_device
 *
 * While wmb() is sufficient for a barrier if we use system memory, on discrete
 * platforms with device memory we additionally need to issue a register write.
 * Since it doesn't matter which register we write to, use the read-only VF_CAP
 * register that is also marked as accessible by the VFs.
 */
void xe_device_wmb(struct xe_device *xe)
{
	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}

/*
 * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
 */
static void tdf_request_sync(struct xe_device *xe)
{
	unsigned int fw_ref;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			continue;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref)
			return;

		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);

		/*
		 * FIXME: We can likely do better here with our choice of
		 * timeout. Currently we just assume the worst case, i.e. 150us,
		 * which is believed to be sufficient to cover the worst case
		 * scenario on current platforms if all cache entries are
		 * transient and need to be flushed.
		 */
		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
				   150, NULL, false))
			xe_gt_err_once(gt, "TD flush timeout\n");

		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}
}

void xe_device_l2_flush(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int fw_ref;

	gt = xe_root_mmio_gt(xe);

	if (!XE_GT_WA(gt, 16023588340))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	spin_lock(&gt->global_invl_lock);

	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
		xe_gt_err_once(gt, "Global invalidation timeout\n");

	spin_unlock(&gt->global_invl_lock);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

/**
 * xe_device_td_flush() - Flush transient L3 cache entries
 * @xe: The device
 *
 * Display engine has direct access to memory and is never coherent with L3/L4
 * caches (or CPU caches), however KMD is responsible for specifically flushing
 * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
 * can happen from such a surface without seeing corruption.
 *
 * Display surfaces can be tagged as transient by mapping them using one of the
 * various L3:XD PAT index modes on Xe2.
 *
 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
 * at the end of each submission via PIPE_CONTROL for compute/render, since SA
 * Media is not coherent with L3 and we want to support render-vs-media
 * use cases. For other engines like copy/blt the HW internally forces uncached
 * behaviour, hence why we can skip the TDF on such platforms.
 */
void xe_device_td_flush(struct xe_device *xe)
{
	struct xe_gt *root_gt;

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
		return;

	root_gt = xe_root_mmio_gt(xe);
	if (XE_GT_WA(root_gt, 16023588340)) {
		/* A transient flush is not sufficient: flush the L2 */
		xe_device_l2_flush(xe);
	} else {
		xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
		tdf_request_sync(xe);
		xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
	}
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

/**
 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tTile: %u\n", gt->tile->id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}

static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	xe_pm_runtime_put(xe);
}

/**
 * DOC: Xe Device Wedging
 *
 * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst.
 * When device is in wedged state, every IOCTL will be blocked and GT cannot be
 * used. Certain critical errors like gt reset failure or firmware failures can cause
 * the device to be wedged. The default recovery method for a wedged state
 * is rebind/bus-reset.
 *
 * Another recovery method is vendor-specific. Below are the cases that send
 * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
 *
 * Case: Firmware Flash
 * --------------------
 *
 * Identification Hint
 * +++++++++++++++++++
 *
 * ``WEDGED=vendor-specific`` drm device wedged uevent with
 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
 * admin/userspace consumer about the need for a firmware flash.
 *
 * Recovery Procedure
 * ++++++++++++++++++
 *
 * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow
 * the below steps
 *
 * - Check Runtime Survivability mode sysfs.
 *   If enabled, firmware flash is required to recover the device.
 *
 *   /sys/bus/pci/devices/<device>/survivability_mode
 *
 * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
 *   firmware and restore device to normal operation.
 */

/**
 * xe_device_set_wedged_method - Set wedged recovery method
 * @xe: xe device instance
 * @method: recovery method to set
 *
 * Set wedged recovery method to be sent in drm wedged uevent.
 */
void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
{
	xe->wedged.method = method;
}

/**
 * xe_device_declare_wedged - Declare device wedged
 * @xe: xe device instance
 *
 * This is a final state that can only be cleared with the recovery method
 * specified in the drm wedged uevent. The method can be set using
 * xe_device_set_wedged_method before declaring the device as wedged. If no method
 * is set, reprobe (unbind/re-bind) will be sent by default.
 *
 * In this state every IOCTL will be blocked so the GT cannot be used.
 * In general it will be called upon any critical error such as gt reset
 * failure or guc loading failure. Userspace will be notified of this state
 * through device wedged uevent.
 * If xe.wedged module parameter is set to 2, this function will be called
 * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
 * snapshot capture. In this mode, GT reset won't be attempted so the state of
 * the issue is preserved for further debugging.
 */
void xe_device_declare_wedged(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	if (xe->wedged.mode == 0) {
		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
		return;
	}

	xe_pm_runtime_get_noresume(xe);

	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
		return;
	}

	if (!atomic_xchg(&xe->wedged.flag, 1)) {
		xe->needs_flr_on_fini = true;
		drm_err(&xe->drm,
			"CRITICAL: Xe has declared device %s as wedged.\n"
			"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
			dev_name(xe->drm.dev));
	}

	for_each_gt(gt, xe, id)
		xe_gt_declare_wedged(gt);

	if (xe_device_wedged(xe)) {
		/* If no wedge recovery method is set, use default */
		if (!xe->wedged.method)
			xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
						    DRM_WEDGE_RECOVERY_BUS_RESET);

		/* Notify userspace of wedged device */
		drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
	}
}