1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include "xe_device.h" 7 8 #include <linux/aperture.h> 9 #include <linux/delay.h> 10 #include <linux/fault-inject.h> 11 #include <linux/units.h> 12 13 #include <drm/drm_atomic_helper.h> 14 #include <drm/drm_client.h> 15 #include <drm/drm_gem_ttm_helper.h> 16 #include <drm/drm_ioctl.h> 17 #include <drm/drm_managed.h> 18 #include <drm/drm_print.h> 19 #include <uapi/drm/xe_drm.h> 20 21 #include "display/xe_display.h" 22 #include "instructions/xe_gpu_commands.h" 23 #include "regs/xe_gt_regs.h" 24 #include "regs/xe_regs.h" 25 #include "xe_bo.h" 26 #include "xe_bo_evict.h" 27 #include "xe_debugfs.h" 28 #include "xe_devcoredump.h" 29 #include "xe_device_sysfs.h" 30 #include "xe_dma_buf.h" 31 #include "xe_drm_client.h" 32 #include "xe_drv.h" 33 #include "xe_exec.h" 34 #include "xe_exec_queue.h" 35 #include "xe_force_wake.h" 36 #include "xe_ggtt.h" 37 #include "xe_gsc_proxy.h" 38 #include "xe_gt.h" 39 #include "xe_gt_mcr.h" 40 #include "xe_gt_printk.h" 41 #include "xe_gt_sriov_vf.h" 42 #include "xe_guc.h" 43 #include "xe_guc_pc.h" 44 #include "xe_hw_engine_group.h" 45 #include "xe_hwmon.h" 46 #include "xe_i2c.h" 47 #include "xe_irq.h" 48 #include "xe_mmio.h" 49 #include "xe_module.h" 50 #include "xe_nvm.h" 51 #include "xe_oa.h" 52 #include "xe_observation.h" 53 #include "xe_pat.h" 54 #include "xe_pcode.h" 55 #include "xe_pm.h" 56 #include "xe_pmu.h" 57 #include "xe_psmi.h" 58 #include "xe_pxp.h" 59 #include "xe_query.h" 60 #include "xe_shrinker.h" 61 #include "xe_survivability_mode.h" 62 #include "xe_sriov.h" 63 #include "xe_tile.h" 64 #include "xe_ttm_stolen_mgr.h" 65 #include "xe_ttm_sys_mgr.h" 66 #include "xe_vm.h" 67 #include "xe_vm_madvise.h" 68 #include "xe_vram.h" 69 #include "xe_vram_types.h" 70 #include "xe_vsec.h" 71 #include "xe_wait_user_fence.h" 72 #include "xe_wa.h" 73 74 #include <generated/xe_device_wa_oob.h> 75 #include <generated/xe_wa_oob.h> 76 77 static int xe_file_open(struct drm_device *dev, struct drm_file *file) 78 { 79 struct xe_device *xe = to_xe_device(dev); 80 struct xe_drm_client *client; 81 struct xe_file *xef; 82 int ret = -ENOMEM; 83 struct task_struct *task = NULL; 84 85 xef = kzalloc(sizeof(*xef), GFP_KERNEL); 86 if (!xef) 87 return ret; 88 89 client = xe_drm_client_alloc(); 90 if (!client) { 91 kfree(xef); 92 return ret; 93 } 94 95 xef->drm = file; 96 xef->client = client; 97 xef->xe = xe; 98 99 mutex_init(&xef->vm.lock); 100 xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1); 101 102 mutex_init(&xef->exec_queue.lock); 103 xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); 104 105 file->driver_priv = xef; 106 kref_init(&xef->refcount); 107 108 task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID); 109 if (task) { 110 xef->process_name = kstrdup(task->comm, GFP_KERNEL); 111 xef->pid = task->pid; 112 put_task_struct(task); 113 } 114 115 return 0; 116 } 117 118 static void xe_file_destroy(struct kref *ref) 119 { 120 struct xe_file *xef = container_of(ref, struct xe_file, refcount); 121 122 xa_destroy(&xef->exec_queue.xa); 123 mutex_destroy(&xef->exec_queue.lock); 124 xa_destroy(&xef->vm.xa); 125 mutex_destroy(&xef->vm.lock); 126 127 xe_drm_client_put(xef->client); 128 kfree(xef->process_name); 129 kfree(xef); 130 } 131 132 /** 133 * xe_file_get() - Take a reference to the xe file object 134 * @xef: Pointer to the xe file 135 * 136 * Anyone with a pointer to xef must take a reference to the xe file 137 * object using this call. 138 * 139 * Return: xe file pointer 140 */ 141 struct xe_file *xe_file_get(struct xe_file *xef) 142 { 143 kref_get(&xef->refcount); 144 return xef; 145 } 146 147 /** 148 * xe_file_put() - Drop a reference to the xe file object 149 * @xef: Pointer to the xe file 150 * 151 * Used to drop reference to the xef object 152 */ 153 void xe_file_put(struct xe_file *xef) 154 { 155 kref_put(&xef->refcount, xe_file_destroy); 156 } 157 158 static void xe_file_close(struct drm_device *dev, struct drm_file *file) 159 { 160 struct xe_device *xe = to_xe_device(dev); 161 struct xe_file *xef = file->driver_priv; 162 struct xe_vm *vm; 163 struct xe_exec_queue *q; 164 unsigned long idx; 165 166 xe_pm_runtime_get(xe); 167 168 /* 169 * No need for exec_queue.lock here as there is no contention for it 170 * when FD is closing as IOCTLs presumably can't be modifying the 171 * xarray. Taking exec_queue.lock here causes undue dependency on 172 * vm->lock taken during xe_exec_queue_kill(). 173 */ 174 xa_for_each(&xef->exec_queue.xa, idx, q) { 175 if (q->vm && q->hwe->hw_engine_group) 176 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); 177 xe_exec_queue_kill(q); 178 xe_exec_queue_put(q); 179 } 180 xa_for_each(&xef->vm.xa, idx, vm) 181 xe_vm_close_and_put(vm); 182 183 xe_file_put(xef); 184 185 xe_pm_runtime_put(xe); 186 } 187 188 static const struct drm_ioctl_desc xe_ioctls[] = { 189 DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW), 190 DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW), 191 DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl, 192 DRM_RENDER_ALLOW), 193 DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW), 194 DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW), 195 DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW), 196 DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW), 197 DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl, 198 DRM_RENDER_ALLOW), 199 DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl, 200 DRM_RENDER_ALLOW), 201 DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl, 202 DRM_RENDER_ALLOW), 203 DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl, 204 DRM_RENDER_ALLOW), 205 DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW), 206 DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW), 207 DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl, 208 DRM_RENDER_ALLOW), 209 }; 210 211 static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 212 { 213 struct drm_file *file_priv = file->private_data; 214 struct xe_device *xe = to_xe_device(file_priv->minor->dev); 215 long ret; 216 217 if (xe_device_wedged(xe)) 218 return -ECANCELED; 219 220 ret = xe_pm_runtime_get_ioctl(xe); 221 if (ret >= 0) 222 ret = drm_ioctl(file, cmd, arg); 223 xe_pm_runtime_put(xe); 224 225 return ret; 226 } 227 228 #ifdef CONFIG_COMPAT 229 static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 230 { 231 struct drm_file *file_priv = file->private_data; 232 struct xe_device *xe = to_xe_device(file_priv->minor->dev); 233 long ret; 234 235 if (xe_device_wedged(xe)) 236 return -ECANCELED; 237 238 ret = xe_pm_runtime_get_ioctl(xe); 239 if (ret >= 0) 240 ret = drm_compat_ioctl(file, cmd, arg); 241 xe_pm_runtime_put(xe); 242 243 return ret; 244 } 245 #else 246 /* similarly to drm_compat_ioctl, let's it be assigned to .compat_ioct unconditionally */ 247 #define xe_drm_compat_ioctl NULL 248 #endif 249 250 static void barrier_open(struct vm_area_struct *vma) 251 { 252 drm_dev_get(vma->vm_private_data); 253 } 254 255 static void barrier_close(struct vm_area_struct *vma) 256 { 257 drm_dev_put(vma->vm_private_data); 258 } 259 260 static void barrier_release_dummy_page(struct drm_device *dev, void *res) 261 { 262 struct page *dummy_page = (struct page *)res; 263 264 __free_page(dummy_page); 265 } 266 267 static vm_fault_t barrier_fault(struct vm_fault *vmf) 268 { 269 struct drm_device *dev = vmf->vma->vm_private_data; 270 struct vm_area_struct *vma = vmf->vma; 271 vm_fault_t ret = VM_FAULT_NOPAGE; 272 pgprot_t prot; 273 int idx; 274 275 prot = vm_get_page_prot(vma->vm_flags); 276 277 if (drm_dev_enter(dev, &idx)) { 278 unsigned long pfn; 279 280 #define LAST_DB_PAGE_OFFSET 0x7ff001 281 pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) + 282 LAST_DB_PAGE_OFFSET); 283 ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn, 284 pgprot_noncached(prot)); 285 drm_dev_exit(idx); 286 } else { 287 struct page *page; 288 289 /* Allocate new dummy page to map all the VA range in this VMA to it*/ 290 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 291 if (!page) 292 return VM_FAULT_OOM; 293 294 /* Set the page to be freed using drmm release action */ 295 if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page)) 296 return VM_FAULT_OOM; 297 298 ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page), 299 prot); 300 } 301 302 return ret; 303 } 304 305 static const struct vm_operations_struct vm_ops_barrier = { 306 .open = barrier_open, 307 .close = barrier_close, 308 .fault = barrier_fault, 309 }; 310 311 static int xe_pci_barrier_mmap(struct file *filp, 312 struct vm_area_struct *vma) 313 { 314 struct drm_file *priv = filp->private_data; 315 struct drm_device *dev = priv->minor->dev; 316 struct xe_device *xe = to_xe_device(dev); 317 318 if (!IS_DGFX(xe)) 319 return -EINVAL; 320 321 if (vma->vm_end - vma->vm_start > SZ_4K) 322 return -EINVAL; 323 324 if (is_cow_mapping(vma->vm_flags)) 325 return -EINVAL; 326 327 if (vma->vm_flags & (VM_READ | VM_EXEC)) 328 return -EINVAL; 329 330 vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC); 331 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO); 332 vma->vm_ops = &vm_ops_barrier; 333 vma->vm_private_data = dev; 334 drm_dev_get(vma->vm_private_data); 335 336 return 0; 337 } 338 339 static int xe_mmap(struct file *filp, struct vm_area_struct *vma) 340 { 341 struct drm_file *priv = filp->private_data; 342 struct drm_device *dev = priv->minor->dev; 343 344 if (drm_dev_is_unplugged(dev)) 345 return -ENODEV; 346 347 switch (vma->vm_pgoff) { 348 case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT: 349 return xe_pci_barrier_mmap(filp, vma); 350 } 351 352 return drm_gem_mmap(filp, vma); 353 } 354 355 static const struct file_operations xe_driver_fops = { 356 .owner = THIS_MODULE, 357 .open = drm_open, 358 .release = drm_release_noglobal, 359 .unlocked_ioctl = xe_drm_ioctl, 360 .mmap = xe_mmap, 361 .poll = drm_poll, 362 .read = drm_read, 363 .compat_ioctl = xe_drm_compat_ioctl, 364 .llseek = noop_llseek, 365 #ifdef CONFIG_PROC_FS 366 .show_fdinfo = drm_show_fdinfo, 367 #endif 368 .fop_flags = FOP_UNSIGNED_OFFSET, 369 }; 370 371 static struct drm_driver driver = { 372 /* Don't use MTRRs here; the Xserver or userspace app should 373 * deal with them for Intel hardware. 374 */ 375 .driver_features = 376 DRIVER_GEM | 377 DRIVER_RENDER | DRIVER_SYNCOBJ | 378 DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA, 379 .open = xe_file_open, 380 .postclose = xe_file_close, 381 382 .gem_prime_import = xe_gem_prime_import, 383 384 .dumb_create = xe_bo_dumb_create, 385 .dumb_map_offset = drm_gem_ttm_dumb_map_offset, 386 #ifdef CONFIG_PROC_FS 387 .show_fdinfo = xe_drm_client_fdinfo, 388 #endif 389 .ioctls = xe_ioctls, 390 .num_ioctls = ARRAY_SIZE(xe_ioctls), 391 .fops = &xe_driver_fops, 392 .name = DRIVER_NAME, 393 .desc = DRIVER_DESC, 394 .major = DRIVER_MAJOR, 395 .minor = DRIVER_MINOR, 396 .patchlevel = DRIVER_PATCHLEVEL, 397 }; 398 399 static void xe_device_destroy(struct drm_device *dev, void *dummy) 400 { 401 struct xe_device *xe = to_xe_device(dev); 402 403 xe_bo_dev_fini(&xe->bo_device); 404 405 if (xe->preempt_fence_wq) 406 destroy_workqueue(xe->preempt_fence_wq); 407 408 if (xe->ordered_wq) 409 destroy_workqueue(xe->ordered_wq); 410 411 if (xe->unordered_wq) 412 destroy_workqueue(xe->unordered_wq); 413 414 if (xe->destroy_wq) 415 destroy_workqueue(xe->destroy_wq); 416 417 ttm_device_fini(&xe->ttm); 418 } 419 420 struct xe_device *xe_device_create(struct pci_dev *pdev, 421 const struct pci_device_id *ent) 422 { 423 struct xe_device *xe; 424 int err; 425 426 xe_display_driver_set_hooks(&driver); 427 428 err = aperture_remove_conflicting_pci_devices(pdev, driver.name); 429 if (err) 430 return ERR_PTR(err); 431 432 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); 433 if (IS_ERR(xe)) 434 return xe; 435 436 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, 437 xe->drm.anon_inode->i_mapping, 438 xe->drm.vma_offset_manager, false, false); 439 if (WARN_ON(err)) 440 goto err; 441 442 xe_bo_dev_init(&xe->bo_device); 443 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); 444 if (err) 445 goto err; 446 447 err = xe_shrinker_create(xe); 448 if (err) 449 goto err; 450 451 xe->info.devid = pdev->device; 452 xe->info.revid = pdev->revision; 453 xe->info.force_execlist = xe_modparam.force_execlist; 454 xe->atomic_svm_timeslice_ms = 5; 455 456 err = xe_irq_init(xe); 457 if (err) 458 goto err; 459 460 xe_validation_device_init(&xe->val); 461 462 init_waitqueue_head(&xe->ufence_wq); 463 464 init_rwsem(&xe->usm.lock); 465 466 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); 467 468 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { 469 /* Trigger a large asid and an early asid wrap. */ 470 u32 asid; 471 472 BUILD_BUG_ON(XE_MAX_ASID < 2); 473 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, 474 XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1), 475 &xe->usm.next_asid, GFP_KERNEL); 476 drm_WARN_ON(&xe->drm, err); 477 if (err >= 0) 478 xa_erase(&xe->usm.asid_to_vm, asid); 479 } 480 481 err = xe_bo_pinned_init(xe); 482 if (err) 483 goto err; 484 485 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 486 WQ_MEM_RECLAIM); 487 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); 488 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); 489 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); 490 if (!xe->ordered_wq || !xe->unordered_wq || 491 !xe->preempt_fence_wq || !xe->destroy_wq) { 492 /* 493 * Cleanup done in xe_device_destroy via 494 * drmm_add_action_or_reset register above 495 */ 496 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); 497 err = -ENOMEM; 498 goto err; 499 } 500 501 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); 502 if (err) 503 goto err; 504 505 return xe; 506 507 err: 508 return ERR_PTR(err); 509 } 510 ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */ 511 512 static bool xe_driver_flr_disabled(struct xe_device *xe) 513 { 514 if (IS_SRIOV_VF(xe)) 515 return true; 516 517 if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { 518 drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); 519 return true; 520 } 521 522 return false; 523 } 524 525 /* 526 * The driver-initiated FLR is the highest level of reset that we can trigger 527 * from within the driver. It is different from the PCI FLR in that it doesn't 528 * fully reset the SGUnit and doesn't modify the PCI config space and therefore 529 * it doesn't require a re-enumeration of the PCI BARs. However, the 530 * driver-initiated FLR does still cause a reset of both GT and display and a 531 * memory wipe of local and stolen memory, so recovery would require a full HW 532 * re-init and saving/restoring (or re-populating) the wiped memory. Since we 533 * perform the FLR as the very last action before releasing access to the HW 534 * during the driver release flow, we don't attempt recovery at all, because 535 * if/when a new instance of Xe is bound to the device it will do a full 536 * re-init anyway. 537 */ 538 static void __xe_driver_flr(struct xe_device *xe) 539 { 540 const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */ 541 struct xe_mmio *mmio = xe_root_tile_mmio(xe); 542 int ret; 543 544 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); 545 546 /* 547 * Make sure any pending FLR requests have cleared by waiting for the 548 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS 549 * to make sure it's not still set from a prior attempt (it's a write to 550 * clear bit). 551 * Note that we should never be in a situation where a previous attempt 552 * is still pending (unless the HW is totally dead), but better to be 553 * safe in case something unexpected happens 554 */ 555 ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); 556 if (ret) { 557 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); 558 return; 559 } 560 xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); 561 562 /* Trigger the actual Driver-FLR */ 563 xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR); 564 565 /* Wait for hardware teardown to complete */ 566 ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); 567 if (ret) { 568 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); 569 return; 570 } 571 572 /* Wait for hardware/firmware re-init to complete */ 573 ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS, 574 flr_timeout, NULL, false); 575 if (ret) { 576 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); 577 return; 578 } 579 580 /* Clear sticky completion status */ 581 xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); 582 } 583 584 static void xe_driver_flr(struct xe_device *xe) 585 { 586 if (xe_driver_flr_disabled(xe)) 587 return; 588 589 __xe_driver_flr(xe); 590 } 591 592 static void xe_driver_flr_fini(void *arg) 593 { 594 struct xe_device *xe = arg; 595 596 if (xe->needs_flr_on_fini) 597 xe_driver_flr(xe); 598 } 599 600 static void xe_device_sanitize(void *arg) 601 { 602 struct xe_device *xe = arg; 603 struct xe_gt *gt; 604 u8 id; 605 606 for_each_gt(gt, xe, id) 607 xe_gt_sanitize(gt); 608 } 609 610 static int xe_set_dma_info(struct xe_device *xe) 611 { 612 unsigned int mask_size = xe->info.dma_mask_size; 613 int err; 614 615 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); 616 617 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); 618 if (err) 619 goto mask_err; 620 621 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); 622 if (err) 623 goto mask_err; 624 625 return 0; 626 627 mask_err: 628 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); 629 return err; 630 } 631 632 static bool verify_lmem_ready(struct xe_device *xe) 633 { 634 u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; 635 636 return !!val; 637 } 638 639 static int wait_for_lmem_ready(struct xe_device *xe) 640 { 641 unsigned long timeout, start; 642 643 if (!IS_DGFX(xe)) 644 return 0; 645 646 if (IS_SRIOV_VF(xe)) 647 return 0; 648 649 if (verify_lmem_ready(xe)) 650 return 0; 651 652 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); 653 654 start = jiffies; 655 timeout = start + secs_to_jiffies(60); /* 60 sec! */ 656 657 do { 658 if (signal_pending(current)) 659 return -EINTR; 660 661 /* 662 * The boot firmware initializes local memory and 663 * assesses its health. If memory training fails, 664 * the punit will have been instructed to keep the GT powered 665 * down.we won't be able to communicate with it 666 * 667 * If the status check is done before punit updates the register, 668 * it can lead to the system being unusable. 669 * use a timeout and defer the probe to prevent this. 670 */ 671 if (time_after(jiffies, timeout)) { 672 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); 673 return -EPROBE_DEFER; 674 } 675 676 msleep(20); 677 678 } while (!verify_lmem_ready(xe)); 679 680 drm_dbg(&xe->drm, "lmem ready after %ums", 681 jiffies_to_msecs(jiffies - start)); 682 683 return 0; 684 } 685 ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */ 686 687 static void sriov_update_device_info(struct xe_device *xe) 688 { 689 /* disable features that are not available/applicable to VFs */ 690 if (IS_SRIOV_VF(xe)) { 691 xe->info.probe_display = 0; 692 xe->info.has_heci_cscfi = 0; 693 xe->info.has_heci_gscfi = 0; 694 xe->info.skip_guc_pc = 1; 695 xe->info.skip_pcode = 1; 696 } 697 } 698 699 static int xe_device_vram_alloc(struct xe_device *xe) 700 { 701 struct xe_vram_region *vram; 702 703 if (!IS_DGFX(xe)) 704 return 0; 705 706 vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL); 707 if (!vram) 708 return -ENOMEM; 709 710 xe->mem.vram = vram; 711 return 0; 712 } 713 714 /** 715 * xe_device_probe_early: Device early probe 716 * @xe: xe device instance 717 * 718 * Initialize MMIO resources that don't require any 719 * knowledge about tile count. Also initialize pcode and 720 * check vram initialization on root tile. 721 * 722 * Return: 0 on success, error code on failure 723 */ 724 int xe_device_probe_early(struct xe_device *xe) 725 { 726 int err; 727 728 xe_wa_device_init(xe); 729 xe_wa_process_device_oob(xe); 730 731 err = xe_mmio_probe_early(xe); 732 if (err) 733 return err; 734 735 xe_sriov_probe_early(xe); 736 737 sriov_update_device_info(xe); 738 739 err = xe_pcode_probe_early(xe); 740 if (err || xe_survivability_mode_is_requested(xe)) { 741 int save_err = err; 742 743 /* 744 * Try to leave device in survivability mode if device is 745 * possible, but still return the previous error for error 746 * propagation 747 */ 748 err = xe_survivability_mode_boot_enable(xe); 749 if (err) 750 return err; 751 752 return save_err; 753 } 754 755 err = wait_for_lmem_ready(xe); 756 if (err) 757 return err; 758 759 xe->wedged.mode = xe_modparam.wedged_mode; 760 761 err = xe_device_vram_alloc(xe); 762 if (err) 763 return err; 764 765 return 0; 766 } 767 ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */ 768 769 static int probe_has_flat_ccs(struct xe_device *xe) 770 { 771 struct xe_gt *gt; 772 unsigned int fw_ref; 773 u32 reg; 774 775 /* Always enabled/disabled, no runtime check to do */ 776 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) 777 return 0; 778 779 gt = xe_root_mmio_gt(xe); 780 781 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 782 if (!fw_ref) 783 return -ETIMEDOUT; 784 785 reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER); 786 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); 787 788 if (!xe->info.has_flat_ccs) 789 drm_dbg(&xe->drm, 790 "Flat CCS has been disabled in bios, May lead to performance impact"); 791 792 xe_force_wake_put(gt_to_fw(gt), fw_ref); 793 794 return 0; 795 } 796 797 int xe_device_probe(struct xe_device *xe) 798 { 799 struct xe_tile *tile; 800 struct xe_gt *gt; 801 int err; 802 u8 id; 803 804 xe_pat_init_early(xe); 805 806 err = xe_sriov_init(xe); 807 if (err) 808 return err; 809 810 xe->info.mem_region_mask = 1; 811 812 err = xe_set_dma_info(xe); 813 if (err) 814 return err; 815 816 err = xe_mmio_probe_tiles(xe); 817 if (err) 818 return err; 819 820 for_each_gt(gt, xe, id) { 821 err = xe_gt_init_early(gt); 822 if (err) 823 return err; 824 } 825 826 for_each_tile(tile, xe, id) { 827 err = xe_ggtt_init_early(tile->mem.ggtt); 828 if (err) 829 return err; 830 } 831 832 /* 833 * From here on, if a step fails, make sure a Driver-FLR is triggereed 834 */ 835 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); 836 if (err) 837 return err; 838 839 err = probe_has_flat_ccs(xe); 840 if (err) 841 return err; 842 843 err = xe_vram_probe(xe); 844 if (err) 845 return err; 846 847 for_each_tile(tile, xe, id) { 848 err = xe_tile_init_noalloc(tile); 849 if (err) 850 return err; 851 } 852 853 /* 854 * Allow allocations only now to ensure xe_display_init_early() 855 * is the first to allocate, always. 856 */ 857 err = xe_ttm_sys_mgr_init(xe); 858 if (err) 859 return err; 860 861 /* Allocate and map stolen after potential VRAM resize */ 862 err = xe_ttm_stolen_mgr_init(xe); 863 if (err) 864 return err; 865 866 /* 867 * Now that GT is initialized (TTM in particular), 868 * we can try to init display, and inherit the initial fb. 869 * This is the reason the first allocation needs to be done 870 * inside display. 871 */ 872 err = xe_display_init_early(xe); 873 if (err) 874 return err; 875 876 for_each_tile(tile, xe, id) { 877 err = xe_tile_init(tile); 878 if (err) 879 return err; 880 } 881 882 err = xe_irq_install(xe); 883 if (err) 884 return err; 885 886 for_each_gt(gt, xe, id) { 887 err = xe_gt_init(gt); 888 if (err) 889 return err; 890 } 891 892 if (xe->tiles->media_gt && 893 XE_GT_WA(xe->tiles->media_gt, 15015404425_disable)) 894 XE_DEVICE_WA_DISABLE(xe, 15015404425); 895 896 err = xe_devcoredump_init(xe); 897 if (err) 898 return err; 899 900 xe_nvm_init(xe); 901 902 err = xe_heci_gsc_init(xe); 903 if (err) 904 return err; 905 906 err = xe_oa_init(xe); 907 if (err) 908 return err; 909 910 err = xe_display_init(xe); 911 if (err) 912 return err; 913 914 err = xe_pxp_init(xe); 915 if (err) 916 return err; 917 918 err = xe_psmi_init(xe); 919 if (err) 920 return err; 921 922 err = drm_dev_register(&xe->drm, 0); 923 if (err) 924 return err; 925 926 xe_display_register(xe); 927 928 err = xe_oa_register(xe); 929 if (err) 930 goto err_unregister_display; 931 932 err = xe_pmu_register(&xe->pmu); 933 if (err) 934 goto err_unregister_display; 935 936 err = xe_device_sysfs_init(xe); 937 if (err) 938 goto err_unregister_display; 939 940 xe_debugfs_register(xe); 941 942 err = xe_hwmon_register(xe); 943 if (err) 944 goto err_unregister_display; 945 946 err = xe_i2c_probe(xe); 947 if (err) 948 goto err_unregister_display; 949 950 for_each_gt(gt, xe, id) 951 xe_gt_sanitize_freq(gt); 952 953 xe_vsec_init(xe); 954 955 err = xe_sriov_init_late(xe); 956 if (err) 957 goto err_unregister_display; 958 959 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); 960 961 err_unregister_display: 962 xe_display_unregister(xe); 963 964 return err; 965 } 966 967 void xe_device_remove(struct xe_device *xe) 968 { 969 xe_display_unregister(xe); 970 971 xe_nvm_fini(xe); 972 973 drm_dev_unplug(&xe->drm); 974 975 xe_bo_pci_dev_remove_all(xe); 976 } 977 978 void xe_device_shutdown(struct xe_device *xe) 979 { 980 struct xe_gt *gt; 981 u8 id; 982 983 drm_dbg(&xe->drm, "Shutting down device\n"); 984 985 if (xe_driver_flr_disabled(xe)) { 986 xe_display_pm_shutdown(xe); 987 988 xe_irq_suspend(xe); 989 990 for_each_gt(gt, xe, id) 991 xe_gt_shutdown(gt); 992 993 xe_display_pm_shutdown_late(xe); 994 } else { 995 /* BOOM! */ 996 __xe_driver_flr(xe); 997 } 998 } 999 1000 /** 1001 * xe_device_wmb() - Device specific write memory barrier 1002 * @xe: the &xe_device 1003 * 1004 * While wmb() is sufficient for a barrier if we use system memory, on discrete 1005 * platforms with device memory we additionally need to issue a register write. 1006 * Since it doesn't matter which register we write to, use the read-only VF_CAP 1007 * register that is also marked as accessible by the VFs. 1008 */ 1009 void xe_device_wmb(struct xe_device *xe) 1010 { 1011 wmb(); 1012 if (IS_DGFX(xe)) 1013 xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); 1014 } 1015 1016 /* 1017 * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt. 1018 */ 1019 static void tdf_request_sync(struct xe_device *xe) 1020 { 1021 unsigned int fw_ref; 1022 struct xe_gt *gt; 1023 u8 id; 1024 1025 for_each_gt(gt, xe, id) { 1026 if (xe_gt_is_media_type(gt)) 1027 continue; 1028 1029 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 1030 if (!fw_ref) 1031 return; 1032 1033 xe_mmio_write32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); 1034 1035 /* 1036 * FIXME: We can likely do better here with our choice of 1037 * timeout. Currently we just assume the worst case, i.e. 150us, 1038 * which is believed to be sufficient to cover the worst case 1039 * scenario on current platforms if all cache entries are 1040 * transient and need to be flushed.. 1041 */ 1042 if (xe_mmio_wait32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, 1043 150, NULL, false)) 1044 xe_gt_err_once(gt, "TD flush timeout\n"); 1045 1046 xe_force_wake_put(gt_to_fw(gt), fw_ref); 1047 } 1048 } 1049 1050 void xe_device_l2_flush(struct xe_device *xe) 1051 { 1052 struct xe_gt *gt; 1053 unsigned int fw_ref; 1054 1055 gt = xe_root_mmio_gt(xe); 1056 1057 if (!XE_GT_WA(gt, 16023588340)) 1058 return; 1059 1060 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 1061 if (!fw_ref) 1062 return; 1063 1064 spin_lock(>->global_invl_lock); 1065 1066 xe_mmio_write32(>->mmio, XE2_GLOBAL_INVAL, 0x1); 1067 if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) 1068 xe_gt_err_once(gt, "Global invalidation timeout\n"); 1069 1070 spin_unlock(>->global_invl_lock); 1071 1072 xe_force_wake_put(gt_to_fw(gt), fw_ref); 1073 } 1074 1075 /** 1076 * xe_device_td_flush() - Flush transient L3 cache entries 1077 * @xe: The device 1078 * 1079 * Display engine has direct access to memory and is never coherent with L3/L4 1080 * caches (or CPU caches), however KMD is responsible for specifically flushing 1081 * transient L3 GPU cache entries prior to the flip sequence to ensure scanout 1082 * can happen from such a surface without seeing corruption. 1083 * 1084 * Display surfaces can be tagged as transient by mapping it using one of the 1085 * various L3:XD PAT index modes on Xe2. 1086 * 1087 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed 1088 * at the end of each submission via PIPE_CONTROL for compute/render, since SA 1089 * Media is not coherent with L3 and we want to support render-vs-media 1090 * usescases. For other engines like copy/blt the HW internally forces uncached 1091 * behaviour, hence why we can skip the TDF on such platforms. 1092 */ 1093 void xe_device_td_flush(struct xe_device *xe) 1094 { 1095 struct xe_gt *root_gt; 1096 1097 if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) 1098 return; 1099 1100 root_gt = xe_root_mmio_gt(xe); 1101 if (XE_GT_WA(root_gt, 16023588340)) { 1102 /* A transient flush is not sufficient: flush the L2 */ 1103 xe_device_l2_flush(xe); 1104 } else { 1105 xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc); 1106 tdf_request_sync(xe); 1107 xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc); 1108 } 1109 } 1110 1111 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) 1112 { 1113 return xe_device_has_flat_ccs(xe) ? 1114 DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0; 1115 } 1116 1117 /** 1118 * xe_device_assert_mem_access - Inspect the current runtime_pm state. 1119 * @xe: xe device instance 1120 * 1121 * To be used before any kind of memory access. It will splat a debug warning 1122 * if the device is currently sleeping. But it doesn't guarantee in any way 1123 * that the device is going to remain awake. Xe PM runtime get and put 1124 * functions might be added to the outer bound of the memory access, while 1125 * this check is intended for inner usage to splat some warning if the worst 1126 * case has just happened. 1127 */ 1128 void xe_device_assert_mem_access(struct xe_device *xe) 1129 { 1130 xe_assert(xe, !xe_pm_runtime_suspended(xe)); 1131 } 1132 1133 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) 1134 { 1135 struct xe_gt *gt; 1136 u8 id; 1137 1138 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); 1139 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); 1140 1141 for_each_gt(gt, xe, id) { 1142 drm_printf(p, "GT id: %u\n", id); 1143 drm_printf(p, "\tTile: %u\n", gt->tile->id); 1144 drm_printf(p, "\tType: %s\n", 1145 gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media"); 1146 drm_printf(p, "\tIP ver: %u.%u.%u\n", 1147 REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid), 1148 REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid), 1149 REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid)); 1150 drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock); 1151 } 1152 } 1153 1154 u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address) 1155 { 1156 return sign_extend64(address, xe->info.va_bits - 1); 1157 } 1158 1159 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) 1160 { 1161 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); 1162 } 1163 1164 static void xe_device_wedged_fini(struct drm_device *drm, void *arg) 1165 { 1166 struct xe_device *xe = arg; 1167 1168 xe_pm_runtime_put(xe); 1169 } 1170 1171 /** 1172 * DOC: Xe Device Wedging 1173 * 1174 * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst. 1175 * When device is in wedged state, every IOCTL will be blocked and GT cannot be 1176 * used. Certain critical errors like gt reset failure, firmware failures can cause 1177 * the device to be wedged. The default recovery method for a wedged state 1178 * is rebind/bus-reset. 1179 * 1180 * Another recovery method is vendor-specific. Below are the cases that send 1181 * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent. 1182 * 1183 * Case: Firmware Flash 1184 * -------------------- 1185 * 1186 * Identification Hint 1187 * +++++++++++++++++++ 1188 * 1189 * ``WEDGED=vendor-specific`` drm device wedged uevent with 1190 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify 1191 * admin/userspace consumer about the need for a firmware flash. 1192 * 1193 * Recovery Procedure 1194 * ++++++++++++++++++ 1195 * 1196 * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow 1197 * the below steps 1198 * 1199 * - Check Runtime Survivability mode sysfs. 1200 * If enabled, firmware flash is required to recover the device. 1201 * 1202 * /sys/bus/pci/devices/<device>/survivability_mode 1203 * 1204 * - Admin/userpsace consumer can use firmware flashing tools like fwupd to flash 1205 * firmware and restore device to normal operation. 1206 */ 1207 1208 /** 1209 * xe_device_set_wedged_method - Set wedged recovery method 1210 * @xe: xe device instance 1211 * @method: recovery method to set 1212 * 1213 * Set wedged recovery method to be sent in drm wedged uevent. 1214 */ 1215 void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method) 1216 { 1217 xe->wedged.method = method; 1218 } 1219 1220 /** 1221 * xe_device_declare_wedged - Declare device wedged 1222 * @xe: xe device instance 1223 * 1224 * This is a final state that can only be cleared with the recovery method 1225 * specified in the drm wedged uevent. The method can be set using 1226 * xe_device_set_wedged_method before declaring the device as wedged. If no method 1227 * is set, reprobe (unbind/re-bind) will be sent by default. 1228 * 1229 * In this state every IOCTL will be blocked so the GT cannot be used. 1230 * In general it will be called upon any critical error such as gt reset 1231 * failure or guc loading failure. Userspace will be notified of this state 1232 * through device wedged uevent. 1233 * If xe.wedged module parameter is set to 2, this function will be called 1234 * on every single execution timeout (a.k.a. GPU hang) right after devcoredump 1235 * snapshot capture. In this mode, GT reset won't be attempted so the state of 1236 * the issue is preserved for further debugging. 1237 */ 1238 void xe_device_declare_wedged(struct xe_device *xe) 1239 { 1240 struct xe_gt *gt; 1241 u8 id; 1242 1243 if (xe->wedged.mode == 0) { 1244 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); 1245 return; 1246 } 1247 1248 xe_pm_runtime_get_noresume(xe); 1249 1250 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { 1251 drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); 1252 return; 1253 } 1254 1255 if (!atomic_xchg(&xe->wedged.flag, 1)) { 1256 xe->needs_flr_on_fini = true; 1257 drm_err(&xe->drm, 1258 "CRITICAL: Xe has declared device %s as wedged.\n" 1259 "IOCTLs and executions are blocked. Only a rebind may clear the failure\n" 1260 "Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n", 1261 dev_name(xe->drm.dev)); 1262 } 1263 1264 for_each_gt(gt, xe, id) 1265 xe_gt_declare_wedged(gt); 1266 1267 if (xe_device_wedged(xe)) { 1268 /* If no wedge recovery method is set, use default */ 1269 if (!xe->wedged.method) 1270 xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND | 1271 DRM_WEDGE_RECOVERY_BUS_RESET); 1272 1273 /* Notify userspace of wedged device */ 1274 drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL); 1275 } 1276 } 1277