1 // SPDX-License-Identifier: MIT
10 #include <linux/fault-inject.h>
83 int ret = -ENOMEM; in xe_file_open()
96 xef->drm = file; in xe_file_open()
97 xef->client = client; in xe_file_open()
98 xef->xe = xe; in xe_file_open()
100 mutex_init(&xef->vm.lock); in xe_file_open()
101 xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1); in xe_file_open()
103 mutex_init(&xef->exec_queue.lock); in xe_file_open()
104 xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); in xe_file_open()
106 file->driver_priv = xef; in xe_file_open()
107 kref_init(&xef->refcount); in xe_file_open()
109 task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID); in xe_file_open()
111 xef->process_name = kstrdup(task->comm, GFP_KERNEL); in xe_file_open()
112 xef->pid = task->pid; in xe_file_open()
123 xa_destroy(&xef->exec_queue.xa); in xe_file_destroy()
124 mutex_destroy(&xef->exec_queue.lock); in xe_file_destroy()
125 xa_destroy(&xef->vm.xa); in xe_file_destroy()
126 mutex_destroy(&xef->vm.lock); in xe_file_destroy()
128 xe_drm_client_put(xef->client); in xe_file_destroy()
129 kfree(xef->process_name); in xe_file_destroy()
134 * xe_file_get() - Take a reference to the xe file object
144 kref_get(&xef->refcount); in xe_file_get()
149 * xe_file_put() - Drop a reference to the xe file object
156 kref_put(&xef->refcount, xe_file_destroy); in xe_file_put()
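The two helpers above are a plain kref get/put pair. A minimal usage sketch for an asynchronous caller; the work-queueing helper is hypothetical, only xe_file_get()/xe_file_put() come from the fragments above:

/* Sketch: hold an xe_file reference across asynchronous work. */
static void queue_async_work_sketch(struct xe_file *xef);	/* hypothetical submission helper */

static void start_async_work_sketch(struct xe_file *xef)
{
	xe_file_get(xef);		/* pin the file while work is in flight */
	queue_async_work_sketch(xef);
}

static void async_work_done_sketch(struct xe_file *xef)
{
	xe_file_put(xef);		/* may free xef via xe_file_destroy() */
}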
162 struct xe_file *xef = file->driver_priv; in xe_file_close()
173 * vm->lock taken during xe_exec_queue_kill(). in xe_file_close()
175 xa_for_each(&xef->exec_queue.xa, idx, q) { in xe_file_close()
176 if (q->vm && q->hwe->hw_engine_group) in xe_file_close()
177 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_file_close()
181 xa_for_each(&xef->vm.xa, idx, vm) in xe_file_close()
214 struct drm_file *file_priv = file->private_data; in xe_drm_ioctl()
215 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl()
219 return -ECANCELED; in xe_drm_ioctl()
232 struct drm_file *file_priv = file->private_data; in xe_drm_compat_ioctl()
233 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl()
237 return -ECANCELED; in xe_drm_compat_ioctl()
253 drm_dev_get(vma->vm_private_data); in barrier_open()
258 drm_dev_put(vma->vm_private_data); in barrier_close()
270 struct drm_device *dev = vmf->vma->vm_private_data; in barrier_fault()
271 struct vm_area_struct *vma = vmf->vma; in barrier_fault()
276 prot = vm_get_page_prot(vma->vm_flags); in barrier_fault()
282 pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) + in barrier_fault()
284 ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn, in barrier_fault()
299 ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page), in barrier_fault()
315 struct drm_file *priv = filp->private_data; in xe_pci_barrier_mmap()
316 struct drm_device *dev = priv->minor->dev; in xe_pci_barrier_mmap()
320 return -EINVAL; in xe_pci_barrier_mmap()
322 if (vma->vm_end - vma->vm_start > SZ_4K) in xe_pci_barrier_mmap()
323 return -EINVAL; in xe_pci_barrier_mmap()
325 if (is_cow_mapping(vma->vm_flags)) in xe_pci_barrier_mmap()
326 return -EINVAL; in xe_pci_barrier_mmap()
328 if (vma->vm_flags & (VM_READ | VM_EXEC)) in xe_pci_barrier_mmap()
329 return -EINVAL; in xe_pci_barrier_mmap()
333 vma->vm_ops = &vm_ops_barrier; in xe_pci_barrier_mmap()
334 vma->vm_private_data = dev; in xe_pci_barrier_mmap()
335 drm_dev_get(vma->vm_private_data); in xe_pci_barrier_mmap()
342 struct drm_file *priv = filp->private_data; in xe_mmap()
343 struct drm_device *dev = priv->minor->dev; in xe_mmap()
346 return -ENODEV; in xe_mmap()
348 switch (vma->vm_pgoff) { in xe_mmap()
404 xe_bo_dev_fini(&xe->bo_device); in xe_device_destroy()
406 if (xe->preempt_fence_wq) in xe_device_destroy()
407 destroy_workqueue(xe->preempt_fence_wq); in xe_device_destroy()
409 if (xe->ordered_wq) in xe_device_destroy()
410 destroy_workqueue(xe->ordered_wq); in xe_device_destroy()
412 if (xe->unordered_wq) in xe_device_destroy()
413 destroy_workqueue(xe->unordered_wq); in xe_device_destroy()
415 if (xe->destroy_wq) in xe_device_destroy()
416 destroy_workqueue(xe->destroy_wq); in xe_device_destroy()
418 ttm_device_fini(&xe->ttm); in xe_device_destroy()
433 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); in xe_device_create()
437 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, in xe_device_create()
438 xe->drm.anon_inode->i_mapping, in xe_device_create()
439 xe->drm.vma_offset_manager, false, false); in xe_device_create()
443 xe_bo_dev_init(&xe->bo_device); in xe_device_create()
444 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); in xe_device_create()
452 xe->info.devid = pdev->device; in xe_device_create()
453 xe->info.revid = pdev->revision; in xe_device_create()
454 xe->info.force_execlist = xe_modparam.force_execlist; in xe_device_create()
455 xe->atomic_svm_timeslice_ms = 5; in xe_device_create()
461 xe_validation_device_init(&xe->val); in xe_device_create()
463 init_waitqueue_head(&xe->ufence_wq); in xe_device_create()
465 init_rwsem(&xe->usm.lock); in xe_device_create()
467 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); in xe_device_create()
474 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, in xe_device_create()
475 XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1), in xe_device_create()
476 &xe->usm.next_asid, GFP_KERNEL); in xe_device_create()
477 drm_WARN_ON(&xe->drm, err); in xe_device_create()
479 xa_erase(&xe->usm.asid_to_vm, asid); in xe_device_create()
486 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", in xe_device_create()
488 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); in xe_device_create()
489 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); in xe_device_create()
490 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); in xe_device_create()
491 if (!xe->ordered_wq || !xe->unordered_wq || in xe_device_create()
492 !xe->preempt_fence_wq || !xe->destroy_wq) { in xe_device_create()
497 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); in xe_device_create()
498 err = -ENOMEM; in xe_device_create()
502 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); in xe_device_create()
519 drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); in xe_driver_flr_disabled()
527 * The driver-initiated FLR is the highest level of reset that we can trigger
530 * it doesn't require a re-enumeration of the PCI BARs. However, the
531 * driver-initiated FLR does still cause a reset of both GT and display and a
533 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
537 * re-init anyway.
541 const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */ in __xe_driver_flr()
545 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); in __xe_driver_flr()
558 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); in __xe_driver_flr()
563 /* Trigger the actual Driver-FLR */ in __xe_driver_flr()
566 /* Wait for hardware teardown to complete */ in __xe_driver_flr()
569 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); in __xe_driver_flr()
573 /* Wait for hardware/firmware re-init to complete */ in __xe_driver_flr()
577 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); in __xe_driver_flr()
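The messages above come from a straightforward trigger-then-poll sequence: wait until the hardware is ready, set the FLR trigger bit, wait for teardown, then wait for re-init. A condensed sketch of that shape; the register offsets and bit names below are illustrative placeholders, not the ones the driver actually uses:

/* Illustrative placeholders only; the real driver-FLR registers differ. */
#define FLR_CTL_SKETCH		XE_REG(0x0)
#define FLR_STATUS_SKETCH	XE_REG(0x4)
#define FLR_TRIGGER_BIT		BIT(0)
#define FLR_DONE_BIT		BIT(31)

static int driver_flr_sketch(struct xe_mmio *mmio, unsigned int timeout_us)
{
	int ret;

	/* Prepare: make sure no FLR is already pending. */
	ret = xe_mmio_wait32(mmio, FLR_CTL_SKETCH, FLR_TRIGGER_BIT, 0,
			     timeout_us, NULL, false);
	if (ret)
		return ret;

	/* Trigger the FLR, then wait for hardware teardown to complete. */
	xe_mmio_write32(mmio, FLR_CTL_SKETCH, FLR_TRIGGER_BIT);
	ret = xe_mmio_wait32(mmio, FLR_CTL_SKETCH, FLR_TRIGGER_BIT, 0,
			     timeout_us, NULL, false);
	if (ret)
		return ret;

	/* Wait for hardware/firmware re-init to be signalled. */
	return xe_mmio_wait32(mmio, FLR_STATUS_SKETCH, FLR_DONE_BIT,
			      FLR_DONE_BIT, timeout_us, NULL, false);
}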
597 if (xe->needs_flr_on_fini) in xe_driver_flr_fini()
613 unsigned int mask_size = xe->info.dma_mask_size; in xe_set_dma_info()
616 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); in xe_set_dma_info()
618 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
622 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
629 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); in xe_set_dma_info()
653 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); in wait_for_lmem_ready()
660 return -EINTR; in wait_for_lmem_ready()
673 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); in wait_for_lmem_ready()
674 return -EPROBE_DEFER; in wait_for_lmem_ready()
681 drm_dbg(&xe->drm, "lmem ready after %ums", in wait_for_lmem_ready()
682 jiffies_to_msecs(jiffies - start)); in wait_for_lmem_ready()
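The return values above outline a bounded, signal-aware poll: -EINTR if the user interrupts the probe, -EPROBE_DEFER if firmware has not finished initializing local memory in time. A generic sketch of that loop; the readiness check itself is a hypothetical placeholder:

/* Sketch: poll for a hardware-ready condition during probe. */
static bool lmem_ready_sketch(struct xe_device *xe);	/* hypothetical register check */

static int wait_for_ready_sketch(struct xe_device *xe, unsigned int timeout_ms)
{
	unsigned long start = jiffies;
	unsigned long timeout = start + msecs_to_jiffies(timeout_ms);

	do {
		if (signal_pending(current))
			return -EINTR;		/* user gave up on the probe */

		if (lmem_ready_sketch(xe))
			goto ready;

		msleep(20);			/* back off between reads */
	} while (time_before(jiffies, timeout));

	return -EPROBE_DEFER;			/* not ready yet, retry probe later */

ready:
	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));
	return 0;
}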
692 xe->info.probe_display = 0; in vf_update_device_info()
693 xe->info.has_heci_cscfi = 0; in vf_update_device_info()
694 xe->info.has_heci_gscfi = 0; in vf_update_device_info()
695 xe->info.has_late_bind = 0; in vf_update_device_info()
696 xe->info.skip_guc_pc = 1; in vf_update_device_info()
697 xe->info.skip_pcode = 1; in vf_update_device_info()
707 vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL); in xe_device_vram_alloc()
709 return -ENOMEM; in xe_device_vram_alloc()
711 xe->mem.vram = vram; in xe_device_vram_alloc()
761 xe->wedged.mode = xe_modparam.wedged_mode; in xe_device_probe_early()
778 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) in probe_has_flat_ccs()
785 return -ETIMEDOUT; in probe_has_flat_ccs()
788 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); in probe_has_flat_ccs()
790 if (!xe->info.has_flat_ccs) in probe_has_flat_ccs()
791 drm_dbg(&xe->drm, in probe_has_flat_ccs()
812 xe->info.mem_region_mask = 1; in xe_device_probe()
829 err = xe_ggtt_init_early(tile->mem.ggtt); in xe_device_probe()
835 * From here on, if a step fails, make sure a Driver-FLR is triggered in xe_device_probe()
837 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); in xe_device_probe()
894 if (xe->tiles->media_gt && in xe_device_probe()
895 XE_GT_WA(xe->tiles->media_gt, 15015404425_disable)) in xe_device_probe()
908 err = xe_late_bind_init(&xe->late_bind); in xe_device_probe()
928 err = drm_dev_register(&xe->drm, 0); in xe_device_probe()
938 err = xe_pmu_register(&xe->pmu); in xe_device_probe()
965 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); in xe_device_probe()
979 drm_dev_unplug(&xe->drm); in xe_device_remove()
989 drm_dbg(&xe->drm, "Shutting down device\n"); in xe_device_shutdown()
1007 * xe_device_wmb() - Device specific write memory barrier
1012 * Since it doesn't matter which register we write to, use the read-only VF_CAP
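What the comment describes is a CPU write barrier followed by a posted MMIO write, so the barrier is meaningful on the device side as well. A minimal sketch, assuming a root-tile MMIO helper and the VF_CAP register mentioned above (both are assumptions here):

/* Sketch: pair the CPU barrier with a dummy MMIO write; the register only
 * needs to be harmless to write, hence the read-only VF_CAP. */
static void device_wmb_sketch(struct xe_device *xe)
{
	wmb();					/* order prior CPU writes */

	if (IS_DGFX(xe))			/* only useful when VRAM sits behind a bus */
		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}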
1023 * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
1039 xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); in tdf_request_sync()
1048 if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, in tdf_request_sync()
1070 spin_lock(&gt->global_invl_lock); in xe_device_l2_flush()
1072 xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); in xe_device_l2_flush()
1073 if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true)) in xe_device_l2_flush()
1076 spin_unlock(&gt->global_invl_lock); in xe_device_l2_flush()
1082 * xe_device_td_flush() - Flush transient L3 cache entries
1093 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
1095 * Media is not coherent with L3 and we want to support render-vs-media
1111 xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc); in xe_device_td_flush()
1113 xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc); in xe_device_td_flush()
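Putting the fragments together, the td-flush flow is roughly: if a workaround demands it, fall back to the full L2 flush; otherwise clamp the GT frequency, issue the transient flush on each GT (tdf_request_sync() above), then lift the clamp. A sketch of that flow; the workaround check and the root-GT lookup are assumptions:

/* Sketch of the transient-data flush flow described above. */
static bool needs_l2_flush_sketch(struct xe_gt *gt);	/* hypothetical WA check */

static void td_flush_sketch(struct xe_device *xe)
{
	struct xe_gt *root_gt = xe_root_mmio_gt(xe);

	if (needs_l2_flush_sketch(root_gt)) {
		xe_device_l2_flush(xe);		/* transient flush alone is not enough */
		return;
	}

	/* Keep GT frequency bounded while the flush is outstanding. */
	xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
	tdf_request_sync(xe);			/* write XE2_TDF_CTRL + wait on each GT */
	xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
}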
1124 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
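A one-line sketch of what such an inspection can look like, assuming the driver's xe_assert() helper and a runtime-PM "suspended" query (both assumptions, not taken from the fragments above):

/* Sketch: touching device memory implies the device is awake. */
static void assert_mem_access_sketch(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}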
1144 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); in xe_device_snapshot_print()
1145 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); in xe_device_snapshot_print()
1149 drm_printf(p, "\tTile: %u\n", gt->tile->id); in xe_device_snapshot_print()
1151 gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media"); in xe_device_snapshot_print()
1153 REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid), in xe_device_snapshot_print()
1154 REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid), in xe_device_snapshot_print()
1155 REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid)); in xe_device_snapshot_print()
1156 drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock); in xe_device_snapshot_print()
1162 return sign_extend64(address, xe->info.va_bits - 1); in xe_device_canonicalize_addr()
1167 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); in xe_device_uncanonicalize_addr()
1180 * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst.
1184 * is rebind/bus-reset.
1186 * Another recovery method is vendor-specific. Below are the cases that send
1187 * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
1190 * --------------------
1195 * ``WEDGED=vendor-specific`` drm device wedged uevent with
1196 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
1202 * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow
1205 * - Check Runtime Survivability mode sysfs.
1210 - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
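On the userspace side, the uevent can be consumed with a udev/netlink monitor and dispatched on the ``WEDGED=`` value. A minimal libudev sketch (error handling trimmed; the property name follows drm-uapi.rst):

/* Sketch: watch kernel uevents on the drm subsystem and report the
 * advertised recovery method for wedged devices. */
#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "kernel");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *wedged;

		if (poll(&pfd, 1, -1) <= 0)
			continue;

		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;

		wedged = udev_device_get_property_value(dev, "WEDGED");
		if (wedged)
			printf("%s: WEDGED=%s\n",
			       udev_device_get_sysname(dev), wedged);

		udev_device_unref(dev);
	}
}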
1215 * xe_device_set_wedged_method - Set wedged recovery method
1223 xe->wedged.method = method; in xe_device_set_wedged_method()
1227 * xe_device_declare_wedged - Declare device wedged
1233 * is set, reprobe (unbind/re-bind) will be sent by default.
1249 if (xe->wedged.mode == 0) { in xe_device_declare_wedged()
1250 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); in xe_device_declare_wedged()
1256 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { in xe_device_declare_wedged()
1257 drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); in xe_device_declare_wedged()
1261 if (!atomic_xchg(&xe->wedged.flag, 1)) { in xe_device_declare_wedged()
1262 xe->needs_flr_on_fini = true; in xe_device_declare_wedged()
1263 drm_err(&xe->drm, in xe_device_declare_wedged()
1267 dev_name(xe->drm.dev)); in xe_device_declare_wedged()
1275 if (!xe->wedged.method) in xe_device_declare_wedged()
1280 drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL); in xe_device_declare_wedged()
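A sketch of how a driver path might use the two functions above when only a vendor-specific recovery (for example a firmware flash through survivability mode) can help; DRM_WEDGE_RECOVERY_VENDOR is the drm recovery flag assumed here:

/* Sketch: pick the recovery method before declaring the device wedged. */
static void wedge_for_vendor_recovery_sketch(struct xe_device *xe)
{
	/* Tell userspace that rebind/bus-reset will not help. */
	xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_VENDOR);
	xe_device_declare_wedged(xe);
}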