Lines Matching +full:p +full:- +full:tile

1 // SPDX-License-Identifier: MIT
10 #include <linux/fault-inject.h>
85 int ret = -ENOMEM; in xe_file_open()
98 xef->drm = file; in xe_file_open()
99 xef->client = client; in xe_file_open()
100 xef->xe = xe; in xe_file_open()
102 mutex_init(&xef->vm.lock); in xe_file_open()
103 xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1); in xe_file_open()
105 mutex_init(&xef->exec_queue.lock); in xe_file_open()
106 xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); in xe_file_open()
108 file->driver_priv = xef; in xe_file_open()
109 kref_init(&xef->refcount); in xe_file_open()
111 task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID); in xe_file_open()
113 xef->process_name = kstrdup(task->comm, GFP_KERNEL); in xe_file_open()
114 xef->pid = task->pid; in xe_file_open()
125 xa_destroy(&xef->exec_queue.xa); in xe_file_destroy()
126 mutex_destroy(&xef->exec_queue.lock); in xe_file_destroy()
127 xa_destroy(&xef->vm.xa); in xe_file_destroy()
128 mutex_destroy(&xef->vm.lock); in xe_file_destroy()
130 xe_drm_client_put(xef->client); in xe_file_destroy()
131 kfree(xef->process_name); in xe_file_destroy()
136 * xe_file_get() - Take a reference to the xe file object
146 kref_get(&xef->refcount); in xe_file_get()
151 * xe_file_put() - Drop a reference to the xe file object
158 kref_put(&xef->refcount, xe_file_destroy); in xe_file_put()
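A minimal, self-contained sketch of the get/put pattern behind xe_file_get() and xe_file_put() above, where whichever caller drops the last reference triggers the destructor (xe_file_destroy() in the driver). The my_file type and helpers are illustrative stand-ins, not the driver's API:

#include <stdatomic.h>
#include <stdlib.h>

struct my_file {
	atomic_int refcount;		/* plays the role of xef->refcount */
	char *process_name;
};

static struct my_file *my_file_get(struct my_file *f)
{
	atomic_fetch_add(&f->refcount, 1);	/* like kref_get() */
	return f;
}

static void my_file_put(struct my_file *f)
{
	/* like kref_put(): only the caller dropping the last reference frees */
	if (atomic_fetch_sub(&f->refcount, 1) == 1) {
		free(f->process_name);
		free(f);
	}
}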
164 struct xe_file *xef = file->driver_priv; in xe_file_close()
175 * vm->lock taken during xe_exec_queue_kill(). in xe_file_close()
177 xa_for_each(&xef->exec_queue.xa, idx, q) { in xe_file_close()
178 if (q->vm && q->hwe->hw_engine_group) in xe_file_close()
179 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_file_close()
183 xa_for_each(&xef->vm.xa, idx, vm) in xe_file_close()
216 struct drm_file *file_priv = file->private_data; in xe_drm_ioctl()
217 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl()
221 return -ECANCELED; in xe_drm_ioctl()
234 struct drm_file *file_priv = file->private_data; in xe_drm_compat_ioctl()
235 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl()
239 return -ECANCELED; in xe_drm_compat_ioctl()
255 drm_dev_get(vma->vm_private_data); in barrier_open()
260 drm_dev_put(vma->vm_private_data); in barrier_close()
272 struct drm_device *dev = vmf->vma->vm_private_data; in barrier_fault()
273 struct vm_area_struct *vma = vmf->vma; in barrier_fault()
278 prot = vm_get_page_prot(vma->vm_flags); in barrier_fault()
284 pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) + in barrier_fault()
286 ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn, in barrier_fault()
301 ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page), in barrier_fault()
317 struct drm_file *priv = filp->private_data; in xe_pci_barrier_mmap()
318 struct drm_device *dev = priv->minor->dev; in xe_pci_barrier_mmap()
322 return -EINVAL; in xe_pci_barrier_mmap()
324 if (vma->vm_end - vma->vm_start > SZ_4K) in xe_pci_barrier_mmap()
325 return -EINVAL; in xe_pci_barrier_mmap()
327 if (is_cow_mapping(vma->vm_flags)) in xe_pci_barrier_mmap()
328 return -EINVAL; in xe_pci_barrier_mmap()
330 if (vma->vm_flags & (VM_READ | VM_EXEC)) in xe_pci_barrier_mmap()
331 return -EINVAL; in xe_pci_barrier_mmap()
335 vma->vm_ops = &vm_ops_barrier; in xe_pci_barrier_mmap()
336 vma->vm_private_data = dev; in xe_pci_barrier_mmap()
337 drm_dev_get(vma->vm_private_data); in xe_pci_barrier_mmap()
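Given the checks in xe_pci_barrier_mmap() above (at most SZ_4K, not a CoW mapping, and neither VM_READ nor VM_EXEC set), a userspace mapping of the barrier page would look roughly like this sketch. The drm_fd is assumed to be an open Xe DRM node, and BARRIER_OFFSET is a hypothetical placeholder for the real uAPI mmap offset, which is not part of this listing:

#include <stdint.h>
#include <sys/mman.h>

#define BARRIER_OFFSET	0x0	/* placeholder, not the real uAPI constant */

static volatile uint32_t *map_barrier(int drm_fd)
{
	/* Write-only, shared, exactly one 4 KiB page: anything readable,
	 * executable, private/CoW, or larger is rejected with -EINVAL. */
	void *ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED,
			 drm_fd, BARRIER_OFFSET);

	return ptr == MAP_FAILED ? NULL : (volatile uint32_t *)ptr;
}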
344 struct drm_file *priv = filp->private_data; in xe_mmap()
345 struct drm_device *dev = priv->minor->dev; in xe_mmap()
348 return -ENODEV; in xe_mmap()
350 switch (vma->vm_pgoff) { in xe_mmap()
406 xe_bo_dev_fini(&xe->bo_device); in xe_device_destroy()
408 if (xe->preempt_fence_wq) in xe_device_destroy()
409 destroy_workqueue(xe->preempt_fence_wq); in xe_device_destroy()
411 if (xe->ordered_wq) in xe_device_destroy()
412 destroy_workqueue(xe->ordered_wq); in xe_device_destroy()
414 if (xe->unordered_wq) in xe_device_destroy()
415 destroy_workqueue(xe->unordered_wq); in xe_device_destroy()
417 if (xe->destroy_wq) in xe_device_destroy()
418 destroy_workqueue(xe->destroy_wq); in xe_device_destroy()
420 ttm_device_fini(&xe->ttm); in xe_device_destroy()
435 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); in xe_device_create()
439 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, in xe_device_create()
440 xe->drm.anon_inode->i_mapping, in xe_device_create()
441 xe->drm.vma_offset_manager, 0); in xe_device_create()
445 xe_bo_dev_init(&xe->bo_device); in xe_device_create()
446 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); in xe_device_create()
454 xe->info.devid = pdev->device; in xe_device_create()
455 xe->info.revid = pdev->revision; in xe_device_create()
456 xe->info.force_execlist = xe_modparam.force_execlist; in xe_device_create()
457 xe->atomic_svm_timeslice_ms = 5; in xe_device_create()
463 xe_validation_device_init(&xe->val); in xe_device_create()
465 init_waitqueue_head(&xe->ufence_wq); in xe_device_create()
467 init_rwsem(&xe->usm.lock); in xe_device_create()
469 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); in xe_device_create()
476 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, in xe_device_create()
477 XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1), in xe_device_create()
478 &xe->usm.next_asid, GFP_KERNEL); in xe_device_create()
479 drm_WARN_ON(&xe->drm, err); in xe_device_create()
481 xa_erase(&xe->usm.asid_to_vm, asid); in xe_device_create()
488 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", in xe_device_create()
490 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); in xe_device_create()
491 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); in xe_device_create()
492 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); in xe_device_create()
493 if (!xe->ordered_wq || !xe->unordered_wq || in xe_device_create()
494 !xe->preempt_fence_wq || !xe->destroy_wq) { in xe_device_create()
499 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); in xe_device_create()
500 err = -ENOMEM; in xe_device_create()
504 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); in xe_device_create()
521 drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); in xe_driver_flr_disabled()
529 * The driver-initiated FLR is the highest level of reset that we can trigger
532 * it doesn't require a re-enumeration of the PCI BARs. However, the
533 * driver-initiated FLR does still cause a reset of both GT and display and a
535 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
539 * re-init anyway.
547 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); in __xe_driver_flr()
560 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); in __xe_driver_flr()
565 /* Trigger the actual Driver-FLR */ in __xe_driver_flr()
571 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); in __xe_driver_flr()
575 /* Wait for hardware/firmware re-init to complete */ in __xe_driver_flr()
579 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); in __xe_driver_flr()
599 if (xe->needs_flr_on_fini) in xe_driver_flr_fini()
615 unsigned int mask_size = xe->info.dma_mask_size; in xe_set_dma_info()
618 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); in xe_set_dma_info()
620 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
624 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
631 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); in xe_set_dma_info()
641 return -EINTR; in lmem_initializing()
661 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); in wait_for_lmem_ready()
681 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); in wait_for_lmem_ready()
682 return -EPROBE_DEFER; in wait_for_lmem_ready()
685 drm_dbg(&xe->drm, "lmem ready after %ums", in wait_for_lmem_ready()
686 jiffies_to_msecs(jiffies - prev_jiffies)); in wait_for_lmem_ready()
696 xe->info.probe_display = 0; in vf_update_device_info()
697 xe->info.has_heci_cscfi = 0; in vf_update_device_info()
698 xe->info.has_heci_gscfi = 0; in vf_update_device_info()
699 xe->info.has_late_bind = 0; in vf_update_device_info()
700 xe->info.skip_guc_pc = 1; in vf_update_device_info()
701 xe->info.skip_pcode = 1; in vf_update_device_info()
711 vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL); in xe_device_vram_alloc()
713 return -ENOMEM; in xe_device_vram_alloc()
715 xe->mem.vram = vram; in xe_device_vram_alloc()
724 * knowledge about tile count. Also initialize pcode and
725 * check vram initialization on root tile.
765 xe->wedged.mode = xe_modparam.wedged_mode; in xe_device_probe_early()
782 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) in probe_has_flat_ccs()
791 return -ETIMEDOUT; in probe_has_flat_ccs()
794 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); in probe_has_flat_ccs()
796 if (!xe->info.has_flat_ccs) in probe_has_flat_ccs()
797 drm_dbg(&xe->drm, in probe_has_flat_ccs()
807 struct xe_tile *tile; in xe_device_probe() local
818 xe->info.mem_region_mask = 1; in xe_device_probe()
834 for_each_tile(tile, xe, id) { in xe_device_probe()
835 err = xe_ggtt_init_early(tile->mem.ggtt); in xe_device_probe()
841 * From here on, if a step fails, make sure a Driver-FLR is triggered in xe_device_probe()
843 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); in xe_device_probe()
855 for_each_tile(tile, xe, id) { in xe_device_probe()
856 err = xe_tile_init_noalloc(tile); in xe_device_probe()
884 for_each_tile(tile, xe, id) { in xe_device_probe()
885 err = xe_tile_init(tile); in xe_device_probe()
904 if (xe->tiles->media_gt && in xe_device_probe()
905 XE_GT_WA(xe->tiles->media_gt, 15015404425_disable)) in xe_device_probe()
918 err = xe_late_bind_init(&xe->late_bind); in xe_device_probe()
938 err = drm_dev_register(&xe->drm, 0); in xe_device_probe()
948 err = xe_pmu_register(&xe->pmu); in xe_device_probe()
975 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); in xe_device_probe()
989 drm_dev_unplug(&xe->drm); in xe_device_remove()
999 drm_dbg(&xe->drm, "Shutting down device\n"); in xe_device_shutdown()
1017 * xe_device_wmb() - Device specific write memory barrier
1022 * Since it doesn't matter which register we write to, use the read-only VF_CAP
1049 xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); in tdf_request_sync()
1058 if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, in tdf_request_sync()
1082 spin_lock(&gt->global_invl_lock); in xe_device_l2_flush()
1084 xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); in xe_device_l2_flush()
1085 if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true)) in xe_device_l2_flush()
1088 spin_unlock(&gt->global_invl_lock); in xe_device_l2_flush()
1094 * xe_device_td_flush() - Flush transient L3 cache entries
1105 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
1107 * Media is not coherent with L3 and we want to support render-vs-media
1126 xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc); in xe_device_td_flush()
1128 xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc); in xe_device_td_flush()
1139 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
1154 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) in xe_device_snapshot_print() argument
1159 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); in xe_device_snapshot_print()
1160 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); in xe_device_snapshot_print()
1163 drm_printf(p, "GT id: %u\n", id); in xe_device_snapshot_print()
1164 drm_printf(p, "\tTile: %u\n", gt->tile->id); in xe_device_snapshot_print()
1165 drm_printf(p, "\tType: %s\n", in xe_device_snapshot_print()
1166 gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media"); in xe_device_snapshot_print()
1167 drm_printf(p, "\tIP ver: %u.%u.%u\n", in xe_device_snapshot_print()
1168 REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid), in xe_device_snapshot_print()
1169 REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid), in xe_device_snapshot_print()
1170 REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid)); in xe_device_snapshot_print()
1171 drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock); in xe_device_snapshot_print()
1177 return sign_extend64(address, xe->info.va_bits - 1); in xe_device_canonicalize_addr()
1182 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); in xe_device_uncanonicalize_addr()
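The two helpers above are inverses of each other: xe_device_canonicalize_addr() sign-extends bit va_bits - 1 up through bit 63, and xe_device_uncanonicalize_addr() masks the address back down to the low va_bits bits. A small self-contained illustration of the same arithmetic, with a 48-bit VA chosen only as an example (the driver uses xe->info.va_bits):

#include <stdint.h>

#define VA_BITS	48	/* example width */

/* Equivalent of sign_extend64(addr, VA_BITS - 1). */
static uint64_t canonicalize(uint64_t addr)
{
	uint64_t mask = (1ULL << VA_BITS) - 1;
	uint64_t sign = 1ULL << (VA_BITS - 1);

	addr &= mask;			/* keep only the low VA_BITS bits */
	return (addr ^ sign) - sign;	/* propagate bit VA_BITS - 1 upward */
}

/* Equivalent of addr & GENMASK_ULL(VA_BITS - 1, 0). */
static uint64_t uncanonicalize(uint64_t addr)
{
	return addr & ((1ULL << VA_BITS) - 1);
}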
1195 * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst.
1199 * is rebind/bus-reset.
1201 * Another recovery method is vendor-specific. Below are the cases that send
1202 * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
1205 * --------------------
1210 * ``WEDGED=vendor-specific`` drm device wedged uevent with
1211 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
1217 * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow
1220 * - Check Runtime Survivability mode sysfs.
1225 * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
1230 * xe_device_set_wedged_method - Set wedged recovery method
1238 xe->wedged.method = method; in xe_device_set_wedged_method()
1242 * xe_device_declare_wedged - Declare device wedged
1248 * is set, reprobe (unbind/re-bind) will be sent by default.
1264 if (xe->wedged.mode == 0) { in xe_device_declare_wedged()
1265 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); in xe_device_declare_wedged()
1271 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { in xe_device_declare_wedged()
1272 …drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n… in xe_device_declare_wedged()
1276 if (!atomic_xchg(&xe->wedged.flag, 1)) { in xe_device_declare_wedged()
1277 xe->needs_flr_on_fini = true; in xe_device_declare_wedged()
1278 drm_err(&xe->drm, in xe_device_declare_wedged()
1282 dev_name(xe->drm.dev)); in xe_device_declare_wedged()
1290 if (!xe->wedged.method) in xe_device_declare_wedged()
1295 drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL); in xe_device_declare_wedged()