Lines Matching +full:0 +full:xe

80 	struct xe_device *xe = to_xe_device(dev);  in xe_file_open()  local
98 xef->xe = xe; in xe_file_open()
116 return 0; in xe_file_open()
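
The three fragments above (lines 80, 98 and 116) outline the driver's DRM open hook: allocate per-file state, record a back-pointer to the device, and return 0 on success. Below is a minimal sketch of that pattern; struct sketch_file and its fields are assumptions for illustration, not the real struct xe_file layout.

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <drm/drm_device.h>
    #include <drm/drm_file.h>

    struct sketch_file {
    	struct drm_device *dev;		/* back-pointer, like xef->xe = xe */
    	struct kref refcount;		/* used by the get/put sketch below */
    };

    static int sketch_file_open(struct drm_device *dev, struct drm_file *file)
    {
    	struct sketch_file *xef = kzalloc(sizeof(*xef), GFP_KERNEL);

    	if (!xef)
    		return -ENOMEM;

    	xef->dev = dev;
    	kref_init(&xef->refcount);
    	file->driver_priv = xef;	/* looked up again in ioctls and close */
    	return 0;
    }
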
134 * xe_file_get() - Take a reference to the xe file object
135 * @xef: Pointer to the xe file
137 * Anyone with a pointer to xef must take a reference to the xe file
140 * Return: xe file pointer
149 * xe_file_put() - Drop a reference to the xe file object
150 * @xef: Pointer to the xe file
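
The kernel-doc above requires anyone holding a pointer to the xe file to also hold a reference. Continuing the sketch above (same includes and struct), and assuming the count is a plain struct kref, which the real driver may track differently, get and put reduce to:

    static void sketch_file_destroy(struct kref *ref)
    {
    	struct sketch_file *xef = container_of(ref, struct sketch_file, refcount);

    	kfree(xef);
    }

    /* Returns the pointer so callers can chain, as xe_file_get() documents. */
    static struct sketch_file *sketch_file_get(struct sketch_file *xef)
    {
    	kref_get(&xef->refcount);
    	return xef;
    }

    static void sketch_file_put(struct sketch_file *xef)
    {
    	kref_put(&xef->refcount, sketch_file_destroy);
    }
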
161 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
167 xe_pm_runtime_get(xe); in xe_file_close()
186 xe_pm_runtime_put(xe); in xe_file_close()
215 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl() local
218 if (xe_device_wedged(xe)) in xe_drm_ioctl()
221 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_ioctl()
222 if (ret >= 0) in xe_drm_ioctl()
224 xe_pm_runtime_put(xe); in xe_drm_ioctl()
233 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl() local
236 if (xe_device_wedged(xe)) in xe_drm_compat_ioctl()
239 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_compat_ioctl()
240 if (ret >= 0) in xe_drm_compat_ioctl()
242 xe_pm_runtime_put(xe); in xe_drm_compat_ioctl()
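
Both ioctl entry points share one shape: bail out with -ECANCELED once the device is wedged, take a runtime-PM reference, dispatch through drm_ioctl(), and drop the reference. A hedged sketch with generic runtime-PM helpers follows; note that pm_runtime_resume_and_get() drops its reference on failure, so the put is conditional here, whereas the xe helper's contract evidently allows the unconditional put seen above. sketch_device_wedged() is a hypothetical stand-in for xe_device_wedged().

    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/pm_runtime.h>
    #include <drm/drm_device.h>
    #include <drm/drm_file.h>
    #include <drm/drm_ioctl.h>

    /* Hypothetical stand-in for xe_device_wedged(). */
    static bool sketch_device_wedged(struct drm_device *dev)
    {
    	return false;
    }

    static long sketch_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
    	struct drm_file *file_priv = file->private_data;
    	struct drm_device *dev = file_priv->minor->dev;
    	long ret;

    	if (sketch_device_wedged(dev))
    		return -ECANCELED;	/* no ioctls on a wedged device */

    	ret = pm_runtime_resume_and_get(dev->dev);
    	if (ret < 0)
    		return ret;		/* reference already dropped on failure */

    	ret = drm_ioctl(file, cmd, arg);
    	pm_runtime_put(dev->dev);
    	return ret;
    }
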
281 #define LAST_DB_PAGE_OFFSET 0x7ff001 in barrier_fault()
282 pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) + in barrier_fault()
317 struct xe_device *xe = to_xe_device(dev); in xe_pci_barrier_mmap() local
319 if (!IS_DGFX(xe)) in xe_pci_barrier_mmap()
337 return 0; in xe_pci_barrier_mmap()
402 struct xe_device *xe = to_xe_device(dev); in xe_device_destroy() local
404 xe_bo_dev_fini(&xe->bo_device); in xe_device_destroy()
406 if (xe->preempt_fence_wq) in xe_device_destroy()
407 destroy_workqueue(xe->preempt_fence_wq); in xe_device_destroy()
409 if (xe->ordered_wq) in xe_device_destroy()
410 destroy_workqueue(xe->ordered_wq); in xe_device_destroy()
412 if (xe->unordered_wq) in xe_device_destroy()
413 destroy_workqueue(xe->unordered_wq); in xe_device_destroy()
415 if (xe->destroy_wq) in xe_device_destroy()
416 destroy_workqueue(xe->destroy_wq); in xe_device_destroy()
418 ttm_device_fini(&xe->ttm); in xe_device_destroy()
424 struct xe_device *xe; in xe_device_create() local
433 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); in xe_device_create()
434 if (IS_ERR(xe)) in xe_device_create()
435 return xe; in xe_device_create()
437 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, in xe_device_create()
438 xe->drm.anon_inode->i_mapping, in xe_device_create()
439 xe->drm.vma_offset_manager, false, false); in xe_device_create()
443 xe_bo_dev_init(&xe->bo_device); in xe_device_create()
444 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); in xe_device_create()
448 err = xe_shrinker_create(xe); in xe_device_create()
452 xe->info.devid = pdev->device; in xe_device_create()
453 xe->info.revid = pdev->revision; in xe_device_create()
454 xe->info.force_execlist = xe_modparam.force_execlist; in xe_device_create()
455 xe->atomic_svm_timeslice_ms = 5; in xe_device_create()
457 err = xe_irq_init(xe); in xe_device_create()
461 xe_validation_device_init(&xe->val); in xe_device_create()
463 init_waitqueue_head(&xe->ufence_wq); in xe_device_create()
465 init_rwsem(&xe->usm.lock); in xe_device_create()
467 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); in xe_device_create()
474 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, in xe_device_create()
476 &xe->usm.next_asid, GFP_KERNEL); in xe_device_create()
477 drm_WARN_ON(&xe->drm, err); in xe_device_create()
478 if (err >= 0) in xe_device_create()
479 xa_erase(&xe->usm.asid_to_vm, asid); in xe_device_create()
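
Lines 467-479 initialize an allocating XArray and then run a small self-test: cyclically allocate an ASID, WARN if that fails, and immediately erase it, proving allocation works while leaving the map empty. The same probe in isolation; the limit is an assumption, since the matched lines elide the real bound.

    #include <linux/xarray.h>

    static int sketch_asid_selftest(struct xarray *asid_to_vm, u32 *next_asid)
    {
    	u32 asid;
    	int err;

    	/* Requires an xarray initialized with XA_FLAGS_ALLOC, as on line
    	 * 467 above. Storing NULL merely reserves the index; the bound
    	 * XA_LIMIT(1, 0xffff) is illustrative only. */
    	err = xa_alloc_cyclic(asid_to_vm, &asid, NULL,
    			      XA_LIMIT(1, 0xffff), next_asid, GFP_KERNEL);
    	if (err < 0)
    		return err;	/* xa_alloc_cyclic() returns 0 or 1 on success */

    	xa_erase(asid_to_vm, asid);	/* leave the map empty again */
    	return 0;
    }
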
482 err = xe_bo_pinned_init(xe); in xe_device_create()
486 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", in xe_device_create()
488 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); in xe_device_create()
489 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); in xe_device_create()
490 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); in xe_device_create()
491 if (!xe->ordered_wq || !xe->unordered_wq || in xe_device_create()
492 !xe->preempt_fence_wq || !xe->destroy_wq) { in xe_device_create()
497 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); in xe_device_create()
502 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); in xe_device_create()
506 return xe; in xe_device_create()
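
xe_device_create() strings together two managed-resource idioms: devm_drm_dev_alloc() embeds the drm_device inside the driver structure, and drmm_add_action_or_reset() registers xe_device_destroy() so teardown (workqueues, TTM, ...) runs automatically when the DRM device is released. Registering the action before allocating the resources it frees is safe because the destroy path null-checks each one, as lines 406-416 show. A compact sketch with a hypothetical device struct:

    #include <linux/err.h>
    #include <linux/pci.h>
    #include <linux/workqueue.h>
    #include <drm/drm_drv.h>
    #include <drm/drm_managed.h>

    struct sketch_device {
    	struct drm_device drm;		/* must match the devm_drm_dev_alloc() member */
    	struct workqueue_struct *ordered_wq;
    };

    static void sketch_device_destroy(struct drm_device *dev, void *unused)
    {
    	struct sketch_device *sd = container_of(dev, struct sketch_device, drm);

    	if (sd->ordered_wq)		/* guarded, as in xe_device_destroy() */
    		destroy_workqueue(sd->ordered_wq);
    }

    static struct sketch_device *sketch_device_create(struct pci_dev *pdev,
    						  const struct drm_driver *driver)
    {
    	struct sketch_device *sd;
    	int err;

    	sd = devm_drm_dev_alloc(&pdev->dev, driver, struct sketch_device, drm);
    	if (IS_ERR(sd))
    		return sd;

    	/* If this fails, the action runs immediately ("_or_reset"). */
    	err = drmm_add_action_or_reset(&sd->drm, sketch_device_destroy, NULL);
    	if (err)
    		return ERR_PTR(err);

    	sd->ordered_wq = alloc_ordered_workqueue("sketch-ordered-wq", 0);
    	if (!sd->ordered_wq)
    		return ERR_PTR(-ENOMEM);

    	return sd;
    }
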
513 static bool xe_driver_flr_disabled(struct xe_device *xe) in xe_driver_flr_disabled() argument
515 if (IS_SRIOV_VF(xe)) in xe_driver_flr_disabled()
518 if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { in xe_driver_flr_disabled()
519 drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); in xe_driver_flr_disabled()
536 * if/when a new instance of Xe is bound to the device it will do a full
539 static void __xe_driver_flr(struct xe_device *xe) in __xe_driver_flr() argument
542 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in __xe_driver_flr()
545 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); in __xe_driver_flr()
556 ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); in __xe_driver_flr()
558 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); in __xe_driver_flr()
564 xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR); in __xe_driver_flr()
567 ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); in __xe_driver_flr()
569 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); in __xe_driver_flr()
577 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); in __xe_driver_flr()
585 static void xe_driver_flr(struct xe_device *xe) in xe_driver_flr() argument
587 if (xe_driver_flr_disabled(xe)) in xe_driver_flr()
590 __xe_driver_flr(xe); in xe_driver_flr()
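
__xe_driver_flr() is a trigger-and-poll sequence: wait for the hardware to be ready, set DRIVERFLR via read-modify-write, then poll for the bit to clear (teardown) and again for reinit completion. A generic sketch of the middle of that sequence using readl_poll_timeout() on an ioremapped register; the xe_mmio helpers are driver-specific, and the bit position here is an assumption.

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include <linux/printk.h>

    #define SKETCH_DRIVERFLR	BIT(31)	/* bit position is an assumption */

    static int sketch_driver_flr(void __iomem *gu_cntl, u32 flr_timeout_us)
    {
    	u32 val;
    	int ret;

    	/* Trigger: read-modify-write to set the FLR bit. */
    	val = readl(gu_cntl);
    	writel(val | SKETCH_DRIVERFLR, gu_cntl);

    	/* Teardown: poll until the hardware clears the bit again. */
    	ret = readl_poll_timeout(gu_cntl, val, !(val & SKETCH_DRIVERFLR),
    				 100, flr_timeout_us);
    	if (ret)
    		pr_err("Driver-FLR-teardown wait completion failed! %d\n", ret);

    	return ret;
    }
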
595 struct xe_device *xe = arg; in xe_driver_flr_fini() local
597 if (xe->needs_flr_on_fini) in xe_driver_flr_fini()
598 xe_driver_flr(xe); in xe_driver_flr_fini()
603 struct xe_device *xe = arg; in xe_device_sanitize() local
607 for_each_gt(gt, xe, id) in xe_device_sanitize()
611 static int xe_set_dma_info(struct xe_device *xe) in xe_set_dma_info() argument
613 unsigned int mask_size = xe->info.dma_mask_size; in xe_set_dma_info()
616 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); in xe_set_dma_info()
618 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
622 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
626 return 0; in xe_set_dma_info()
629 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); in xe_set_dma_info()
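
xe_set_dma_info() caps the scatter-gather segment size and then derives both the streaming and the coherent DMA mask from a per-platform bit width, failing the probe if either mask cannot be satisfied. The same shape with generic helpers; the real segment size comes from xe_sg_segment_size(), so UINT_MAX below is a placeholder.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/limits.h>

    static int sketch_set_dma_info(struct device *dev, unsigned int mask_size)
    {
    	int err;

    	dma_set_max_seg_size(dev, UINT_MAX);	/* placeholder segment size */

    	err = dma_set_mask(dev, DMA_BIT_MASK(mask_size));
    	if (err)
    		goto mask_err;

    	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(mask_size));
    	if (err)
    		goto mask_err;

    	return 0;

    mask_err:
    	dev_err(dev, "Can't set DMA mask/consistent mask (%d)\n", err);
    	return err;
    }
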
633 static bool verify_lmem_ready(struct xe_device *xe) in verify_lmem_ready() argument
635 u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; in verify_lmem_ready()
640 static int wait_for_lmem_ready(struct xe_device *xe) in wait_for_lmem_ready() argument
644 if (!IS_DGFX(xe)) in wait_for_lmem_ready()
645 return 0; in wait_for_lmem_ready()
647 if (IS_SRIOV_VF(xe)) in wait_for_lmem_ready()
648 return 0; in wait_for_lmem_ready()
650 if (verify_lmem_ready(xe)) in wait_for_lmem_ready()
651 return 0; in wait_for_lmem_ready()
653 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); in wait_for_lmem_ready()
673 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); in wait_for_lmem_ready()
679 } while (!verify_lmem_ready(xe)); in wait_for_lmem_ready()
681 drm_dbg(&xe->drm, "lmem ready after %ums", in wait_for_lmem_ready()
684 return 0; in wait_for_lmem_ready()
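
wait_for_lmem_ready() layers cheap early exits (integrated parts and SR-IOV VFs skip the wait entirely; an already-set LMEM_INIT bit returns at once) on top of a jiffies-based poll loop that gives up with a debug message if firmware never finishes. A sketch of the polling skeleton; the timeout budget, sleep interval and -EPROBE_DEFER return are assumptions, and the readiness probe is passed in as a callback.

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static int sketch_wait_lmem_ready(bool (*lmem_ready)(void *ctx), void *ctx)
    {
    	/* 60 s is an assumed budget; the fragments elide the real timeout. */
    	unsigned long timeout = jiffies + msecs_to_jiffies(60 * 1000);
    	unsigned long start = jiffies;

    	if (lmem_ready(ctx))
    		return 0;		/* fast path: already initialized */

    	do {
    		if (time_after(jiffies, timeout)) {
    			pr_debug("lmem not initialized by firmware\n");
    			return -EPROBE_DEFER;	/* retry the probe later */
    		}
    		msleep(20);		/* assumed interval */
    	} while (!lmem_ready(ctx));

    	pr_debug("lmem ready after %ums", jiffies_to_msecs(jiffies - start));
    	return 0;
    }
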
688 static void vf_update_device_info(struct xe_device *xe) in vf_update_device_info() argument
690 xe_assert(xe, IS_SRIOV_VF(xe)); in vf_update_device_info()
692 xe->info.probe_display = 0; in vf_update_device_info()
693 xe->info.has_heci_cscfi = 0; in vf_update_device_info()
694 xe->info.has_heci_gscfi = 0; in vf_update_device_info()
695 xe->info.has_late_bind = 0; in vf_update_device_info()
696 xe->info.skip_guc_pc = 1; in vf_update_device_info()
697 xe->info.skip_pcode = 1; in vf_update_device_info()
700 static int xe_device_vram_alloc(struct xe_device *xe) in xe_device_vram_alloc() argument
704 if (!IS_DGFX(xe)) in xe_device_vram_alloc()
705 return 0; in xe_device_vram_alloc()
707 vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL); in xe_device_vram_alloc()
711 xe->mem.vram = vram; in xe_device_vram_alloc()
712 return 0; in xe_device_vram_alloc()
717 * @xe: xe device instance
723 * Return: 0 on success, error code on failure
725 int xe_device_probe_early(struct xe_device *xe) in xe_device_probe_early() argument
729 xe_wa_device_init(xe); in xe_device_probe_early()
730 xe_wa_process_device_oob(xe); in xe_device_probe_early()
732 err = xe_mmio_probe_early(xe); in xe_device_probe_early()
736 xe_sriov_probe_early(xe); in xe_device_probe_early()
738 if (IS_SRIOV_VF(xe)) in xe_device_probe_early()
739 vf_update_device_info(xe); in xe_device_probe_early()
741 err = xe_pcode_probe_early(xe); in xe_device_probe_early()
742 if (err || xe_survivability_mode_is_requested(xe)) { in xe_device_probe_early()
750 err = xe_survivability_mode_boot_enable(xe); in xe_device_probe_early()
757 err = wait_for_lmem_ready(xe); in xe_device_probe_early()
761 xe->wedged.mode = xe_modparam.wedged_mode; in xe_device_probe_early()
763 err = xe_device_vram_alloc(xe); in xe_device_probe_early()
767 return 0; in xe_device_probe_early()
771 static int probe_has_flat_ccs(struct xe_device *xe) in probe_has_flat_ccs() argument
778 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) in probe_has_flat_ccs()
779 return 0; in probe_has_flat_ccs()
781 gt = xe_root_mmio_gt(xe); in probe_has_flat_ccs()
788 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); in probe_has_flat_ccs()
790 if (!xe->info.has_flat_ccs) in probe_has_flat_ccs()
791 drm_dbg(&xe->drm, in probe_has_flat_ccs()
796 return 0; in probe_has_flat_ccs()
799 int xe_device_probe(struct xe_device *xe) in xe_device_probe() argument
806 xe_pat_init_early(xe); in xe_device_probe()
808 err = xe_sriov_init(xe); in xe_device_probe()
812 xe->info.mem_region_mask = 1; in xe_device_probe()
814 err = xe_set_dma_info(xe); in xe_device_probe()
818 err = xe_mmio_probe_tiles(xe); in xe_device_probe()
822 for_each_gt(gt, xe, id) { in xe_device_probe()
828 for_each_tile(tile, xe, id) { in xe_device_probe()
837 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); in xe_device_probe()
841 err = probe_has_flat_ccs(xe); in xe_device_probe()
845 err = xe_vram_probe(xe); in xe_device_probe()
849 for_each_tile(tile, xe, id) { in xe_device_probe()
859 err = xe_ttm_sys_mgr_init(xe); in xe_device_probe()
864 err = xe_ttm_stolen_mgr_init(xe); in xe_device_probe()
874 err = xe_display_init_early(xe); in xe_device_probe()
878 for_each_tile(tile, xe, id) { in xe_device_probe()
884 err = xe_irq_install(xe); in xe_device_probe()
888 for_each_gt(gt, xe, id) { in xe_device_probe()
894 if (xe->tiles->media_gt && in xe_device_probe()
895 XE_GT_WA(xe->tiles->media_gt, 15015404425_disable)) in xe_device_probe()
896 XE_DEVICE_WA_DISABLE(xe, 15015404425); in xe_device_probe()
898 err = xe_devcoredump_init(xe); in xe_device_probe()
902 xe_nvm_init(xe); in xe_device_probe()
904 err = xe_heci_gsc_init(xe); in xe_device_probe()
908 err = xe_late_bind_init(&xe->late_bind); in xe_device_probe()
912 err = xe_oa_init(xe); in xe_device_probe()
916 err = xe_display_init(xe); in xe_device_probe()
920 err = xe_pxp_init(xe); in xe_device_probe()
924 err = xe_psmi_init(xe); in xe_device_probe()
928 err = drm_dev_register(&xe->drm, 0); in xe_device_probe()
932 xe_display_register(xe); in xe_device_probe()
934 err = xe_oa_register(xe); in xe_device_probe()
938 err = xe_pmu_register(&xe->pmu); in xe_device_probe()
942 err = xe_device_sysfs_init(xe); in xe_device_probe()
946 xe_debugfs_register(xe); in xe_device_probe()
948 err = xe_hwmon_register(xe); in xe_device_probe()
952 err = xe_i2c_probe(xe); in xe_device_probe()
956 for_each_gt(gt, xe, id) in xe_device_probe()
959 xe_vsec_init(xe); in xe_device_probe()
961 err = xe_sriov_init_late(xe); in xe_device_probe()
965 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); in xe_device_probe()
968 xe_display_unregister(xe); in xe_device_probe()
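
xe_device_probe()'s success path ends at line 965 with devm_add_action_or_reset(): it either arms xe_device_sanitize() to run at driver unbind or, if registration fails, invokes it immediately and returns the error, so success and cleanup registration share one statement. The devm flavor of the idiom in isolation:

    #include <linux/device.h>

    static void sketch_sanitize(void *arg)
    {
    	/* Unbind-time cleanup; in xe this sanitizes each GT. */
    }

    static int sketch_probe_tail(struct device *dev, void *driver_data)
    {
    	/* On failure the action fires immediately (the "_or_reset" part),
    	 * so no separate error-path cleanup is needed. */
    	return devm_add_action_or_reset(dev, sketch_sanitize, driver_data);
    }
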
973 void xe_device_remove(struct xe_device *xe) in xe_device_remove() argument
975 xe_display_unregister(xe); in xe_device_remove()
977 xe_nvm_fini(xe); in xe_device_remove()
979 drm_dev_unplug(&xe->drm); in xe_device_remove()
981 xe_bo_pci_dev_remove_all(xe); in xe_device_remove()
984 void xe_device_shutdown(struct xe_device *xe) in xe_device_shutdown() argument
989 drm_dbg(&xe->drm, "Shutting down device\n"); in xe_device_shutdown()
991 if (xe_driver_flr_disabled(xe)) { in xe_device_shutdown()
992 xe_display_pm_shutdown(xe); in xe_device_shutdown()
994 xe_irq_suspend(xe); in xe_device_shutdown()
996 for_each_gt(gt, xe, id) in xe_device_shutdown()
999 xe_display_pm_shutdown_late(xe); in xe_device_shutdown()
1002 __xe_driver_flr(xe); in xe_device_shutdown()
1008 * @xe: the &xe_device
1015 void xe_device_wmb(struct xe_device *xe) in xe_device_wmb() argument
1018 if (IS_DGFX(xe)) in xe_device_wmb()
1019 xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); in xe_device_wmb()
1025 static void tdf_request_sync(struct xe_device *xe) in tdf_request_sync() argument
1031 for_each_gt(gt, xe, id) { in tdf_request_sync()
1048 if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, in tdf_request_sync()
1056 void xe_device_l2_flush(struct xe_device *xe) in xe_device_l2_flush() argument
1061 gt = xe_root_mmio_gt(xe); in xe_device_l2_flush()
1072 xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); in xe_device_l2_flush()
1073 if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) in xe_device_l2_flush()
1083 * @xe: The device
1099 void xe_device_td_flush(struct xe_device *xe) in xe_device_td_flush() argument
1103 if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) in xe_device_td_flush()
1106 root_gt = xe_root_mmio_gt(xe); in xe_device_td_flush()
1109 xe_device_l2_flush(xe); in xe_device_td_flush()
1112 tdf_request_sync(xe); in xe_device_td_flush()
1117 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) in xe_device_ccs_bytes() argument
1119 return xe_device_has_flat_ccs(xe) ? in xe_device_ccs_bytes()
1120 DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0; in xe_device_ccs_bytes()
1125 * @xe: xe device instance
1129 * that the device is going to remain awake. Xe PM runtime get and put
1134 void xe_device_assert_mem_access(struct xe_device *xe) in xe_device_assert_mem_access() argument
1136 xe_assert(xe, !xe_pm_runtime_suspended(xe)); in xe_device_assert_mem_access()
1139 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) in xe_device_snapshot_print() argument
1144 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); in xe_device_snapshot_print()
1145 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); in xe_device_snapshot_print()
1147 for_each_gt(gt, xe, id) { in xe_device_snapshot_print()
1160 u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address) in xe_device_canonicalize_addr() argument
1162 return sign_extend64(address, xe->info.va_bits - 1); in xe_device_canonicalize_addr()
1165 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) in xe_device_uncanonicalize_addr() argument
1167 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); in xe_device_uncanonicalize_addr()
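
The canonicalize/uncanonicalize pair implements x86-style canonical addressing: bit (va_bits - 1) is replicated into every higher bit, and the inverse simply masks back down to va_bits. A worked sketch, using 48 VA bits purely as an example (the real width is per-platform):

    #include <linux/bitops.h>	/* sign_extend64() */
    #include <linux/bits.h>		/* GENMASK_ULL() */
    #include <linux/types.h>

    static u64 sketch_canonicalize(u64 addr, unsigned int va_bits)
    {
    	return sign_extend64(addr, va_bits - 1);
    }

    static u64 sketch_uncanonicalize(u64 addr, unsigned int va_bits)
    {
    	return addr & GENMASK_ULL(va_bits - 1, 0);
    }

    /*
     * With va_bits = 48:
     *   sketch_canonicalize(0x0000800000000000, 48) == 0xffff800000000000
     *   (bit 47 is set, so it is copied into bits 48..63), and
     *   sketch_uncanonicalize() masks those upper bits off again.
     */
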
1172 struct xe_device *xe = arg; in xe_device_wedged_fini() local
1174 xe_pm_runtime_put(xe); in xe_device_wedged_fini()
1178 * DOC: Xe Device Wedging
1180 * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst.
1196 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
1216 * @xe: xe device instance
1221 void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method) in xe_device_set_wedged_method() argument
1223 xe->wedged.method = method; in xe_device_set_wedged_method()
1228 * @xe: xe device instance
1239 * If xe.wedged module parameter is set to 2, this function will be called
1244 void xe_device_declare_wedged(struct xe_device *xe) in xe_device_declare_wedged() argument
1249 if (xe->wedged.mode == 0) { in xe_device_declare_wedged()
1250 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); in xe_device_declare_wedged()
1254 xe_pm_runtime_get_noresume(xe); in xe_device_declare_wedged()
1256 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { in xe_device_declare_wedged()
1257 drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); in xe_device_declare_wedged()
1261 if (!atomic_xchg(&xe->wedged.flag, 1)) { in xe_device_declare_wedged()
1262 xe->needs_flr_on_fini = true; in xe_device_declare_wedged()
1263 drm_err(&xe->drm, in xe_device_declare_wedged()
1264 "CRITICAL: Xe has declared device %s as wedged.\n" in xe_device_declare_wedged()
1266 "Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n", in xe_device_declare_wedged()
1267 dev_name(xe->drm.dev)); in xe_device_declare_wedged()
1270 for_each_gt(gt, xe, id) in xe_device_declare_wedged()
1273 if (xe_device_wedged(xe)) { in xe_device_declare_wedged()
1275 if (!xe->wedged.method) in xe_device_declare_wedged()
1276 xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND | in xe_device_declare_wedged()
1280 drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL); in xe_device_declare_wedged()
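
xe_device_declare_wedged() relies on atomic_xchg() returning the flag's previous value, so the CRITICAL log message and the FLR-on-fini request fire exactly once however many paths declare the device wedged; later callers fall through to re-sending the recovery uevent. The once-only idiom in isolation:

    #include <linux/atomic.h>
    #include <linux/printk.h>

    static atomic_t sketch_wedged_flag = ATOMIC_INIT(0);

    static void sketch_declare_wedged(void)
    {
    	/* atomic_xchg() returns the old value: only the first caller
    	 * sees 0 and takes the one-time branch. */
    	if (!atomic_xchg(&sketch_wedged_flag, 1))
    		pr_crit("device declared wedged\n");

    	/* Every caller, first or not, would notify recovery here. */
    }
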