
Search results for refs:xe (results 1 – 25 of 224), sorted by relevance


/linux/drivers/gpu/drm/xe/
xe_pm.c
131 int xe_pm_block_on_suspend(struct xe_device *xe) in xe_pm_block_on_suspend() argument
135 return wait_for_completion_interruptible(&xe->pm_block); in xe_pm_block_on_suspend()
145 bool xe_rpm_reclaim_safe(const struct xe_device *xe) in xe_rpm_reclaim_safe() argument
147 return !xe->d3cold.capable; in xe_rpm_reclaim_safe()
150 static void xe_rpm_lockmap_acquire(const struct xe_device *xe) in xe_rpm_lockmap_acquire() argument
152 lock_map_acquire(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_acquire()
157 static void xe_rpm_lockmap_release(const struct xe_device *xe) in xe_rpm_lockmap_release() argument
159 lock_map_release(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_release()
170 int xe_pm_suspend(struct xe_device *xe) in xe_pm_suspend() argument
176 drm_dbg(&xe->drm, "Suspending device\n"); in xe_pm_suspend()
[all …]
xe_bo_evict.c
15 static int xe_bo_apply_to_pinned(struct xe_device *xe, in xe_bo_apply_to_pinned() argument
24 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
32 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
36 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
44 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
47 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
50 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
64 int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe) in xe_bo_notifier_prepare_all_pinned() argument
68 ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present, in xe_bo_notifier_prepare_all_pinned()
69 &xe->pinned.early.kernel_bo_present, in xe_bo_notifier_prepare_all_pinned()
[all …]
xe_sriov_pf_migration.c
19 static struct xe_sriov_migration_state *pf_pick_migration(struct xe_device *xe, unsigned int vfid) in pf_pick_migration() argument
21 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_pick_migration()
22 xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); in pf_pick_migration()
24 return &xe->sriov.pf.vfs[vfid].migration; in pf_pick_migration()
34 wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid) in xe_sriov_pf_migration_waitqueue() argument
36 return &pf_pick_migration(xe, vfid)->wq; in xe_sriov_pf_migration_waitqueue()
45 bool xe_sriov_pf_migration_supported(struct xe_device *xe) in xe_sriov_pf_migration_supported() argument
47 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_migration_supported()
49 return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled; in xe_sriov_pf_migration_supported()
57 void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...) in xe_sriov_pf_migration_disable() argument
[all …]
xe_pci.c
500 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc) in find_subplatform() argument
507 if (*id == xe->info.devid) in find_subplatform()
518 static int read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) in read_gmdid() argument
520 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in read_gmdid()
524 KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); in read_gmdid()
526 if (IS_SRIOV_VF(xe)) { in read_gmdid()
544 gt->tile = &xe->tiles[0]; in read_gmdid()
606 static int handle_gmdid(struct xe_device *xe, in handle_gmdid() argument
618 ret = read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid); in handle_gmdid()
624 drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n", in handle_gmdid()
[all …]
xe_sriov_pf_service.c
24 void xe_sriov_pf_service_init(struct xe_device *xe) in xe_sriov_pf_service_init() argument
29 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_service_init()
32 xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR; in xe_sriov_pf_service_init()
33 xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR; in xe_sriov_pf_service_init()
36 xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR; in xe_sriov_pf_service_init()
37 xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR; in xe_sriov_pf_service_init()
41 static int pf_negotiate_version(struct xe_device *xe, in pf_negotiate_version() argument
45 struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base; in pf_negotiate_version()
46 struct xe_sriov_pf_service_version latest = xe->sriov.pf.service.version.latest; in pf_negotiate_version()
48 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_negotiate_version()
[all …]
xe_survivability_mode.c
95 static void populate_survivability_info(struct xe_device *xe) in populate_survivability_info() argument
97 struct xe_survivability *survivability = &xe->survivability; in populate_survivability_info()
104 mmio = xe_root_tile_mmio(xe); in populate_survivability_info()
131 struct xe_device *xe = pdev_to_xe_device(pdev); in log_survivability_info() local
132 struct xe_survivability *survivability = &xe->survivability; in log_survivability_info()
145 static int check_boot_failure(struct xe_device *xe) in check_boot_failure() argument
147 struct xe_survivability *survivability = &xe->survivability; in check_boot_failure()
157 struct xe_device *xe = pdev_to_xe_device(pdev); in survivability_mode_show() local
158 struct xe_survivability *survivability = &xe->survivability; in survivability_mode_show()
165 if (!check_boot_failure(xe)) in survivability_mode_show()
[all …]
xe_pm.h
16 int xe_pm_suspend(struct xe_device *xe);
17 int xe_pm_resume(struct xe_device *xe);
19 int xe_pm_init_early(struct xe_device *xe);
20 int xe_pm_init(struct xe_device *xe);
21 void xe_pm_fini(struct xe_device *xe);
22 bool xe_pm_runtime_suspended(struct xe_device *xe);
23 int xe_pm_runtime_suspend(struct xe_device *xe);
24 int xe_pm_runtime_resume(struct xe_device *xe);
25 void xe_pm_runtime_get(struct xe_device *xe);
26 int xe_pm_runtime_get_ioctl(struct xe_device *xe);
[all …]
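
The xe_pm.h excerpt above lists the driver's runtime-PM entry points. A minimal usage sketch follows (the caller name is hypothetical, and it assumes the matching xe_pm_runtime_put() counterpart declared in the same header); the usual pattern is to hold a runtime-PM reference across any hardware access:

/*
 * Hypothetical usage sketch, not taken from the driver. It relies on
 * xe_pm_runtime_get() from the excerpt above and assumes the matching
 * xe_pm_runtime_put() that the same header declares.
 */
#include "xe_device.h"
#include "xe_pm.h"

static void example_touch_hw(struct xe_device *xe)
{
	/* Wake the device, or just take a reference if it is already awake. */
	xe_pm_runtime_get(xe);

	/* ... MMIO/GuC work that requires the device to be powered ... */

	/* Drop the reference so runtime suspend may happen again. */
	xe_pm_runtime_put(xe);
}
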
xe_vram.c
28 static void resize_bar(struct xe_device *xe, int resno, resource_size_t size) in resize_bar() argument
30 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in resize_bar()
36 …drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support… in resize_bar()
41 drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); in resize_bar()
48 void xe_vram_resize_bar(struct xe_device *xe) in xe_vram_resize_bar() argument
51 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_vram_resize_bar()
71 drm_info(&xe->drm, in xe_vram_resize_bar()
93 drm_info(&xe->drm, "Attempting to resize bar from %lluMiB -> %lluMiB\n", in xe_vram_resize_bar()
106 …drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing. Consider enabling 'Resiza… in xe_vram_resize_bar()
113 resize_bar(xe, LMEM_BAR, rebar_size); in xe_vram_resize_bar()
[all …]
xe_nvm.c
44 static bool xe_nvm_non_posted_erase(struct xe_device *xe) in xe_nvm_non_posted_erase() argument
46 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xe_nvm_non_posted_erase()
48 if (xe->info.platform != XE_BATTLEMAGE) in xe_nvm_non_posted_erase()
54 static bool xe_nvm_writable_override(struct xe_device *xe) in xe_nvm_writable_override() argument
56 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xe_nvm_writable_override()
60 switch (xe->info.platform) { in xe_nvm_writable_override()
74 drm_err(&xe->drm, "Unknown platform\n"); in xe_nvm_writable_override()
82 drm_info(&xe->drm, "NVM access overridden by jumper\n"); in xe_nvm_writable_override()
86 int xe_nvm_init(struct xe_device *xe) in xe_nvm_init() argument
88 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_nvm_init()
[all …]
xe_late_bind_fw.c
58 struct xe_device *xe = late_bind_fw_to_xe(lb_fw); in parse_cpd_header() local
67 xe_assert(xe, manifest_entry); in parse_cpd_header()
73 drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n", in parse_cpd_header()
80 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
92 drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n", in parse_cpd_header()
99 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
115 struct xe_device *xe = late_bind_fw_to_xe(lb_fw); in parse_lb_layout() local
123 xe_assert(xe, fpt_entry); in parse_lb_layout()
129 drm_err(&xe->drm, "%s late binding fw: Invalid FPT header length %u!\n", in parse_lb_layout()
136 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_lb_layout()
[all …]
xe_exec_queue.c
63 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
85 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
106 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
118 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
163 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
178 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
257 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, in xe_exec_queue_create() argument
266 …xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0))… in xe_exec_queue_create()
268 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
285 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
[all …]
xe_shrinker.c
27 struct xe_device *xe; member
57 static s64 __xe_shrinker_walk(struct xe_device *xe, in __xe_shrinker_walk() argument
66 struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); in __xe_shrinker_walk()
90 xe_assert(xe, !IS_ERR(ttm_bo)); in __xe_shrinker_walk()
102 static s64 xe_shrinker_walk(struct xe_device *xe, in xe_shrinker_walk() argument
113 lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); in xe_shrinker_walk()
120 lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); in xe_shrinker_walk()
129 lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned); in xe_shrinker_walk()
169 struct xe_device *xe = shrinker->xe; in xe_shrinker_runtime_pm_get() local
171 if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) || in xe_shrinker_runtime_pm_get()
[all …]
xe_step.c
117 struct xe_step_info xe_step_pre_gmdid_get(struct xe_device *xe) in xe_step_pre_gmdid_get() argument
121 u16 revid = xe->info.revid; in xe_step_pre_gmdid_get()
127 if (xe->info.platform == XE_PVC) { in xe_step_pre_gmdid_get()
128 baseid = FIELD_GET(GENMASK(5, 3), xe->info.revid); in xe_step_pre_gmdid_get()
129 revid = FIELD_GET(GENMASK(2, 0), xe->info.revid); in xe_step_pre_gmdid_get()
134 } else if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10) { in xe_step_pre_gmdid_get()
137 } else if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G11) { in xe_step_pre_gmdid_get()
140 } else if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G12) { in xe_step_pre_gmdid_get()
143 } else if (xe->info.platform == XE_ALDERLAKE_N) { in xe_step_pre_gmdid_get()
146 } else if (xe->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_S_RPLS) { in xe_step_pre_gmdid_get()
[all …]
xe_migrate.c
123 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte) in xe_migrate_vram_ofs() argument
131 if (GRAPHICS_VER(xe) >= 20 && is_comp_pte) in xe_migrate_vram_ofs()
133 (xe->mem.vram), SZ_1G); in xe_migrate_vram_ofs()
135 addr -= xe_vram_region_dpa_base(xe->mem.vram); in xe_migrate_vram_ofs()
139 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, in xe_migrate_program_identity() argument
142 struct xe_vram_region *vram = xe->mem.vram; in xe_migrate_program_identity()
151 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, in xe_migrate_program_identity()
154 xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M)); in xe_migrate_program_identity()
165 xe_map_wr(xe, &bo->vmap, ofs, u64, entry); in xe_migrate_program_identity()
167 flags = vm->pt_ops->pte_encode_addr(xe, 0, in xe_migrate_program_identity()
[all …]
xe_pagefault.c
78 needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic); in xe_pagefault_handle_vma()
104 xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); in xe_pagefault_handle_vma()
142 static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid) in xe_pagefault_asid_to_vm() argument
146 down_read(&xe->usm.lock); in xe_pagefault_asid_to_vm()
147 vm = xa_load(&xe->usm.asid_to_vm, asid); in xe_pagefault_asid_to_vm()
152 up_read(&xe->usm.lock); in xe_pagefault_asid_to_vm()
160 struct xe_device *xe = gt_to_xe(gt); in xe_pagefault_service() local
170 vm = xe_pagefault_asid_to_vm(xe, pf->consumer.asid); in xe_pagefault_service()
277 static int xe_pagefault_queue_init(struct xe_device *xe, in xe_pagefault_queue_init() argument
284 for_each_gt(gt, xe, id) { in xe_pagefault_queue_init()
[all …]
xe_gsc_submit.c
70 u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset, in xe_gsc_emit_header() argument
73 xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK)); in xe_gsc_emit_header()
78 xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE); in xe_gsc_emit_header()
80 mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER); in xe_gsc_emit_header()
81 mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id); in xe_gsc_emit_header()
82 mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id); in xe_gsc_emit_header()
83 mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION); in xe_gsc_emit_header()
84 mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE); in xe_gsc_emit_header()
95 void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset) in xe_gsc_poison_header() argument
97 xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE); in xe_gsc_poison_header()
[all …]
xe_wait_user_fence.c
58 static long to_jiffies_timeout(struct xe_device *xe, in to_jiffies_timeout() argument
104 struct xe_device *xe = to_xe_device(dev); in xe_wait_user_fence_ioctl() local
114 if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) || in xe_wait_user_fence_ioctl()
115 XE_IOCTL_DBG(xe, args->pad2) || in xe_wait_user_fence_ioctl()
116 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_wait_user_fence_ioctl()
119 if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS)) in xe_wait_user_fence_ioctl()
122 if (XE_IOCTL_DBG(xe, args->op > MAX_OP)) in xe_wait_user_fence_ioctl()
125 if (XE_IOCTL_DBG(xe, addr & 0x7)) in xe_wait_user_fence_ioctl()
130 if (XE_IOCTL_DBG(xe, !q)) in xe_wait_user_fence_ioctl()
134 timeout = to_jiffies_timeout(xe, args); in xe_wait_user_fence_ioctl()
[all …]
xe_sriov_pf_migration.h
15 int xe_sriov_pf_migration_init(struct xe_device *xe);
16 bool xe_sriov_pf_migration_supported(struct xe_device *xe);
17 void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
18 int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
21 xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid);
22 ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid);
23 wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid);
25 ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid,
27 ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid,
xe_gsc_proxy.c
206 static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset) in emit_proxy_header() argument
208 xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE); in emit_proxy_header()
210 proxy_header_wr(xe, map, offset, hdr, in emit_proxy_header()
214 proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD); in emit_proxy_header()
215 proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC); in emit_proxy_header()
216 proxy_header_wr(xe, map, offset, status, 0); in emit_proxy_header()
224 struct xe_device *xe = gt_to_xe(gt); in proxy_query() local
232 wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0, in proxy_query()
234 wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset); in proxy_query()
243 xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0); in proxy_query()
[all …]
xe_svm.c
54 vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
95 struct xe_device *xe = vm->xe; in xe_svm_garbage_collector_add_range() local
107 queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work); in xe_svm_garbage_collector_add_range()
121 struct xe_device *xe = vm->xe; in xe_svm_range_notifier_event_begin() local
145 for_each_tile(tile, xe, id) in xe_svm_range_notifier_event_begin()
202 struct xe_device *xe = vm->xe; in xe_svm_invalidate() local
212 vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm, in xe_svm_invalidate()
251 xe_device_wmb(xe); in xe_svm_invalidate()
260 for_each_tile(tile, xe, id) { in xe_svm_invalidate()
305 drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n"); in xe_svm_range_set_default_attr()
[all …]
/linux/drivers/gpu/drm/xe/tests/
xe_sriov_pf_service_kunit.c
19 struct xe_device *xe; in pf_service_test_init() local
24 xe = test->priv; in pf_service_test_init()
25 KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); in pf_service_test_init()
27 xe_sriov_pf_service_init(xe); in pf_service_test_init()
33 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major); in pf_service_test_init()
34 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
35 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major, in pf_service_test_init()
36 xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
37 if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) in pf_service_test_init()
38 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor, in pf_service_test_init()
[all …]
xe_kunit_helpers.c
36 struct xe_device *xe; in xe_kunit_helper_alloc_xe_device() local
38 xe = drm_kunit_helper_alloc_drm_device(test, dev, in xe_kunit_helper_alloc_xe_device()
41 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_alloc_xe_device()
42 return xe; in xe_kunit_helper_alloc_xe_device()
73 struct xe_device *xe; in xe_kunit_helper_xe_device_test_init() local
80 xe = xe_kunit_helper_alloc_xe_device(test, dev); in xe_kunit_helper_xe_device_test_init()
81 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_xe_device_test_init()
83 err = xe_pci_fake_device_init(xe); in xe_kunit_helper_xe_device_test_init()
89 test->priv = xe; in xe_kunit_helper_xe_device_test_init()
117 struct xe_device *xe = xe_device_const_cast(test->param_value); in xe_kunit_helper_xe_device_live_test_init() local
[all …]
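
The xe_kunit_helpers.c excerpt shows an init hook that allocates a fake xe device and hands it to the test through test->priv. A sketch of how a suite might wire that up follows; the case and suite names are made up and the include list is approximate, with only the init hook taken from the code above:

/*
 * Hypothetical KUnit sketch, not an existing test. It relies on
 * xe_kunit_helper_xe_device_test_init() shown above, which stores the
 * fake xe device in test->priv.
 */
#include <kunit/test.h>

#include "tests/xe_kunit_helpers.h"

static void example_xe_device_case(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	KUNIT_ASSERT_NOT_NULL(test, xe);
	/* ... exercise driver code against the fake device here ... */
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_xe_device_case),
	{}
};

static struct kunit_suite example_suite = {
	.name = "xe_example",
	.init = xe_kunit_helper_xe_device_test_init,
	.test_cases = example_cases,
};

kunit_test_suite(example_suite);
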
xe_bo.c
126 static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, in ccs_test_run_tile() argument
137 if (IS_DGFX(xe)) in ccs_test_run_tile()
142 bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, in ccs_test_run_tile()
171 static int ccs_test_run_device(struct xe_device *xe) in ccs_test_run_device() argument
177 if (!xe_device_has_flat_ccs(xe)) { in ccs_test_run_device()
183 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) { in ccs_test_run_device()
188 xe_pm_runtime_get(xe); in ccs_test_run_device()
190 for_each_tile(tile, xe, id) { in ccs_test_run_device()
192 if (!IS_DGFX(xe) && id > 0) in ccs_test_run_device()
194 ccs_test_run_tile(xe, tile, test); in ccs_test_run_device()
[all …]
/linux/include/drm/intel/
xe_sriov_vfio.h
28 bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
40 int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);
51 int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);
62 int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);
71 int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);
80 int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);
89 int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);
98 int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);
109 int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);
121 ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
[all …]
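
xe_sriov_vfio.h exports per-VF migration controls for a VFIO variant driver. The ordering below is speculative: it assumes these entry points follow the usual VFIO stop-copy flow (quiesce the VF, then enter stop-copy) and uses only the signatures shown above; the real calling contract is whatever the driver documents:

/*
 * Speculative sketch: the call order is assumed from the generic VFIO
 * stop-copy model, not taken from the driver. Only prototypes shown in
 * the excerpt above are used.
 */
#include <drm/intel/xe_sriov_vfio.h>

static int example_enter_stop_copy(struct xe_device *xe, unsigned int vfid)
{
	int ret;

	ret = xe_sriov_vfio_suspend_device(xe, vfid);	/* quiesce the VF */
	if (ret)
		return ret;

	ret = xe_sriov_vfio_stop_copy_enter(xe, vfid);	/* start producing state */
	if (ret)
		xe_sriov_vfio_error(xe, vfid);		/* assumed error-recovery hook */

	return ret;
}
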
/linux/Documentation/ABI/testing/
sysfs-driver-intel-xe-hwmon
1 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
4 Contact: intel-xe@lists.freedesktop.org
15 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
18 Contact: intel-xe@lists.freedesktop.org
24 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
27 Contact: intel-xe@lists.freedesktop.org
32 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
35 Contact: intel-xe@lists.freedesktop.org
41 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_max
44 Contact: intel-xe@lists.freedesktop.org
[all …]
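
The hwmon attributes documented above are ordinary sysfs files, so they can be read without any driver-specific tooling. A small userspace sketch follows; the PCI address and hwmon index are placeholders that differ per system, and hwmon power attributes report microwatts:

/*
 * Userspace sketch: reads power1_max for one xe device. The PCI address
 * and hwmon index in the path are examples only.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/drivers/xe/0000:03:00.0/hwmon/hwmon2/power1_max";
	long long microwatts;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lld", &microwatts) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	/* hwmon power values are expressed in microwatts. */
	printf("power1_max = %lld uW\n", microwatts);
	return 0;
}
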
