
Searched refs:xe (Results 1 – 25 of 326) sorted by relevance


/linux/drivers/gpu/drm/xe/
xe_pm.c
91 bool xe_rpm_reclaim_safe(const struct xe_device *xe) in xe_rpm_reclaim_safe() argument
93 return !xe->d3cold.capable && !xe->info.has_sriov; in xe_rpm_reclaim_safe()
96 static void xe_rpm_lockmap_acquire(const struct xe_device *xe) in xe_rpm_lockmap_acquire() argument
98 lock_map_acquire(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_acquire()
103 static void xe_rpm_lockmap_release(const struct xe_device *xe) in xe_rpm_lockmap_release() argument
105 lock_map_release(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_release()
116 int xe_pm_suspend(struct xe_device *xe) in xe_pm_suspend() argument
122 drm_dbg(&xe->drm, "Suspending device\n"); in xe_pm_suspend()
123 trace_xe_pm_suspend(xe, __builtin_return_address(0)); in xe_pm_suspend()
125 for_each_gt(gt, xe, id) in xe_pm_suspend()
[all …]
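The xe_pm.c fragments show xe_pm_suspend() walking every GT, and the lockmap helpers selecting a lockdep map based on xe_rpm_reclaim_safe() (true only when the device is neither d3cold-capable nor SR-IOV). A minimal sketch of the suspend loop's shape, assuming xe_gt_suspend() as the per-GT callee:

    struct xe_gt *gt;
    u8 id;

    for_each_gt(gt, xe, id)     /* visit every GT on every tile */
        xe_gt_suspend(gt);      /* assumed per-GT suspend helper */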
xe_device.c
67 struct xe_device *xe = to_xe_device(dev); in xe_file_open() local
85 xef->xe = xe; in xe_file_open()
148 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
154 xe_pm_runtime_get(xe); in xe_file_close()
173 xe_pm_runtime_put(xe); in xe_file_close()
199 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl() local
202 if (xe_device_wedged(xe)) in xe_drm_ioctl()
205 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_ioctl()
208 xe_pm_runtime_put(xe); in xe_drm_ioctl()
217 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl() local
[all …]
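The xe_drm_ioctl() fragments show the driver's ioctl wrapper pattern: reject everything once the device is wedged, then hold a runtime-PM reference across the call. A hedged reconstruction of how these pieces plausibly fit together (the -ECANCELED code and the drm_ioctl() forwarding are assumptions):

    static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
        struct drm_file *file_priv = file->private_data;
        struct xe_device *xe = to_xe_device(file_priv->minor->dev);
        long ret;

        if (xe_device_wedged(xe))
            return -ECANCELED;                   /* assumed error code */

        ret = xe_pm_runtime_get_ioctl(xe);       /* may fail instead of sleeping */
        if (ret >= 0)
            ret = drm_ioctl(file, cmd, arg);     /* forward to the DRM core */
        xe_pm_runtime_put(xe);

        return ret;
    }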
xe_irq.c
33 static int xe_irq_msix_init(struct xe_device *xe);
34 static void xe_irq_msix_free(struct xe_device *xe);
35 static int xe_irq_msix_request_irqs(struct xe_device *xe);
36 static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
45 drm_WARN(&mmio->tile->xe->drm, 1, in assert_iir_is_zero()
93 static u32 xelp_intr_disable(struct xe_device *xe) in xelp_intr_disable() argument
95 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xelp_intr_disable()
109 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) in gu_misc_irq_ack() argument
111 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in gu_misc_irq_ack()
124 static inline void xelp_intr_enable(struct xe_device *xe, bool stall) in xelp_intr_enable() argument
[all …]
xe_sriov_vf.c
130 void xe_sriov_vf_init_early(struct xe_device *xe) in xe_sriov_vf_init_early() argument
132 INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func); in xe_sriov_vf_init_early()
146 static int vf_post_migration_requery_guc(struct xe_device *xe) in vf_post_migration_requery_guc() argument
152 for_each_gt(gt, xe, id) { in vf_post_migration_requery_guc()
167 static bool vf_post_migration_imminent(struct xe_device *xe) in vf_post_migration_imminent() argument
169 return xe->sriov.vf.migration.gt_flags != 0 || in vf_post_migration_imminent()
170 work_pending(&xe->sriov.vf.migration.worker); in vf_post_migration_imminent()
176 static void vf_post_migration_notify_resfix_done(struct xe_device *xe) in vf_post_migration_notify_resfix_done() argument
181 for_each_gt(gt, xe, id) { in vf_post_migration_notify_resfix_done()
182 if (vf_post_migration_imminent(xe)) in vf_post_migration_notify_resfix_done()
[all …]
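vf_post_migration_imminent() lets the post-migration fixup bail out when yet another migration is already pending (a GT flagged in gt_flags, or the worker re-queued). A hedged sketch of how such a guard is typically used; the per-GT fixup body is left abstract:

    struct xe_gt *gt;
    u8 id;

    for_each_gt(gt, xe, id) {
        if (vf_post_migration_imminent(xe))
            return;  /* a newer migration supersedes this fixup (assumption) */
        /* ... re-query GuC state and fix up this GT's resources ... */
    }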
xe_sriov.c
39 static bool test_is_vf(struct xe_device *xe) in test_is_vf() argument
41 u32 value = xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG); in test_is_vf()
57 void xe_sriov_probe_early(struct xe_device *xe) in xe_sriov_probe_early() argument
59 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_sriov_probe_early()
61 bool has_sriov = xe->info.has_sriov; in xe_sriov_probe_early()
64 if (test_is_vf(xe)) in xe_sriov_probe_early()
66 else if (xe_sriov_pf_readiness(xe)) in xe_sriov_probe_early()
76 drm_info(&xe->drm, "Support for SR-IOV is not available\n"); in xe_sriov_probe_early()
80 xe_assert(xe, !xe->sriov.__mode); in xe_sriov_probe_early()
81 xe->sriov.__mode = mode; in xe_sriov_probe_early()
[all …]
xe_device.h
34 static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe) in xe_device_const_cast() argument
36 return (struct xe_device *)xe; in xe_device_const_cast()
46 int xe_device_probe_early(struct xe_device *xe);
47 int xe_device_probe(struct xe_device *xe);
48 void xe_device_remove(struct xe_device *xe);
49 void xe_device_shutdown(struct xe_device *xe);
51 void xe_device_wmb(struct xe_device *xe);
58 static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe) in xe_device_get_root_tile() argument
60 return &xe->tiles[0]; in xe_device_get_root_tile()
73 static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id) in xe_device_get_gt() argument
[all …]
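xe_device_const_cast() strips const so that const-qualified code paths (like the const struct xe_device * predicates seen above) can reach APIs lacking const variants; the getters are plain accessors. A small usage sketch (the enclosing function is hypothetical):

    static void example(struct xe_device *xe)   /* hypothetical */
    {
        struct xe_tile *root = xe_device_get_root_tile(xe); /* always &xe->tiles[0] */
        struct xe_gt *gt0 = xe_device_get_gt(xe, 0);

        /* ... use root and gt0 ... */
    }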
xe_pci_sriov.c
27 static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_provision_vfs() argument
33 for_each_gt(gt, xe, id) { in pf_provision_vfs()
43 static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_unprovision_vfs() argument
49 for_each_gt(gt, xe, id) in pf_unprovision_vfs()
54 static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_reset_vfs() argument
60 for_each_gt(gt, xe, id) in pf_reset_vfs()
65 static int pf_enable_vfs(struct xe_device *xe, int num_vfs) in pf_enable_vfs() argument
67 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in pf_enable_vfs()
68 int total_vfs = xe_sriov_pf_get_totalvfs(xe); in pf_enable_vfs()
71 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_enable_vfs()
[all …]
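pf_enable_vfs() pairs the driver's per-GT provisioning with the PCI core's SR-IOV enable. A hedged reconstruction, assuming provisioning precedes pci_enable_sriov() and is rolled back on failure:

    static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
    {
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        int err;

        xe_assert(xe, IS_SRIOV_PF(xe));

        err = pf_provision_vfs(xe, num_vfs);    /* shown above */
        if (err < 0)
            goto failed;

        err = pci_enable_sriov(pdev, num_vfs);  /* standard PCI core API */
        if (err < 0)
            goto failed;

        return 0;

    failed:
        pf_unprovision_vfs(xe, num_vfs);        /* rollback (assumption) */
        return err;
    }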
xe_pat.c
156 u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index) in xe_pat_index_get_coh_mode() argument
158 WARN_ON(pat_index >= xe->pat.n_entries); in xe_pat_index_get_coh_mode()
159 return xe->pat.table[pat_index].coh_mode; in xe_pat_index_get_coh_mode()
184 struct xe_device *xe = gt_to_xe(gt); in xelp_dump() local
194 for (i = 0; i < xe->pat.n_entries; i++) { in xelp_dump()
212 struct xe_device *xe = gt_to_xe(gt); in xehp_dump() local
222 for (i = 0; i < xe->pat.n_entries; i++) { in xehp_dump()
242 struct xe_device *xe = gt_to_xe(gt); in xehpc_dump() local
252 for (i = 0; i < xe->pat.n_entries; i++) { in xehpc_dump()
270 struct xe_device *xe = gt_to_xe(gt); in xelpg_dump() local
[all …]
xe_sriov_pf.c
15 static unsigned int wanted_max_vfs(struct xe_device *xe) in wanted_max_vfs() argument
20 static int pf_reduce_totalvfs(struct xe_device *xe, int limit) in pf_reduce_totalvfs() argument
22 struct device *dev = xe->drm.dev; in pf_reduce_totalvfs()
28 xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n", in pf_reduce_totalvfs()
33 static bool pf_continue_as_native(struct xe_device *xe, const char *why) in pf_continue_as_native() argument
35 xe_sriov_dbg(xe, "%s, continuing as native\n", why); in pf_continue_as_native()
36 pf_reduce_totalvfs(xe, 0); in pf_continue_as_native()
49 bool xe_sriov_pf_readiness(struct xe_device *xe) in xe_sriov_pf_readiness() argument
51 struct device *dev = xe->drm.dev; in xe_sriov_pf_readiness()
54 int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs); in xe_sriov_pf_readiness()
[all …]
xe_bo_evict.c
27 int xe_bo_evict_all(struct xe_device *xe) in xe_bo_evict_all() argument
29 struct ttm_device *bdev = &xe->ttm; in xe_bo_evict_all()
50 if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))) in xe_bo_evict_all()
62 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
64 bo = list_first_entry_or_null(&xe->pinned.external_vram, in xe_bo_evict_all()
70 spin_unlock(&xe->pinned.lock); in xe_bo_evict_all()
77 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
79 &xe->pinned.external_vram); in xe_bo_evict_all()
80 spin_unlock(&xe->pinned.lock); in xe_bo_evict_all()
84 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
[all …]
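xe_bo_evict_all() pops one pinned BO at a time under xe->pinned.lock and drops the lock while actually evicting, which is why the list_first_entry_or_null()/spin_unlock() pairing recurs in the excerpt. A sketch of that pattern; the pinned_link member and the holding list are assumptions:

    struct xe_bo *bo;
    LIST_HEAD(still_in_list);   /* hypothetical holding list */

    spin_lock(&xe->pinned.lock);
    for (;;) {
        bo = list_first_entry_or_null(&xe->pinned.external_vram,
                                      typeof(*bo), pinned_link);
        if (!bo)
            break;
        xe_bo_get(bo);          /* keep the BO alive across the unlock */
        list_move_tail(&bo->pinned_link, &still_in_list);
        spin_unlock(&xe->pinned.lock);

        /* ... evict the BO without holding the spinlock ... */

        xe_bo_put(bo);
        spin_lock(&xe->pinned.lock);
    }
    spin_unlock(&xe->pinned.lock);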
xe_pci.c
454 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc) in find_subplatform() argument
461 if (*id == xe->info.devid) in find_subplatform()
472 static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) in read_gmdid() argument
474 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in read_gmdid()
478 KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); in read_gmdid()
480 if (IS_SRIOV_VF(xe)) { in read_gmdid()
481 struct xe_gt *gt = xe_root_mmio_gt(xe); in read_gmdid()
541 static void handle_pre_gmdid(struct xe_device *xe, in handle_pre_gmdid() argument
545 xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel; in handle_pre_gmdid()
548 xe->info.media_verx100 = media->ver * 100 + media->rel; in handle_pre_gmdid()
[all …]
xe_ttm_stolen_mgr.c
55 bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe) in xe_ttm_stolen_cpu_access_needs_ggtt() argument
57 return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe); in xe_ttm_stolen_cpu_access_needs_ggtt()
60 static u32 get_wopcm_size(struct xe_device *xe) in get_wopcm_size() argument
65 val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED); in get_wopcm_size()
83 static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) in detect_bar2_dgfx() argument
85 struct xe_tile *tile = xe_device_get_root_tile(xe); in detect_bar2_dgfx()
86 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in detect_bar2_dgfx()
87 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in detect_bar2_dgfx()
92 tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start; in detect_bar2_dgfx()
97 if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base)) in detect_bar2_dgfx()
[all …]
xe_debugfs.c
39 struct xe_device *xe = node_to_xe(m->private); in info() local
44 xe_pm_runtime_get(xe); in info()
46 drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100); in info()
47 drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100); in info()
49 xe_step_name(xe->info.step.graphics), in info()
50 xe_step_name(xe->info.step.media), in info()
51 xe_step_name(xe->info.step.basedie)); in info()
52 drm_printf(&p, "is_dgfx %s\n", str_yes_no(xe->info.is_dgfx)); in info()
53 drm_printf(&p, "platform %d\n", xe->info.platform); in info()
55 xe->info.subplatform > XE_SUBPLATFORM_NONE ? xe->info.subplatform : 0); in info()
[all …]
xe_vram.c
27 _resize_bar(struct xe_device *xe, int resno, resource_size_t size) in _resize_bar() argument
29 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in _resize_bar()
38 …drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support… in _resize_bar()
43 drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); in _resize_bar()
50 static void resize_vram_bar(struct xe_device *xe) in resize_vram_bar() argument
53 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in resize_vram_bar()
78 drm_info(&xe->drm, in resize_vram_bar()
96 drm_info(&xe->drm, "Attempting to resize bar from %lluMiB -> %lluMiB\n", in resize_vram_bar()
109 …drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing. Consider enabling 'Resiza… in resize_vram_bar()
116 _resize_bar(xe, LMEM_BAR, rebar_size); in resize_vram_bar()
[all …]
xe_sriov_printk.h
14 #define xe_sriov_printk_prefix(xe) \ argument
15 ((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \
16 (xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "")
18 #define xe_sriov_printk(xe, _level, fmt, ...) \ argument
19 drm_##_level(&(xe)->drm, "%s" fmt, xe_sriov_printk_prefix(xe), ##__VA_ARGS__)
21 #define xe_sriov_err(xe, fmt, ...) \ argument
22 xe_sriov_printk((xe), err, fmt, ##__VA_ARGS__)
24 #define xe_sriov_err_ratelimited(xe, fmt, ...) \ argument
25 xe_sriov_printk((xe), err_ratelimited, fmt, ##__VA_ARGS__)
27 #define xe_sriov_warn(xe, fmt, ...) \ argument
[all …]
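These wrappers prepend "PF: " or "VF: " to each message according to the probed SR-IOV mode, then hand off to the matching drm_* logger. Usage sketch (the message text is illustrative):

    xe_sriov_err(xe, "provisioning failed (%pe)\n", ERR_PTR(err));
    /* on a PF this reaches drm_err() as "PF: provisioning failed (...)" */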
xe_heci_gsc.c
91 void xe_heci_gsc_fini(struct xe_device *xe) in xe_heci_gsc_fini() argument
93 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in xe_heci_gsc_fini()
95 if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi) in xe_heci_gsc_fini()
111 static int heci_gsc_irq_setup(struct xe_device *xe) in heci_gsc_irq_setup() argument
113 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in heci_gsc_irq_setup()
118 drm_err(&xe->drm, "gsc irq error %d\n", heci_gsc->irq); in heci_gsc_irq_setup()
124 drm_err(&xe->drm, "gsc irq init failed %d\n", ret); in heci_gsc_irq_setup()
129 static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *def) in heci_gsc_add_device() argument
131 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in heci_gsc_add_device()
132 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in heci_gsc_add_device()
[all …]
xe_rtp.c
27 static bool has_samedia(const struct xe_device *xe) in has_samedia() argument
29 return xe->info.media_verx100 >= 1300; in has_samedia()
32 static bool rule_matches(const struct xe_device *xe, in rule_matches() argument
52 match = xe->info.platform == r->platform; in rule_matches()
55 match = xe->info.platform == r->platform && in rule_matches()
56 xe->info.subplatform == r->subplatform; in rule_matches()
59 match = xe->info.graphics_verx100 == r->ver_start && in rule_matches()
60 (!has_samedia(xe) || !xe_gt_is_media_type(gt)); in rule_matches()
63 match = xe->info.graphics_verx100 >= r->ver_start && in rule_matches()
64 xe->info.graphics_verx100 <= r->ver_end && in rule_matches()
[all …]
xe_sriov.h
18 void xe_sriov_probe_early(struct xe_device *xe);
19 void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
20 int xe_sriov_init(struct xe_device *xe);
22 static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe) in xe_device_sriov_mode() argument
24 xe_assert(xe, xe->sriov.__mode); in xe_device_sriov_mode()
25 return xe->sriov.__mode; in xe_device_sriov_mode()
28 static inline bool xe_device_is_sriov_pf(const struct xe_device *xe) in xe_device_is_sriov_pf() argument
30 return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_PF; in xe_device_is_sriov_pf()
33 static inline bool xe_device_is_sriov_vf(const struct xe_device *xe) in xe_device_is_sriov_vf() argument
35 return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_VF; in xe_device_is_sriov_vf()
[all …]
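Note the ordering contract visible across these excerpts: xe_device_sriov_mode() asserts that sriov.__mode is already set, and __mode is only assigned at the end of xe_sriov_probe_early(), so these predicates are valid only after early probe. Usage sketch (branch bodies are illustrative):

    if (xe_device_is_sriov_pf(xe)) {
        /* PF-only setup, e.g. VF provisioning */
    } else if (xe_device_is_sriov_vf(xe)) {
        /* VF-only setup, e.g. the migration worker */
    }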
xe_pm.h
15 int xe_pm_suspend(struct xe_device *xe);
16 int xe_pm_resume(struct xe_device *xe);
18 int xe_pm_init_early(struct xe_device *xe);
19 int xe_pm_init(struct xe_device *xe);
20 void xe_pm_runtime_fini(struct xe_device *xe);
21 bool xe_pm_runtime_suspended(struct xe_device *xe);
22 int xe_pm_runtime_suspend(struct xe_device *xe);
23 int xe_pm_runtime_resume(struct xe_device *xe);
24 void xe_pm_runtime_get(struct xe_device *xe);
25 int xe_pm_runtime_get_ioctl(struct xe_device *xe);
[all …]
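The get/put pairs in this header are used exactly as in the xe_file_close() excerpt above: take a runtime-PM reference before touching hardware, release it afterwards. The _ioctl variant can fail and must have its return value checked, as xe_drm_ioctl() does above. A minimal sketch:

    xe_pm_runtime_get(xe);
    /* ... hardware access that needs the device awake ... */
    xe_pm_runtime_put(xe);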
xe_mmio.c
28 struct xe_device *xe = arg; in tiles_fini() local
32 for_each_remote_tile(tile, xe, id) in tiles_fini()
55 static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) in mmio_multi_tile_setup() argument
65 if (xe->info.tile_count == 1) in mmio_multi_tile_setup()
69 if (!xe->info.skip_mtcfg) { in mmio_multi_tile_setup()
70 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in mmio_multi_tile_setup()
82 if (tile_count < xe->info.tile_count) { in mmio_multi_tile_setup()
83 drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n", in mmio_multi_tile_setup()
84 xe->info.tile_count, tile_count); in mmio_multi_tile_setup()
85 xe->info.tile_count = tile_count; in mmio_multi_tile_setup()
[all …]
xe_bo.c
77 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
133 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) in mem_type_to_migrate() argument
137 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); in mem_type_to_migrate()
138 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
144 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region() local
147 xe_assert(xe, resource_is_vram(res)); in res_to_mem_region()
148 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
152 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, in try_add_system() argument
156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
[all …]
/linux/drivers/gpu/drm/xe/display/
xe_display.c
36 static bool has_display(struct xe_device *xe) in has_display() argument
38 return HAS_DISPLAY(&xe->display); in has_display()
72 static void unset_display_features(struct xe_device *xe) in unset_display_features() argument
74 xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); in unset_display_features()
79 struct xe_device *xe = to_xe_device(dev); in display_destroy() local
81 destroy_workqueue(xe->display.hotplug.dp_wq); in display_destroy()
95 int xe_display_create(struct xe_device *xe) in xe_display_create() argument
97 spin_lock_init(&xe->display.fb_tracking.lock); in xe_display_create()
99 xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0); in xe_display_create()
101 return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL); in xe_display_create()
[all …]
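xe_display_create() pairs the ordered "xe-dp" workqueue with a drmm_add_action_or_reset() cleanup, so display_destroy() tears the queue down together with the drm_device. A hedged reconstruction; the allocation-failure check is an assumption the excerpt elides:

    int xe_display_create(struct xe_device *xe)
    {
        spin_lock_init(&xe->display.fb_tracking.lock);

        xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
        if (!xe->display.hotplug.dp_wq)     /* assumed NULL check */
            return -ENOMEM;

        return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
    }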
xe_display.h
17 void xe_display_driver_remove(struct xe_device *xe);
19 int xe_display_create(struct xe_device *xe);
21 int xe_display_probe(struct xe_device *xe);
23 int xe_display_init_nommio(struct xe_device *xe);
24 int xe_display_init_noirq(struct xe_device *xe);
25 int xe_display_init_noaccel(struct xe_device *xe);
26 int xe_display_init(struct xe_device *xe);
27 void xe_display_fini(struct xe_device *xe);
29 void xe_display_register(struct xe_device *xe);
30 void xe_display_unregister(struct xe_device *xe);
[all …]
/linux/drivers/gpu/drm/xe/compat-i915-headers/
i915_drv.h
25 #define IS_PLATFORM(xe, x) ((xe)->info.platform == x) argument
76 #define IS_MOBILE(xe) (xe && 0) argument
78 #define IS_TIGERLAKE_UY(xe) (xe && 0) argument
79 #define IS_COMETLAKE_ULX(xe) (xe && 0) argument
80 #define IS_COFFEELAKE_ULX(xe) (xe && 0) argument
81 #define IS_KABYLAKE_ULX(xe) (xe && 0) argument
82 #define IS_SKYLAKE_ULX(xe) (xe && 0) argument
83 #define IS_HASWELL_ULX(xe) (xe && 0) argument
84 #define IS_COMETLAKE_ULT(xe) (xe && 0) argument
85 #define IS_COFFEELAKE_ULT(xe) (xe && 0) argument
[all …]
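The (xe && 0) stubs always evaluate to false while still referencing the argument, so display code shared with i915 keeps compiling (no unused-variable warnings, and xe still gets type-checked) even though none of these platforms can be an xe device. Illustration (the caller is hypothetical):

    if (IS_MOBILE(xe))            /* expands to (xe && 0): constant false */
        apply_mobile_fixups(xe);  /* hypothetical; branch is compiled out */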
/linux/drivers/gpu/drm/xe/tests/
xe_migrate.c
15 static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence, in sanity_fence_failed() argument
37 static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe, in run_sanity_job() argument
41 u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm); in run_sanity_job()
57 if (sanity_fence_failed(xe, fence, str, test)) in run_sanity_job()
75 struct xe_device *xe = tile_to_xe(m->tile); in test_copy() local
82 struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL, in test_copy()
108 xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); in test_copy()
111 if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" : in test_copy()
113 retval = xe_map_rd(xe, &remote->vmap, 0, u64); in test_copy()
116 retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64); in test_copy()
[all …]
