Searched +full:0 +full:xe (Results 1 – 25 of 1007) sorted by relevance

/linux/drivers/gpu/drm/xe/
xe_pm.c
34 * DOC: Xe Power Management
36 * Xe PM implements the main routines for both system level suspend states and
56 * to perform the transition from D3hot to D3cold. Xe may disallow this
64 * (PC-states), and/or other low level power states. Xe PM component provides
68 * Also, Xe PM provides get and put functions that Xe driver will use to
95 lock_acquire_shared_recursive(&xe_pm_block_lockdep_map, 0, 1, NULL, _RET_IP_); in xe_pm_block_begin_signalling()
117 * @xe: The xe device about to be suspended.
129 * Return: %0 on success, %-ERESTARTSYS on signal pending or
132 int xe_pm_block_on_suspend(struct xe_device *xe) in xe_pm_block_on_suspend() argument
136 return wait_for_completion_interruptible(&xe->pm_block); in xe_pm_block_on_suspend()
[all …]
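The kernel-doc above says xe_pm_block_on_suspend() returns %0 on success or %-ERESTARTSYS when a signal arrives while waiting on the pm_block completion. A minimal caller sketch under those documented semantics; the surrounding function is hypothetical:

static int example_task_body(struct xe_device *xe)
{
	int err;

	/* Wait until any in-progress suspend has finished signalling.
	 * The wait is interruptible, so a pending signal aborts with
	 * -ERESTARTSYS instead of blocking the task. */
	err = xe_pm_block_on_suspend(xe);
	if (err)
		return err;

	/* ... safe to proceed with respect to suspend here ... */
	return 0;
}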
xe_device.c
84 struct xe_device *xe = to_xe_device(dev); in xe_file_open() local
102 xef->xe = xe; in xe_file_open()
120 return 0; in xe_file_open()
138 * xe_file_get() - Take a reference to the xe file object
139 * @xef: Pointer to the xe file
141 * Anyone with a pointer to xef must take a reference to the xe file
144 * Return: xe file pointer
153 * xe_file_put() - Drop a reference to the xe file object
154 * @xef: Pointer to the xe file
165 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
[all …]
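Per the xe_file_get()/xe_file_put() kernel-doc above, anyone keeping a struct xe_file pointer must hold their own reference. A hedged sketch of that pattern; the deferred-use context is illustrative:

static void example_defer_work(struct xe_file *xef)
{
	/* Take a reference before stashing the pointer for later use;
	 * per the doc, xe_file_get() returns the xe file pointer. */
	xef = xe_file_get(xef);

	/* ... hand xef to a worker, fence callback, etc. ... */

	/* Drop the reference once the deferred user is done with it. */
	xe_file_put(xef);
}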
xe_sriov_pf_migration.c
19 static struct xe_sriov_migration_state *pf_pick_migration(struct xe_device *xe, unsigned int vfid) in pf_pick_migration() argument
21 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_pick_migration()
22 xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); in pf_pick_migration()
24 return &xe->sriov.pf.vfs[vfid].migration; in pf_pick_migration()
29 * @xe: the &xe_device
34 wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid) in xe_sriov_pf_migration_waitqueue() argument
36 return &pf_pick_migration(xe, vfid)->wq; in xe_sriov_pf_migration_waitqueue()
41 * @xe: the &xe_device
45 bool xe_sriov_pf_migration_supported(struct xe_device *xe) in xe_sriov_pf_migration_supported() argument
47 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_migration_supported()
[all …]
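xe_sriov_pf_migration_waitqueue() above hands back the per-VF wait_queue_head_t, which suggests a wait_event-style consumer. A sketch under that assumption; migration_data_ready() is a made-up condition, not a real helper:

/* Hypothetical readiness predicate, for illustration only. */
static bool migration_data_ready(struct xe_device *xe, unsigned int vfid);

static int example_wait_migration(struct xe_device *xe, unsigned int vfid)
{
	wait_queue_head_t *wq = xe_sriov_pf_migration_waitqueue(xe, vfid);

	/* Sleep until the (hypothetical) producer signals new state. */
	return wait_event_interruptible(*wq, migration_data_ready(xe, vfid));
}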
xe_sriov_packet.c
15 static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid) in pf_migration_mutex() argument
17 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_migration_mutex()
18 xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); in pf_migration_mutex()
20 return &xe->sriov.pf.vfs[vfid].migration.lock; in pf_migration_mutex()
23 static struct xe_sriov_packet **pf_pick_pending(struct xe_device *xe, unsigned int vfid) in pf_pick_pending() argument
25 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_pick_pending()
26 xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); in pf_pick_pending()
27 lockdep_assert_held(pf_migration_mutex(xe, vfid)); in pf_pick_pending()
29 return &xe->sriov.pf.vfs[vfid].migration.pending; in pf_pick_pending()
33 pf_pick_descriptor(struct xe_device *xe, unsigned int vfid) in pf_pick_descriptor() argument
[all …]
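pf_pick_pending() above asserts that its caller holds the per-VF migration mutex (lockdep_assert_held). An illustrative caller honoring that contract and claiming the pending packet; the ownership-transfer detail is an assumption, not code from this file:

static struct xe_sriov_packet *example_claim_pending(struct xe_device *xe,
						     unsigned int vfid)
{
	struct xe_sriov_packet **pending, *pkt;

	mutex_lock(pf_migration_mutex(xe, vfid));
	pending = pf_pick_pending(xe, vfid);	/* lockdep assert satisfied */
	pkt = *pending;
	*pending = NULL;			/* take ownership under the lock */
	mutex_unlock(pf_migration_mutex(xe, vfid));

	return pkt;
}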
xe_bo_evict.c
15 static int xe_bo_apply_to_pinned(struct xe_device *xe, in xe_bo_apply_to_pinned() argument
22 int ret = 0; in xe_bo_apply_to_pinned()
24 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
32 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
36 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
44 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
47 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
50 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
58 * @xe: xe device
62 * Return: 0 on success, negative error code on error.
[all …]
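The lock choreography visible above (take xe->pinned.lock, drop it, retake it) is the classic "walk a list under a spinlock while the per-entry work may sleep" shape. A reconstructed sketch of that shape only; the pinned_link member, the still_in_list bookkeeping, and the lack of reference counting are simplifying assumptions, not the file's actual code:

static int example_apply_to_pinned(struct xe_device *xe,
				   struct list_head *list,
				   int (*apply)(struct xe_bo *bo))
{
	LIST_HEAD(still_in_list);
	struct xe_bo *bo;
	int ret = 0;

	spin_lock(&xe->pinned.lock);
	while ((bo = list_first_entry_or_null(list, struct xe_bo, pinned_link))) {
		/* Park the entry on a private list so the walk terminates. */
		list_move_tail(&bo->pinned_link, &still_in_list);
		spin_unlock(&xe->pinned.lock);

		ret = apply(bo);	/* may sleep; spinlock dropped around it */

		spin_lock(&xe->pinned.lock);
		if (ret)
			break;
	}
	list_splice(&still_in_list, list);
	spin_unlock(&xe->pinned.lock);

	return ret;
}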
xe_pci.c
129 .has_access_counter = 0,
226 static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
248 static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
305 static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
306 static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
307 static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
396 static const u16 bmg_g21_ids[] = { INTEL_BMG_G21_IDS(NOP), 0 };
535 if (negative && strcmp(devices, "!*") == 0) in device_id_in_list()
537 if (!negative && strcmp(devices, "*") == 0) in device_id_in_list()
547 if (negative && tok[0] == '!') in device_id_in_list()
[all …]
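The device_id_in_list() fragments above show the wildcard rules: a negative list of "!*" matches everything, a positive list of "*" matches everything, and individual tokens may carry a '!' prefix. A minimal re-sketch of just the wildcard short-circuit (the per-token parsing is not reproduced):

static bool example_list_matches_all(const char *devices, bool negative)
{
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;
	return false;	/* fall through to per-token matching */
}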
xe_sriov_pf_service.c
18 * @xe: the &xe_device to initialize
24 void xe_sriov_pf_service_init(struct xe_device *xe) in xe_sriov_pf_service_init() argument
29 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_service_init()
32 xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR; in xe_sriov_pf_service_init()
33 xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR; in xe_sriov_pf_service_init()
36 xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR; in xe_sriov_pf_service_init()
37 xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR; in xe_sriov_pf_service_init()
40 /* Return: 0 on success or a negative error code on failure. */
41 static int pf_negotiate_version(struct xe_device *xe, in pf_negotiate_version() argument
45 struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base; in pf_negotiate_version()
[all …]
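xe_sriov_pf_service_init() above seeds a [base, latest] version window and pf_negotiate_version() starts by loading base. The actual negotiation policy is not visible in this excerpt; the sketch below is one plausible clamp-to-window scheme, with the error code and the minor-version clamp chosen arbitrarily:

static int example_negotiate(struct xe_device *xe,
			     u32 wanted_major, u32 wanted_minor,
			     u32 *major, u32 *minor)
{
	struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base;
	struct xe_sriov_pf_service_version latest = xe->sriov.pf.service.version.latest;

	/* Reject requests outside the supported [base, latest] window. */
	if (wanted_major < base.major || wanted_major > latest.major)
		return -ENOPKG;	/* illustrative error choice */

	*major = wanted_major;
	*minor = min(wanted_minor, latest.minor);	/* illustrative clamp */
	return 0;
}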
xe_heci_gsc.c
19 #define GSC_BAR_LENGTH 0x00000FFC
102 if (heci_gsc->irq >= 0) in xe_heci_gsc_fini()
108 static int heci_gsc_irq_setup(struct xe_device *xe) in heci_gsc_irq_setup() argument
110 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in heci_gsc_irq_setup()
113 heci_gsc->irq = irq_alloc_desc(0); in heci_gsc_irq_setup()
114 if (heci_gsc->irq < 0) { in heci_gsc_irq_setup()
115 drm_err(&xe->drm, "gsc irq error %d\n", heci_gsc->irq); in heci_gsc_irq_setup()
120 if (ret < 0) in heci_gsc_irq_setup()
121 drm_err(&xe->drm, "gsc irq init failed %d\n", ret); in heci_gsc_irq_setup()
126 static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *def) in heci_gsc_add_device() argument
[all …]
xe_bo.c
53 .fpfn = 0,
54 .lpfn = 0,
56 .flags = 0,
68 .fpfn = 0,
69 .lpfn = 0,
74 .fpfn = 0,
75 .lpfn = 0,
87 for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
95 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
97 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
[all …]
xe_late_bind_fw.c
58 struct xe_device *xe = late_bind_fw_to_xe(lb_fw); in parse_cpd_header() local
63 u32 offset = 0; in parse_cpd_header()
67 xe_assert(xe, manifest_entry); in parse_cpd_header()
73 drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n", in parse_cpd_header()
80 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
87 for (i = 0; i < header->num_of_entries; i++, entry++) in parse_cpd_header()
88 if (strcmp(entry->name, manifest_entry) == 0) in parse_cpd_header()
92 drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n", in parse_cpd_header()
99 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
108 return 0; in parse_cpd_header()
[all …]
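parse_cpd_header() above repeatedly bounds-checks the firmware blob ("too small! %zu < %zu") before dereferencing each structure. The shape of those checks, factored into an illustrative helper; the error code is a guess, not taken from the file:

static int example_check_min_size(struct xe_device *xe, const char *what,
				  size_t have, size_t need)
{
	if (have < need) {
		drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
			what, have, need);
		return -ENODATA;	/* illustrative error choice */
	}
	return 0;
}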
xe_exec_queue.c
104 XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
110 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
145 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) in __xe_exec_queue_free()
167 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
172 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) { in alloc_dep_schedulers()
188 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
197 return 0; in alloc_dep_schedulers()
200 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
249 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
264 err = exec_queue_user_extensions(xe, q, extensions); in __xe_exec_queue_alloc()
[all …]
xe_pmu.c
20 * DOC: Xe PMU (Performance Monitoring Unit)
28 * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/events/
29 * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/format/
42 * For gt-specific events (gt-*) the gt parameter must be passed. All other parameters will be 0.
58 #define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
85 #define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
86 #define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
87 #define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
88 #define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
89 #define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
[all …]
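The defines above give the event IDs (0x01 through 0x05) and an ID mask covering bits 11:0 of the perf config word. The bit position of the gt parameter is not shown in this excerpt, so EXAMPLE_GT_SHIFT below is an assumption; the helper only illustrates how a config value would be composed:

#define EXAMPLE_GT_SHIFT	60	/* hypothetical bit position for "gt" */

static u64 example_pmu_config(u64 event_id, u64 gt)
{
	return (event_id & XE_PMU_EVENT_ID_MASK) | (gt << EXAMPLE_GT_SHIFT);
}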
xe_sriov_vf_ccs.c
121 static u64 get_ccs_bb_pool_size(struct xe_device *xe) in get_ccs_bb_pool_size() argument
128 ccs_mem_size = div64_u64(sys_mem_size, NUM_BYTES_PER_CCS_BYTE(xe)); in get_ccs_bb_pool_size()
145 struct xe_device *xe = tile_to_xe(tile); in alloc_bb_pool() local
150 bb_pool_size = get_ccs_bb_pool_size(xe); in alloc_bb_pool()
151 xe_sriov_info(xe, "Allocating %s CCS BB pool size = %lldMB\n", in alloc_bb_pool()
157 xe_sriov_err(xe, "xe_mem_pool_init failed with error: %pe\n", in alloc_bb_pool()
164 memset(pool_cpu_addr, 0, bb_pool_size); in alloc_bb_pool()
179 return 0; in alloc_bb_pool()
186 u32 dw[10], i = 0; in ccs_rw_update_ring()
194 lrc->ring.tail = 0; in ccs_rw_update_ring()
[all …]
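get_ccs_bb_pool_size() above divides the system memory size by NUM_BYTES_PER_CCS_BYTE(xe), whose value is not visible in this excerpt. A worked example assuming a 1:256 main-memory-to-CCS ratio: 16 GB of system memory would yield a 64 MB pool:

static u64 example_ccs_pool_size(void)
{
	u64 sys_mem_size = 16ULL << 30;	/* example: 16 GB of system memory */
	u64 bytes_per_ccs_byte = 256;	/* assumed ratio, not from this file */

	return div64_u64(sys_mem_size, bytes_per_ccs_byte);	/* 64 MB */
}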
xe_vm_madvise.c
55 madvise_range->num_vmas = 0; in get_vmas()
60 vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range); in get_vmas()
89 vm_dbg(&vm->xe->drm, "madvise_range-num_vmas = %d\n", madvise_range->num_vmas); in get_vmas()
91 return 0; in get_vmas()
94 static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm, in madvise_preferred_mem_loc() argument
101 xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC); in madvise_preferred_mem_loc()
103 for (i = 0; i < num_vmas; i++) { in madvise_preferred_mem_loc()
127 static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm, in madvise_atomic() argument
135 xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC); in madvise_atomic()
136 xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU); in madvise_atomic()
[all …]
xe_shrinker.c
19 * @xe: Back pointer to the device.
27 struct xe_device *xe; member
57 static s64 __xe_shrinker_walk(struct xe_device *xe, in __xe_shrinker_walk() argument
63 s64 freed = 0, lret; in __xe_shrinker_walk()
66 struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); in __xe_shrinker_walk()
82 if (lret < 0) in __xe_shrinker_walk()
90 xe_assert(xe, !IS_ERR(ttm_bo)); in __xe_shrinker_walk()
102 static s64 xe_shrinker_walk(struct xe_device *xe, in xe_shrinker_walk() argument
113 lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); in xe_shrinker_walk()
115 if (lret < 0 || *scanned >= to_scan) in xe_shrinker_walk()
[all …]
xe_pagefault.c
24 * DOC: Xe page faults
26 * Xe page faults are handled in two layers. The producer layer interacts with
60 return 0; in xe_pagefault_begin()
72 return 0; in xe_pagefault_begin()
91 needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic); in xe_pagefault_handle_vma()
92 if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma))) in xe_pagefault_handle_vma()
93 return needs_vram < 0 ? needs_vram : -EACCES; in xe_pagefault_handle_vma()
104 return 0; in xe_pagefault_handle_vma()
117 xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); in xe_pagefault_handle_vma()
155 static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid) in xe_pagefault_asid_to_vm() argument
[all …]
xe_vm.c
58 * Return: %0 on success. See drm_exec_lock_obj() for error codes.
98 return 0; in alloc_preempt_fences()
109 return 0; in alloc_preempt_fences()
115 bool vf_migration = IS_SRIOV_VF(vm->xe) && in wait_for_existing_preempt_fences()
116 xe_sriov_vf_migration_supported(vm->xe); in wait_for_existing_preempt_fences()
128 xe_assert(vm->xe, vf_migration); in wait_for_existing_preempt_fences()
133 if (timeout < 0 || q->lr.pfence->error == -ETIME) in wait_for_existing_preempt_fences()
141 return 0; in wait_for_existing_preempt_fences()
166 xe_assert(vm->xe, link != list); in arm_preempt_fences()
184 return 0; in add_preempt_fences()
[all …]
xe_migrate.c
95 * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
97 * (val-2) format, this translates to 0x400 dwords for the true maximum length
99 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
101 #define MAX_PTE_PER_SDI 0x1FEU
127 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte) in xe_migrate_vram_ofs() argument
135 if (GRAPHICS_VER(xe) >= 20 && is_comp_pte) in xe_migrate_vram_ofs()
137 (xe->mem.vram), SZ_1G); in xe_migrate_vram_ofs()
139 addr -= xe_vram_region_dpa_base(xe->mem.vram); in xe_migrate_vram_ofs()
143 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, in xe_migrate_program_identity() argument
146 struct xe_vram_region *vram = xe->mem.vram; in xe_migrate_program_identity()
[all …]
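The MAX_PTE_PER_SDI comment above packs several steps of arithmetic into two sentences. Spelled out as compile-time checks, using only the values quoted in the comment (0x3FE in (val-2) format gives 0x400 total dwords; one header dword and a two-dword address leave 0x3FD dwords, i.e. 0x1FE whole qword PTEs):

#define EXAMPLE_LEN_FIELD	0x3FEU			/* largest usable 10-bit value */
#define EXAMPLE_TOTAL_DWORDS	(EXAMPLE_LEN_FIELD + 2)	/* (val-2) format => 0x400 */
#define EXAMPLE_PTE_DWORDS	(EXAMPLE_TOTAL_DWORDS - 1 - 2)	/* minus header + address => 0x3FD */
#define EXAMPLE_PTE_QWORDS	(EXAMPLE_PTE_DWORDS / 2)	/* 0x3FD / 2 = 0x1FE */

static_assert(EXAMPLE_PTE_QWORDS == MAX_PTE_PER_SDI);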
xe_svm.c
29 #define XE_PEER_PAGEMAP ((void *)0ul)
33 * DOC: drm_pagemap reference-counting in xe:
36 * device data, the xe driver holds the following long-time references:
92 vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
94 "start=0x%014lx, end=0x%014lx, size=%lu", \
97 xe_svm_range_in_vram((r__)) ? 1 : 0, \
98 xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
133 struct xe_device *xe = vm->xe; in xe_svm_garbage_collector_add_range() local
145 queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work); in xe_svm_garbage_collector_add_range()
159 struct xe_device *xe = vm->xe; in xe_svm_range_notifier_event_begin() local
[all …]
xe_pxp.c
31 * PXP (Protected Xe Path) allows execution and flip to display of protected
47 bool xe_pxp_is_supported(const struct xe_device *xe) in xe_pxp_is_supported() argument
49 return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY); in xe_pxp_is_supported()
85 * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
93 int ret = 0; in xe_pxp_get_readiness_status()
103 guard(xe_pm_runtime)(pxp->xe); in xe_pxp_get_readiness_status()
124 return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0, in pxp_wait_for_session_state()
133 int ret = 0; in pxp_terminate_hw()
135 drm_dbg(&pxp->xe->drm, "Terminating PXP\n"); in pxp_terminate_hw()
167 int ret = 0; in pxp_terminate()
[all …]
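xe_pxp_get_readiness_status() above documents a tri-state return: 0 means not ready yet, 1 means ready, and a negative errno means failure. An illustrative poll built on that contract; the function signature and the 50 ms retry interval are assumptions:

static int example_wait_pxp_ready(struct xe_pxp *pxp)
{
	int ret;

	do {
		ret = xe_pxp_get_readiness_status(pxp);
		if (ret < 0)
			return ret;	/* hard failure */
		if (ret == 0)
			msleep(50);	/* arbitrary illustrative backoff */
	} while (ret == 0);

	return 0;	/* ret == 1: PXP is ready */
}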
/linux/drivers/gpu/drm/xe/tests/
xe_sriov_pf_service_kunit.c
19 struct xe_device *xe; in pf_service_test_init() local
24 xe = test->priv; in pf_service_test_init()
25 KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); in pf_service_test_init()
27 xe_sriov_pf_service_init(xe); in pf_service_test_init()
33 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major); in pf_service_test_init()
34 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
35 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major, in pf_service_test_init()
36 xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
37 if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) in pf_service_test_init()
38 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor, in pf_service_test_init()
[all …]
xe_kunit_helpers.c
36 struct xe_device *xe; in xe_kunit_helper_alloc_xe_device() local
38 xe = drm_kunit_helper_alloc_drm_device(test, dev, in xe_kunit_helper_alloc_xe_device()
41 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_alloc_xe_device()
42 return xe; in xe_kunit_helper_alloc_xe_device()
69 * Return: Always 0.
73 struct xe_device *xe; in xe_kunit_helper_xe_device_test_init() local
80 xe = xe_kunit_helper_alloc_xe_device(test, dev); in xe_kunit_helper_xe_device_test_init()
81 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_xe_device_test_init()
83 err = xe_pci_fake_device_init(xe); in xe_kunit_helper_xe_device_test_init()
84 KUNIT_ASSERT_EQ(test, err, 0); in xe_kunit_helper_xe_device_test_init()
[all …]
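xe_kunit_helper_xe_device_test_init() above builds a fake xe device and always returns 0, which makes it a natural suite .init hook. A hypothetical suite wired that way, assuming the helper parks the device in test->priv (as the pf_service test above appears to rely on):

static void example_test(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, xe);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_test),
	{}
};

static struct kunit_suite example_suite = {
	.name = "xe-example",
	.init = xe_kunit_helper_xe_device_test_init,
	.test_cases = example_cases,
};
kunit_test_suite(example_suite);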
xe_bo.c
54 if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) { in ccs_test_migrate()
75 if (timeout <= 0) { in ccs_test_migrate()
101 if (cpu_map[0] != get_val) { in ccs_test_migrate()
103 "Expected CCS readout 0x%016llx, got 0x%016llx.\n", in ccs_test_migrate()
105 (unsigned long long)cpu_map[0]); in ccs_test_migrate()
114 "Expected CCS readout 0x%016llx, got 0x%016llx.\n", in ccs_test_migrate()
120 cpu_map[0] = assign_val; in ccs_test_migrate()
127 static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, in ccs_test_run_tile() argument
138 if (IS_DGFX(xe)) in ccs_test_run_tile()
143 bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, in ccs_test_run_tile()
[all …]
/linux/drivers/gpu/drm/xe/display/
xe_hdcp_gsc.c
27 struct xe_device *xe; member
37 struct xe_device *xe = to_xe_device(drm); in intel_hdcp_gsc_check_status() local
38 struct xe_tile *tile = xe_device_get_root_tile(xe); in intel_hdcp_gsc_check_status()
43 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
51 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
56 guard(xe_pm_runtime)(xe); in intel_hdcp_gsc_check_status()
59 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
68 static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, in intel_hdcp_gsc_initialize_message() argument
73 int ret = 0; in intel_hdcp_gsc_initialize_message()
76 bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2, in intel_hdcp_gsc_initialize_message()
[all …]
/linux/drivers/gpu/drm/amd/include/asic_reg/umc/
umc_6_7_0_sh_mask.h
29 …C_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
30 …_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
31 …_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16
32 …_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18
33 …_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e
34 …_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
35 …_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26
36 …_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
37 …_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29
38 …_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
[all …]
