
Searched +full:0 +full:xe (Results 1 – 25 of 1013) sorted by relevance

/linux/drivers/gpu/drm/xe/
xe_pm.c
33 * DOC: Xe Power Management
35 * Xe PM implements the main routines for both system level suspend states and
55 * to perform the transition from D3hot to D3cold. Xe may disallow this
63 * (PC-states), and/or other low level power states. Xe PM component provides
67 * Also, Xe PM provides get and put functions that Xe driver will use to
90 * @xe: The xe device.
95 bool xe_rpm_reclaim_safe(const struct xe_device *xe) in xe_rpm_reclaim_safe() argument
97 return !xe->d3cold.capable; in xe_rpm_reclaim_safe()
100 static void xe_rpm_lockmap_acquire(const struct xe_device *xe) in xe_rpm_lockmap_acquire() argument
102 lock_map_acquire(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_acquire()
[all …]
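
Note: the DOC comment above describes the get/put reference counting the rest of the driver is expected to use around hardware access. The fragment below is a minimal illustrative sketch of that pattern, assuming the xe_pm_runtime_get()/xe_pm_runtime_put() helpers declared alongside this file; it is not a call site taken from the tree.

/* Illustrative only: bracket hardware access with a runtime PM reference. */
#include "xe_device.h"
#include "xe_pm.h"

static void example_touch_hw(struct xe_device *xe)
{
	xe_pm_runtime_get(xe);	/* wake the device, possibly out of D3cold */

	/* ... MMIO or GuC traffic that needs the device awake ... */

	xe_pm_runtime_put(xe);	/* drop the reference; device may runtime suspend */
}
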
xe_device.c
80 struct xe_device *xe = to_xe_device(dev); in xe_file_open() local
98 xef->xe = xe; in xe_file_open()
116 return 0; in xe_file_open()
134 * xe_file_get() - Take a reference to the xe file object
135 * @xef: Pointer to the xe file
137 * Anyone with a pointer to xef must take a reference to the xe file
140 * Return: xe file pointer
149 * xe_file_put() - Drop a reference to the xe file object
150 * @xef: Pointer to the xe file
161 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
[all …]
xe_pci_sriov.c
34 static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_provision_vfs() argument
38 int result = 0, err; in pf_provision_vfs()
40 for_each_gt(gt, xe, id) { in pf_provision_vfs()
50 static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_unprovision_vfs() argument
56 for_each_gt(gt, xe, id) in pf_unprovision_vfs()
61 static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_reset_vfs() argument
67 for_each_gt(gt, xe, id) in pf_reset_vfs()
72 static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id) in xe_pci_pf_get_vf_dev() argument
74 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pci_pf_get_vf_dev()
76 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_pci_pf_get_vf_dev()
[all …]
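
Note: the pf_provision_vfs()/pf_unprovision_vfs() helpers above sit behind the driver's PCI .sriov_configure callback, so VFs are normally enabled or disabled through the generic sriov_numvfs attribute. A hedged user-space sketch follows; the 0000:03:00.0 address and the count of 2 are placeholders.

/* Illustrative sketch: enable VFs via the standard PCI SR-IOV interface.
 * The device address and VF count below are placeholders. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/sriov_numvfs", "w");

	if (!f) {
		perror("sriov_numvfs");
		return 1;
	}
	fprintf(f, "2\n");	/* writing 0 disables the VFs again */
	fclose(f);
	return 0;
}
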
xe_sriov_pf.c
20 static unsigned int wanted_max_vfs(struct xe_device *xe) in wanted_max_vfs() argument
25 static int pf_reduce_totalvfs(struct xe_device *xe, int limit) in pf_reduce_totalvfs() argument
27 struct device *dev = xe->drm.dev; in pf_reduce_totalvfs()
33 xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n", in pf_reduce_totalvfs()
38 static bool pf_continue_as_native(struct xe_device *xe, const char *why) in pf_continue_as_native() argument
40 xe_sriov_dbg(xe, "%s, continuing as native\n", why); in pf_continue_as_native()
41 pf_reduce_totalvfs(xe, 0); in pf_continue_as_native()
47 * @xe: the &xe_device to check
54 bool xe_sriov_pf_readiness(struct xe_device *xe) in xe_sriov_pf_readiness() argument
56 struct device *dev = xe->drm.dev; in xe_sriov_pf_readiness()
[all …]
xe_bo_evict.c
15 static int xe_bo_apply_to_pinned(struct xe_device *xe, in xe_bo_apply_to_pinned() argument
22 int ret = 0; in xe_bo_apply_to_pinned()
24 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
32 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
36 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
44 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
47 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
50 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
58 * @xe: xe device
62 * Return: 0 on success, negative error code on error.
[all …]
xe_sriov_pf_service.c
18 * @xe: the &xe_device to initialize
24 void xe_sriov_pf_service_init(struct xe_device *xe) in xe_sriov_pf_service_init() argument
29 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_service_init()
32 xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR; in xe_sriov_pf_service_init()
33 xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR; in xe_sriov_pf_service_init()
36 xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR; in xe_sriov_pf_service_init()
37 xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR; in xe_sriov_pf_service_init()
40 /* Return: 0 on success or a negative error code on failure. */
41 static int pf_negotiate_version(struct xe_device *xe, in pf_negotiate_version() argument
45 struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base; in pf_negotiate_version()
[all …]
xe_survivability_mode.c
45 * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
69 * (:ref:`xe-device-wedging`) requiring a firmware flash to restore normal operation.
95 static void populate_survivability_info(struct xe_device *xe) in populate_survivability_info() argument
97 struct xe_survivability *survivability = &xe->survivability; in populate_survivability_info()
100 u32 id = 0, reg_value; in populate_survivability_info()
104 mmio = xe_root_tile_mmio(xe); in populate_survivability_info()
121 for (index = 0; id && reg_value; index++, reg_value = info[id].value, in populate_survivability_info()
131 struct xe_device *xe = pdev_to_xe_device(pdev); in log_survivability_info() local
132 struct xe_survivability *survivability = &xe->survivability; in log_survivability_info()
138 for (id = 0; id < MAX_SCRATCH_MMIO; id++) { in log_survivability_info()
[all …]
xe_nvm.c
16 #define GEN12_GUNIT_NVM_BASE 0x00102040
17 #define GEN12_DEBUG_NVM_BASE 0x00101018
19 #define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C
21 #define GEN12_GUNIT_NVM_SIZE 0x80
22 #define GEN12_DEBUG_NVM_SIZE 0x4
29 [0] = { .name = "DESCRIPTOR", },
44 static bool xe_nvm_non_posted_erase(struct xe_device *xe) in xe_nvm_non_posted_erase() argument
46 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xe_nvm_non_posted_erase()
48 if (xe->info.platform != XE_BATTLEMAGE) in xe_nvm_non_posted_erase()
54 static bool xe_nvm_writable_override(struct xe_device *xe) in xe_nvm_writable_override() argument
[all …]
xe_bo.c
51 .fpfn = 0,
52 .lpfn = 0,
54 .flags = 0,
66 .fpfn = 0,
67 .lpfn = 0,
72 .fpfn = 0,
73 .lpfn = 0,
89 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
91 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
161 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) in mem_type_to_migrate() argument
[all …]
xe_late_bind_fw.c
58 struct xe_device *xe = late_bind_fw_to_xe(lb_fw); in parse_cpd_header() local
63 u32 offset = 0; in parse_cpd_header()
67 xe_assert(xe, manifest_entry); in parse_cpd_header()
73 drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n", in parse_cpd_header()
80 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
87 for (i = 0; i < header->num_of_entries; i++, entry++) in parse_cpd_header()
88 if (strcmp(entry->name, manifest_entry) == 0) in parse_cpd_header()
92 drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n", in parse_cpd_header()
99 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
108 return 0; in parse_cpd_header()
[all …]
xe_query.c
50 static size_t calc_hw_engine_info_size(struct xe_device *xe) in calc_hw_engine_info_size() argument
56 int i = 0; in calc_hw_engine_info_size()
58 for_each_gt(gt, xe, gt_id) in calc_hw_engine_info_size()
97 u32 upper, lower, old_upper, loop = 0; in hwe_read_timestamp()
115 query_engine_cycles(struct xe_device *xe, in query_engine_cycles() argument
127 if (IS_SRIOV_VF(xe)) in query_engine_cycles()
130 if (query->size == 0) { in query_engine_cycles()
132 return 0; in query_engine_cycles()
133 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engine_cycles()
146 if (eci->gt_id >= xe->info.max_gt_per_tile) in query_engine_cycles()
[all …]
xe_device_sysfs.c
19 * DOC: Xe device sysfs
20 * Xe driver requires exposing certain tunable knobs controlled by user space for
40 struct xe_device *xe = pdev_to_xe_device(pdev); in vram_d3cold_threshold_show() local
43 xe_pm_runtime_get(xe); in vram_d3cold_threshold_show()
44 ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold); in vram_d3cold_threshold_show()
45 xe_pm_runtime_put(xe); in vram_d3cold_threshold_show()
55 struct xe_device *xe = pdev_to_xe_device(pdev); in vram_d3cold_threshold_store() local
59 ret = kstrtou32(buff, 0, &vram_d3cold_threshold); in vram_d3cold_threshold_store()
63 drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold); in vram_d3cold_threshold_store()
65 xe_pm_runtime_get(xe); in vram_d3cold_threshold_store()
[all …]
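
Note: the show/store pair above implements a plain device sysfs attribute, so the VRAM d3cold threshold can be read and tuned from user space. A minimal sketch, assuming the attribute appears as vram_d3cold_threshold under the PCI device directory; the path and the value written are placeholders.

/* Illustrative sketch: read and update the vram_d3cold_threshold knob.
 * The sysfs path and the value 512 are placeholder assumptions. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/vram_d3cold_threshold";
	unsigned int threshold;
	FILE *f;

	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &threshold) == 1)
			printf("current threshold: %u\n", threshold);
		fclose(f);
	}

	f = fopen(path, "w");
	if (f) {
		fprintf(f, "512\n");	/* parsed by the store path with kstrtou32() */
		fclose(f);
	}
	return 0;
}
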
xe_pmu.c
20 * DOC: Xe PMU (Performance Monitoring Unit)
28 * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/events/
29 * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/format/
42 * For gt specific events (gt-*) gt parameter must be passed. All other parameters will be 0.
58 #define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
85 #define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
86 #define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
87 #define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
88 #define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
89 #define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
[all …]
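
Note: the excerpt lists the raw event IDs and shows the PMU registered under /sys/bus/event_source/devices/xe_0000_00_02.0/. Below is a hedged perf_event_open() sketch for the GT C6 residency event; it assumes that for gt 0 every config field other than the event ID (low bits, per XE_PMU_EVENT_ID_MASK above) encodes as zero, so config equals the raw event ID. The exact field layout should be checked against the format/ directory on the target system.

/* Illustrative sketch: read XE_PMU_EVENT_GT_C6_RESIDENCY (0x01) via perf.
 * Assumptions: the PMU instance name, and that for gt 0 every config field
 * other than the event ID is zero (verify against .../format/). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	FILE *f;
	int type, fd;

	/* Every perf PMU exports its dynamic type id next to events/ and format/. */
	f = fopen("/sys/bus/event_source/devices/xe_0000_00_02.0/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	attr.config = 0x01;	/* XE_PMU_EVENT_GT_C6_RESIDENCY, gt 0 */

	/* Uncore-style PMU: pid = -1 and one specific CPU. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("gt-c6-residency: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
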
xe_hwmon.c
68 #define PWR_UNIT 0x3
69 #define ENERGY_UNIT 0xe
70 #define TIME_UNIT 0xa
115 * struct xe_hwmon - xe hwmon data structure
118 /** @hwmon_dev: hwmon device for xe */
120 /** @xe: Xe device */
121 struct xe_device *xe; member
146 struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe); in xe_hwmon_pcode_read_power_limit()
147 u32 val0 = 0, val1 = 0; in xe_hwmon_pcode_read_power_limit()
148 int ret = 0; in xe_hwmon_pcode_read_power_limit()
[all …]
xe_shrinker.c
19 * @xe: Back pointer to the device.
27 struct xe_device *xe; member
57 static s64 __xe_shrinker_walk(struct xe_device *xe, in __xe_shrinker_walk() argument
63 s64 freed = 0, lret; in __xe_shrinker_walk()
66 struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); in __xe_shrinker_walk()
82 if (lret < 0) in __xe_shrinker_walk()
90 xe_assert(xe, !IS_ERR(ttm_bo)); in __xe_shrinker_walk()
102 static s64 xe_shrinker_walk(struct xe_device *xe, in xe_shrinker_walk() argument
113 lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); in xe_shrinker_walk()
115 if (lret < 0 || *scanned >= to_scan) in xe_shrinker_walk()
[all …]
xe_exec_queue.c
32 XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
38 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
45 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) in __xe_exec_queue_free()
60 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
65 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) { in alloc_dep_schedulers()
81 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
90 return 0; in alloc_dep_schedulers()
93 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
138 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
153 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
[all …]
xe_step.c
41 [0] = { COMMON_STEP(A0) },
46 [0] = { COMMON_STEP(A0) },
51 [0x0] = { COMMON_STEP(A0) },
52 [0x1] = { COMMON_STEP(A0) },
53 [0x4] = { COMMON_STEP(B0) },
54 [0x8] = { COMMON_STEP(C0) },
55 [0xC] = { COMMON_STEP(D0) },
59 [0x4] = { COMMON_STEP(D0) },
60 [0xC] = { COMMON_STEP(D0) },
64 [0x0] = { COMMON_STEP(A0) },
[all …]
xe_gsc_submit.c
31 * as we use unique identifier for each user, with handle 0 being reserved for
61 * @xe: the Xe device
70 u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset, in xe_gsc_emit_header() argument
73 xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK)); in xe_gsc_emit_header()
78 xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE); in xe_gsc_emit_header()
80 mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER); in xe_gsc_emit_header()
81 mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id); in xe_gsc_emit_header()
82 mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id); in xe_gsc_emit_header()
83 mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION); in xe_gsc_emit_header()
84 mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE); in xe_gsc_emit_header()
[all …]
xe_vm.c
57 * Return: %0 on success. See drm_exec_lock_obj() for error codes.
97 return 0; in alloc_preempt_fences()
108 return 0; in alloc_preempt_fences()
122 if (timeout < 0 || q->lr.pfence->error == -ETIME) in wait_for_existing_preempt_fences()
130 return 0; in wait_for_existing_preempt_fences()
155 xe_assert(vm->xe, link != list); in arm_preempt_fences()
173 return 0; in add_preempt_fences()
186 return 0; in add_preempt_fences()
218 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
221 err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val); in xe_vm_add_compute_exec_queue()
[all …]
xe_wait_user_fence.c
52 return passed ? 0 : 1; in do_compare()
58 static long to_jiffies_timeout(struct xe_device *xe, in to_jiffies_timeout() argument
70 if (args->timeout < 0) { in to_jiffies_timeout()
75 if (args->timeout == 0) in to_jiffies_timeout()
76 return 0; in to_jiffies_timeout()
104 struct xe_device *xe = to_xe_device(dev); in xe_wait_user_fence_ioctl() local
110 int err = 0; in xe_wait_user_fence_ioctl()
114 if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) || in xe_wait_user_fence_ioctl()
115 XE_IOCTL_DBG(xe, args->pad2) || in xe_wait_user_fence_ioctl()
116 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_wait_user_fence_ioctl()
[all …]
/linux/drivers/gpu/drm/xe/tests/
xe_sriov_pf_service_kunit.c
19 struct xe_device *xe; in pf_service_test_init() local
24 xe = test->priv; in pf_service_test_init()
25 KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); in pf_service_test_init()
27 xe_sriov_pf_service_init(xe); in pf_service_test_init()
33 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major); in pf_service_test_init()
34 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
35 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major, in pf_service_test_init()
36 xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
37 if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) in pf_service_test_init()
38 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor, in pf_service_test_init()
[all …]
xe_bo.c
53 if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) { in ccs_test_migrate()
74 if (timeout <= 0) { in ccs_test_migrate()
100 if (cpu_map[0] != get_val) { in ccs_test_migrate()
102 "Expected CCS readout 0x%016llx, got 0x%016llx.\n", in ccs_test_migrate()
104 (unsigned long long)cpu_map[0]); in ccs_test_migrate()
113 "Expected CCS readout 0x%016llx, got 0x%016llx.\n", in ccs_test_migrate()
119 cpu_map[0] = assign_val; in ccs_test_migrate()
126 static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, in ccs_test_run_tile() argument
137 if (IS_DGFX(xe)) in ccs_test_run_tile()
142 bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, in ccs_test_run_tile()
[all …]
xe_kunit_helpers.c
36 struct xe_device *xe; in xe_kunit_helper_alloc_xe_device() local
38 xe = drm_kunit_helper_alloc_drm_device(test, dev, in xe_kunit_helper_alloc_xe_device()
41 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_alloc_xe_device()
42 return xe; in xe_kunit_helper_alloc_xe_device()
69 * Return: Always 0.
73 struct xe_device *xe; in xe_kunit_helper_xe_device_test_init() local
80 xe = xe_kunit_helper_alloc_xe_device(test, dev); in xe_kunit_helper_xe_device_test_init()
81 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_xe_device_test_init()
83 err = xe_pci_fake_device_init(xe); in xe_kunit_helper_xe_device_test_init()
84 KUNIT_ASSERT_EQ(test, err, 0); in xe_kunit_helper_xe_device_test_init()
[all …]
/linux/Documentation/ABI/testing/
sysfs-driver-intel-xe-hwmon
1 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
4 Contact: intel-xe@lists.freedesktop.org
9 exceeds this limit. A read value of 0 means that the PL1
10 power limit is disabled, writing 0 disables the
11 limit. Writing values > 0 and <= TDP will enable the power limit.
13 Only supported for particular Intel Xe graphics platforms.
15 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
18 Contact: intel-xe@lists.freedesktop.org
21 Only supported for particular Intel Xe graphics platforms.
24 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
[all …]
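
Note: the entry above follows the standard hwmon conventions, so power1_max is expressed in microwatts, a read of 0 means the PL1 limit is disabled, and writing 0 disables it. A hedged sketch; the hwmon instance path and the 25 W value are placeholders.

/* Illustrative sketch: query and set the PL1 limit through hwmon.
 * The hwmon path and the 25 W value are placeholder assumptions;
 * hwmon power attributes are in microwatts. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/drivers/xe/0000:03:00.0/hwmon/hwmon2/power1_max";
	unsigned long long uw;
	FILE *f;

	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%llu", &uw) == 1) {
			if (uw)
				printf("PL1 limit: %llu uW\n", uw);
			else
				printf("PL1 limit disabled\n");
		}
		fclose(f);
	}

	f = fopen(path, "w");
	if (f) {
		fprintf(f, "25000000\n");	/* 25 W; writing 0 disables PL1 */
		fclose(f);
	}
	return 0;
}
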
/linux/drivers/gpu/drm/amd/include/asic_reg/umc/
umc_6_7_0_sh_mask.h
29 …C_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
30 …_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
31 …_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16
32 …_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18
33 …_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e
34 …_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
35 …_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26
36 …_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
37 …_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29
38 …_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
[all …]
