/linux/drivers/gpu/drm/xe/

xe_irq.c
  30  #define IMR(offset) XE_REG(offset + 0x4)
  31  #define IIR(offset) XE_REG(offset + 0x8)
  32  #define IER(offset) XE_REG(offset + 0xc)
  34  static int xe_irq_msix_init(struct xe_device *xe);
  35  static void xe_irq_msix_free(struct xe_device *xe);
  36  static int xe_irq_msix_request_irqs(struct xe_device *xe);
  37  static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
  43  if (val == 0)
  46  drm_WARN(&mmio->tile->xe->drm, 1,
  47  "Interrupt register 0x%x is not zero: 0x%08x\n",
  …

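The IMR/IIR/IER helpers above place an interrupt group's mask, identity and enable registers at fixed offsets (+0x4, +0x8, +0xc) from a common base. A minimal stand-alone sketch of that layout follows; the base value and all EXAMPLE_* names are made up for illustration and are not taken from the driver.

```c
#include <stdio.h>

/* Hypothetical interrupt-group base offset, for illustration only. */
#define EXAMPLE_IRQ_BASE 0x44000u

/* Same +0x4 / +0x8 / +0xc layout as the IMR()/IIR()/IER() macros above. */
#define EXAMPLE_IMR(base) ((base) + 0x4) /* Interrupt Mask Register     */
#define EXAMPLE_IIR(base) ((base) + 0x8) /* Interrupt Identity Register */
#define EXAMPLE_IER(base) ((base) + 0xc) /* Interrupt Enable Register   */

int main(void)
{
	printf("IMR 0x%x, IIR 0x%x, IER 0x%x\n",
	       EXAMPLE_IMR(EXAMPLE_IRQ_BASE),
	       EXAMPLE_IIR(EXAMPLE_IRQ_BASE),
	       EXAMPLE_IER(EXAMPLE_IRQ_BASE));
	return 0;
}
```
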
xe_pat.c
  22  #define _PAT_ATS 0x47fc
  24  0x4800, 0x4804, \
  25  0x4848, 0x484c)
  26  #define _PAT_PTA 0x4820
  33  #define XE2_COH_MODE REG_GENMASK(1, 0)
  38  #define XELPG_PAT_0_WB REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
  39  #define XELPG_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
  42  #define XELPG_0_COH_NON REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)
  47  #define XELP_MEM_TYPE_MASK REG_GENMASK(1, 0)
  51  #define XELP_PAT_UC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)
  …

xe_ttm_stolen_mgr.c
  49  * @xe: xe device
  55  bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
  57  return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
  60  static u32 get_wopcm_size(struct xe_device *xe)
  65  val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
  69  case 0x5 ... 0x6:
  72  case 0x0 ... 0x3:
  77  wopcm_size = 0;
  83  static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
  85  struct xe_tile *tile = xe_device_get_root_tile(xe);
  …

xe_pci.c
  220  static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
  236  static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
  276  static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
  277  static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
  278  static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
  402  if (negative && strcmp(devices, "!*") == 0)
  404  if (!negative && strcmp(devices, "*") == 0)
  414  if (negative && tok[0] == '!')
  416  else if ((negative && tok[0] != '!') ||
  417  (!negative && tok[0] == '!'))
  …

xe_bo.c
  47  .fpfn = 0,
  48  .lpfn = 0,
  50  .flags = 0,
  62  .fpfn = 0,
  63  .lpfn = 0,
  68  .fpfn = 0,
  69  .lpfn = 0,
  85  static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
  87  return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
  157  mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
  …

xe_device_sysfs.c
  19  * DOC: Xe device sysfs
  20  * Xe driver requires exposing certain tunable knobs controlled by user space for
  34  struct xe_device *xe = pdev_to_xe_device(pdev);
  37  xe_pm_runtime_get(xe);
  38  ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
  39  xe_pm_runtime_put(xe);
  49  struct xe_device *xe = pdev_to_xe_device(pdev);
  53  ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
  57  drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
  59  xe_pm_runtime_get(xe);
  …

xe_exec_queue.c
  31  XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
  37  static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
  53  static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
  105  err = exec_queue_user_extensions(xe, q, extensions, 0);
  118  u32 flags = 0;
  134  for (i = 0; i < q->width; ++i) {
  146  return 0;
  149  for (i = i - 1; i >= 0; --i)
  154  struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
  163  …xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0))…
  …

xe_hwmon.c
  58  #define PWR_UNIT 0x3
  59  #define ENERGY_UNIT 0xe
  60  #define TIME_UNIT 0xa
  104  * struct xe_hwmon - xe hwmon data structure
  107  /** @hwmon_dev: hwmon device for xe */
  109  /** @xe: Xe device */
  110  struct xe_device *xe;
  132  struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
  133  u32 val0 = 0, val1 = 0;
  134  int ret = 0;
  …

xe_sriov_pf.c
  15  static unsigned int wanted_max_vfs(struct xe_device *xe)
  20  static int pf_reduce_totalvfs(struct xe_device *xe, int limit)
  22  struct device *dev = xe->drm.dev;
  28  xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n",
  33  static bool pf_continue_as_native(struct xe_device *xe, const char *why)
  35  xe_sriov_dbg(xe, "%s, continuing as native\n", why);
  36  pf_reduce_totalvfs(xe, 0);
  42  * @xe: the &xe_device to check
  49  bool xe_sriov_pf_readiness(struct xe_device *xe)
  51  struct device *dev = xe->drm.dev;
  …

xe_step.c
  40  [0] = { COMMON_STEP(A0) },
  45  [0] = { COMMON_STEP(A0) },
  50  [0x0] = { COMMON_STEP(A0) },
  51  [0x1] = { COMMON_STEP(A0) },
  52  [0x4] = { COMMON_STEP(B0) },
  53  [0x8] = { COMMON_STEP(C0) },
  54  [0xC] = { COMMON_STEP(D0) },
  58  [0x4] = { COMMON_STEP(D0) },
  59  [0xC] = { COMMON_STEP(D0) },
  63  [0x0] = { COMMON_STEP(A0) },
  …

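The sparse tables above map a hardware revision id (the array index) to a stepping. A small stand-alone sketch of that lookup pattern, reusing the revid-to-step pairs visible above (0x0/0x1 → A0, 0x4 → B0, 0x8 → C0, 0xC → D0); the enum and helper names are illustrative, not the driver's actual types.

```c
#include <stdio.h>

/* Illustrative stepping values; xe_step.c uses richer per-IP structs. */
enum example_step { STEP_NONE = 0, STEP_A0, STEP_B0, STEP_C0, STEP_D0 };

/* Sparse revid -> stepping table mirroring the entries shown above. */
static const enum example_step example_steps[] = {
	[0x0] = STEP_A0,
	[0x1] = STEP_A0,
	[0x4] = STEP_B0,
	[0x8] = STEP_C0,
	[0xC] = STEP_D0,
};

static enum example_step example_step_lookup(unsigned int revid)
{
	if (revid >= sizeof(example_steps) / sizeof(example_steps[0]))
		return STEP_NONE;	/* revid newer than the table */
	return example_steps[revid];	/* gaps read back as STEP_NONE */
}

int main(void)
{
	printf("revid 0x4 -> step %d\n", example_step_lookup(0x4));
	return 0;
}
```
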
xe_shrinker.c
  18  * @xe: Back pointer to the device.
  26  struct xe_device *xe;
  56  static s64 xe_shrinker_walk(struct xe_device *xe,
  62  s64 freed = 0, lret;
  65  struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
  77  if (lret < 0)
  102  num_pages = 0;
  120  struct xe_device *xe = shrinker->xe;
  122  if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
  134  if (!xe_pm_runtime_get_if_active(xe)) {
  …

xe_gsc_submit.c
  31  * as we use unique identifier for each user, with handle 0 being reserved for
  61  * @xe: the Xe device
  70  u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
  73  xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK));
  78  xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE);
  80  mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER);
  81  mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id);
  82  mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id);
  83  mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION);
  84  mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE);
  …

xe_migrate.c
  79  * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
  81  * (val-2) format, this translates to 0x400 dwords for the true maximum length
  83  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
  85  #define MAX_PTE_PER_SDI 0x1FE
  124  static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
  132  if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
  133  identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
  135  addr -= xe->mem.vram.dpa_base;
  139  static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
  145  u64 vram_limit = xe->mem.vram.actual_physical_size +
  …

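The MAX_PTE_PER_SDI comment above is a small arithmetic derivation: the 10-bit length field tops out at 0x3FE, the (val-2) encoding makes that 0x400 dwords of total instruction length, and once the command header and the 2-dword address are subtracted, 0x3FD data dwords remain, i.e. 0x1FE whole qwords of PTEs. A compile-time sketch of that arithmetic; the 1-dword header size is inferred from the 0x400 - 0x3FD difference rather than quoted from the file, and the SDI_* names are made up for this sketch.

```c
/* Derivation of MAX_PTE_PER_SDI, restated as compile-time checks (C11). */
#define SDI_LEN_FIELD_MAX	0x3FE	/* largest encodable length value      */
#define SDI_TOTAL_DWORDS	(SDI_LEN_FIELD_MAX + 2)	/* (val-2) encoding -> 0x400 */
#define SDI_HEADER_DWORDS	1	/* inferred: 0x400 - 2 - 0x3FD == 1    */
#define SDI_ADDR_DWORDS		2	/* destination address                 */
#define SDI_DATA_DWORDS		(SDI_TOTAL_DWORDS - SDI_HEADER_DWORDS - SDI_ADDR_DWORDS)
#define SDI_PTE_QWORDS		(SDI_DATA_DWORDS / 2)	/* odd dword is dropped */

_Static_assert(SDI_DATA_DWORDS == 0x3FD, "data dwords per MI_STORE_DATA_IMM");
_Static_assert(SDI_PTE_QWORDS == 0x1FE, "matches MAX_PTE_PER_SDI");
```
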
xe_sriov_vf.c
  128  * @xe: the &xe_device to initialize
  130  void xe_sriov_vf_init_early(struct xe_device *xe)
  132  INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
  137  * @xe: the &xe_device struct instance
  143  * Returns: 0 if the operation completed successfully, or a negative error
  146  static int vf_post_migration_requery_guc(struct xe_device *xe)
  150  int err, ret = 0;
  152  for_each_gt(gt, xe, id) {
  162  * @xe: the &xe_device struct instance
  167  static bool vf_post_migration_imminent(struct xe_device *xe)
  …

xe_mmio.c
  28  struct xe_device *xe = arg;
  32  for_each_remote_tile(tile, xe, id)
  53  * '----------------------' <- 0MB
  55  static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
  61  * Nothing to be done as tile 0 has already been setup earlier with the
  64  if (xe->info.tile_count == 1)
  68  if (!xe->info.skip_mtcfg) {
  69  struct xe_mmio *mmio = xe_root_tile_mmio(xe);
  81  if (tile_count < xe->info.tile_count) {
  82  drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
  …

xe_vm.c
  60  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
  66  -EAGAIN : 0;
  73  struct xe_device *xe = vm->xe;
  76  xe_assert(xe, xe_vma_is_userptr(vma));
  114  return 0;
  125  return 0;
  139  if (timeout < 0 || q->lr.pfence->error == -ETIME)
  147  return 0;
  172  xe_assert(vm->xe, link != list);
  190  return 0;
  …

xe_pxp.c
  32  * PXP (Protected Xe Path) allows execution and flip to display of protected
  48  bool xe_pxp_is_supported(const struct xe_device *xe)
  50  return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
  89  * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
  97  int ret = 0;
  107  xe_pm_runtime_get(pxp->xe);
  113  xe_pm_runtime_put(pxp->xe);
  129  return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
  139  int ret = 0;
  141  drm_dbg(&pxp->xe->drm, "Terminating PXP\n");
  …

xe_gsc_proxy.c
  37  * 1 - Xe submits a request to GSC asking for the message to CSME
  39  * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
  41  * 5 - Xe submits a request to GSC with the reply from CSME
  98  __gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
  103  u32 set = enabled ? HECI_H_CSR_IE : 0;
  104  u32 clr = enabled ? 0 : HECI_H_CSR_IE;
  116  if (ret < 0) {
  122  if (ret < 0) {
  150  return 0;
  159  int ret = 0;
  …

xe_wait_user_fence.c
  52  return passed ? 0 : 1;
  58  static long to_jiffies_timeout(struct xe_device *xe,
  70  if (args->timeout < 0) {
  75  if (args->timeout == 0)
  76  return 0;
  104  struct xe_device *xe = to_xe_device(dev);
  110  int err = 0;
  114  if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
  115  XE_IOCTL_DBG(xe, args->pad2) ||
  116  XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
  …

xe_huc.c
  50  struct xe_device *xe = gt_to_xe(gt);
  54  bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
  63  return 0;
  70  struct xe_device *xe = gt_to_xe(gt);
  78  return 0;
  86  return 0;
  88  if (IS_SRIOV_VF(xe))
  89  return 0;
  99  return 0;
  109  struct xe_device *xe = huc_to_xe(huc);
  …

xe_vsec.c
  24  #define BMG_DEVICE_ID 0xE2F8
  27  .length = 0x10,
  31  .tbir = 0,
  36  .length = 0x10,
  40  .tbir = 0,
  41  .offset = BMG_DISCOVERY_OFFSET + 0x60,
  45  .length = 0x10,
  49  .tbir = 0,
  50  .offset = BMG_DISCOVERY_OFFSET + 0x78,
  61  XE_VSEC_UNKNOWN = 0,
  …

/linux/drivers/gpu/drm/xe/display/

xe_display.c
  37  /* Xe device functions */
  39  static bool has_display(struct xe_device *xe)
  41  return HAS_DISPLAY(&xe->display);
  54  return 0;
  79  static void unset_display_features(struct xe_device *xe)
  81  xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
  86  struct xe_device *xe = to_xe_device(dev);
  88  destroy_workqueue(xe->display.hotplug.dp_wq);
  93  * @xe: XE device instance
  98  * to the rest of xe and return it to be xe->display.
  …

/linux/Documentation/ABI/testing/

sysfs-driver-intel-xe-hwmon
  1  What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
  4  Contact: intel-xe@lists.freedesktop.org
  9  exceeds this limit. A read value of 0 means that the PL1
  10  power limit is disabled, writing 0 disables the
  11  limit. Writing values > 0 and <= TDP will enable the power limit.
  13  Only supported for particular Intel Xe graphics platforms.
  15  What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
  18  Contact: intel-xe@lists.freedesktop.org
  21  Only supported for particular Intel Xe graphics platforms.
  24  What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
  …

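power1_max is an ordinary sysfs attribute, so it can be read (and, per the description above, written with 0 to disable the PL1 limit) with plain file I/O. A minimal read-only sketch; the PCI address and hwmon index in the path are hypothetical and vary per system.

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the real PCI address and hwmon index. */
	const char *path =
		"/sys/bus/pci/drivers/xe/0000:03:00.0/hwmon/hwmon2/power1_max";
	FILE *f = fopen(path, "r");
	long value;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &value) == 1)
		printf("power1_max = %ld (0 means the PL1 limit is disabled)\n", value);
	fclose(f);
	return 0;
}
```
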
/linux/drivers/gpu/drm/amd/include/asic_reg/umc/

umc_6_7_0_sh_mask.h
  29  …C_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
  30  …_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
  31  …_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16
  32  …_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18
  33  …_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e
  34  …_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
  35  …_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26
  36  …_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
  37  …_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29
  38  …_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
  …

/linux/drivers/gpu/drm/xe/tests/

xe_kunit_helpers.c
  36  struct xe_device *xe;
  38  xe = drm_kunit_helper_alloc_drm_device(test, dev,
  41  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
  42  return xe;
  69  * Return: Always 0.
  73  struct xe_device *xe;
  80  xe = xe_kunit_helper_alloc_xe_device(test, dev);
  81  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
  83  err = xe_pci_fake_device_init(xe);
  84  KUNIT_ASSERT_EQ(test, err, 0);
  …