
Searched +full:0 +full:xe (Results 1 – 25 of 1066) sorted by relevance


/linux/drivers/gpu/drm/xe/
xe_pm.c
30 * DOC: Xe Power Management
32 * Xe PM implements the main routines for both system level suspend states and
52 * to perform the transition from D3hot to D3cold. Xe may disallow this
60 * (PC-states), and/or other low level power states. Xe PM component provides
64 * Also, Xe PM provides get and put functions that Xe driver will use to
87 * @xe: The xe device.
92 bool xe_rpm_reclaim_safe(const struct xe_device *xe) in xe_rpm_reclaim_safe() argument
94 return !xe->d3cold.capable; in xe_rpm_reclaim_safe()
97 static void xe_rpm_lockmap_acquire(const struct xe_device *xe) in xe_rpm_lockmap_acquire() argument
99 lock_map_acquire(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_acquire()
[all …]
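
The DOC text above says Xe PM exposes get and put calls that driver paths use to mark activity and keep the device out of runtime suspend. A minimal hedged sketch of that usage, assuming the xe_pm_runtime_get()/xe_pm_runtime_put() pair the comment refers to; the wrapped do_hw_access() is a hypothetical stand-in for real MMIO work:

    /* Sketch: bracket hardware access with a runtime-PM reference so the
     * device cannot enter D3 while we touch it. */
    static void do_hw_access_awake(struct xe_device *xe)
    {
            xe_pm_runtime_get(xe);  /* bump ref: blocks runtime suspend */
            do_hw_access(xe);       /* hypothetical MMIO work, device in D0 */
            xe_pm_runtime_put(xe);  /* drop ref: autosuspend may kick in */
    }
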
xe_irq.c
30 #define IMR(offset) XE_REG(offset + 0x4)
31 #define IIR(offset) XE_REG(offset + 0x8)
32 #define IER(offset) XE_REG(offset + 0xc)
34 static int xe_irq_msix_init(struct xe_device *xe);
35 static void xe_irq_msix_free(struct xe_device *xe);
36 static int xe_irq_msix_request_irqs(struct xe_device *xe);
37 static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
43 if (val == 0) in assert_iir_is_zero()
46 drm_WARN(&mmio->tile->xe->drm, 1, in assert_iir_is_zero()
47 "Interrupt register 0x%x is not zero: 0x%08x\n", in assert_iir_is_zero()
[all …]
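
The IMR/IIR/IER macros at the top of this file encode the fixed layout of each hardware interrupt block: the mask register sits at base + 0x4, the identity (status) register at base + 0x8, and the enable register at base + 0xc. A standalone restatement of that address math (the example base offset is made up):

    /* Offsets within one interrupt register block, per the macros above:
     *   IMR = mask, IIR = identity/status, IER = enable. */
    #define IMR(base) ((base) + 0x4)
    #define IIR(base) ((base) + 0x8)
    #define IER(base) ((base) + 0xc)

    /* e.g. a hypothetical block at 0x44000: IIR(0x44000) == 0x44008.
     * assert_iir_is_zero() above warns when IIR reads nonzero before
     * interrupts are enabled, i.e. a stale interrupt is still pending. */
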
xe_pat.c
22 #define _PAT_ATS 0x47fc
24 0x4800, 0x4804, \
25 0x4848, 0x484c)
26 #define _PAT_PTA 0x4820
33 #define XE2_COH_MODE REG_GENMASK(1, 0)
38 #define XELPG_PAT_0_WB REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
39 #define XELPG_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
42 #define XELPG_0_COH_NON REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)
47 #define XELP_MEM_TYPE_MASK REG_GENMASK(1, 0)
51 #define XELP_PAT_UC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)
[all …]
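
The PAT masks above are composed with the kernel's REG_GENMASK()/REG_FIELD_PREP() helpers. A self-contained re-creation of that bitfield pattern (the stand-in macros and the second, 3:2 field are illustrative; only the 1:0 memory-type field comes from the snippet):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's REG_GENMASK()/REG_FIELD_PREP(). */
    #define GENMASK_U32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define FIELD_PREP_U32(mask, val) \
            (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

    #define XELP_MEM_TYPE_MASK GENMASK_U32(1, 0)  /* bits 1:0, as above */
    #define XELP_PAT_UC FIELD_PREP_U32(XELP_MEM_TYPE_MASK, 0)

    int main(void)
    {
            /* UC encodes as 0 in the memory-type field... */
            printf("0x%x\n", XELP_PAT_UC);  /* prints 0x0 */
            /* ...and a hypothetical field at bits 3:2 shows the shift */
            printf("0x%x\n", FIELD_PREP_U32(GENMASK_U32(3, 2), 0x2)); /* 0x8 */
            return 0;
    }
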
xe_ttm_stolen_mgr.c
49 * @xe: xe device
55 bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe) in xe_ttm_stolen_cpu_access_needs_ggtt() argument
57 return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe); in xe_ttm_stolen_cpu_access_needs_ggtt()
60 static u32 get_wopcm_size(struct xe_device *xe) in get_wopcm_size() argument
65 val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED); in get_wopcm_size()
69 case 0x5 ... 0x6: in get_wopcm_size()
72 case 0x0 ... 0x3: in get_wopcm_size()
77 wopcm_size = 0; in get_wopcm_size()
83 static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) in detect_bar2_dgfx() argument
85 struct xe_tile *tile = xe_device_get_root_tile(xe); in detect_bar2_dgfx()
[all …]
xe_vram.c
27 _resize_bar(struct xe_device *xe, int resno, resource_size_t size) in _resize_bar() argument
29 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in _resize_bar()
38 …drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support… in _resize_bar()
43 drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); in _resize_bar()
50 static void resize_vram_bar(struct xe_device *xe) in resize_vram_bar() argument
53 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in resize_vram_bar()
78 drm_info(&xe->drm, in resize_vram_bar()
79 "Requested size: %lluMiB is not supported by rebar sizes: 0x%x. Leaving default: %lluMiB\n", in resize_vram_bar()
96 drm_info(&xe->drm, "Attempting to resize bar from %lluMiB -> %lluMiB\n", in resize_vram_bar()
104 (u64)root_res->start > 0x100000000ul) in resize_vram_bar()
[all …]
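
resize_vram_bar() checks the requested BAR size against the supported "rebar sizes" bitmask before resizing (the 0x%x value in the message above). In the PCI resizable-BAR capability, bit n of that mask means a 2^n MiB BAR is supported. A standalone sketch of the check; the mask value is a made-up example:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bit n set in the rebar mask => a 2^n MiB BAR size is supported. */
    static bool rebar_size_supported(uint32_t rebar_mask, uint64_t size_mib)
    {
            unsigned int bit;

            if (size_mib == 0 || (size_mib & (size_mib - 1)))
                    return false;  /* BAR sizes are powers of two */
            bit = __builtin_ctzll(size_mib);
            return bit < 32 && (rebar_mask & (1u << bit));
    }

    int main(void)
    {
            uint32_t mask = 0x2300;  /* hypothetical: 256 MiB, 512 MiB, 8 GiB */

            printf("%d %d\n", rebar_size_supported(mask, 256),
                   rebar_size_supported(mask, 1024));  /* prints: 1 0 */
            return 0;
    }
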
xe_bo_evict.c
16 * @xe: xe device
27 int xe_bo_evict_all(struct xe_device *xe) in xe_bo_evict_all() argument
29 struct ttm_device *bdev = &xe->ttm; in xe_bo_evict_all()
50 if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))) in xe_bo_evict_all()
62 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
64 bo = list_first_entry_or_null(&xe->pinned.external_vram, in xe_bo_evict_all()
70 spin_unlock(&xe->pinned.lock); in xe_bo_evict_all()
77 spin_lock(&xe->pinned.lock); in xe_bo_evict_all()
79 &xe->pinned.external_vram); in xe_bo_evict_all()
80 spin_unlock(&xe->pinned.lock); in xe_bo_evict_all()
[all …]
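
The eviction loop above follows a classic pop-under-spinlock shape: take one BO off the pinned list while holding xe->pinned.lock, drop the lock to do work that may sleep, then retake the lock to file the BO on its next list. A hedged kernel-style sketch of that shape (the pinned_link member, the evicted destination list, and handle_one() are assumptions for illustration):

    spin_lock(&xe->pinned.lock);
    for (;;) {
            bo = list_first_entry_or_null(&xe->pinned.external_vram,
                                          struct xe_bo, pinned_link);
            if (!bo)
                    break;
            list_del_init(&bo->pinned_link);  /* claim it under the lock */
            spin_unlock(&xe->pinned.lock);

            handle_one(bo);                   /* may sleep: evict/migrate */

            spin_lock(&xe->pinned.lock);
            list_add_tail(&bo->pinned_link, &xe->pinned.evicted);
    }
    spin_unlock(&xe->pinned.lock);
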
xe_sriov_pf.c
15 static unsigned int wanted_max_vfs(struct xe_device *xe) in wanted_max_vfs() argument
20 static int pf_reduce_totalvfs(struct xe_device *xe, int limit) in pf_reduce_totalvfs() argument
22 struct device *dev = xe->drm.dev; in pf_reduce_totalvfs()
28 xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n", in pf_reduce_totalvfs()
33 static bool pf_continue_as_native(struct xe_device *xe, const char *why) in pf_continue_as_native() argument
35 xe_sriov_dbg(xe, "%s, continuing as native\n", why); in pf_continue_as_native()
36 pf_reduce_totalvfs(xe, 0); in pf_continue_as_native()
42 * @xe: the &xe_device to check
49 bool xe_sriov_pf_readiness(struct xe_device *xe) in xe_sriov_pf_readiness() argument
51 struct device *dev = xe->drm.dev; in xe_sriov_pf_readiness()
[all …]
xe_step.c
40 [0] = { COMMON_STEP(A0) },
45 [0] = { COMMON_STEP(A0) },
50 [0x0] = { COMMON_STEP(A0) },
51 [0x1] = { COMMON_STEP(A0) },
52 [0x4] = { COMMON_STEP(B0) },
53 [0x8] = { COMMON_STEP(C0) },
54 [0xC] = { COMMON_STEP(D0) },
58 [0x4] = { COMMON_STEP(D0) },
59 [0xC] = { COMMON_STEP(D0) },
63 [0x0] = { COMMON_STEP(A0) },
[all …]
xe_gsc_submit.c
31 * as we use unique identifier for each user, with handle 0 being reserved for
61 * @xe: the Xe device
70 u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset, in xe_gsc_emit_header() argument
73 xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK)); in xe_gsc_emit_header()
78 xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE); in xe_gsc_emit_header()
80 mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER); in xe_gsc_emit_header()
81 mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id); in xe_gsc_emit_header()
82 mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id); in xe_gsc_emit_header()
83 mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION); in xe_gsc_emit_header()
84 mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE); in xe_gsc_emit_header()
[all …]
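
xe_gsc_emit_header() above first zeroes GSC_HDR_SIZE bytes at the destination, then writes the individual header fields: validity marker, HECI client id, host session handle, header version, and a message size equal to payload plus header. A hypothetical struct view of that layout, purely to visualize the writes; field order and widths here are assumptions, not the driver's real definition:

    /* Hypothetical sketch of the MTL GSC header the snippet fills in. */
    struct gsc_header_sketch {
            u32 validity_marker;      /* GSC_HECI_VALIDITY_MARKER */
            u8  heci_client_id;       /* which GSC client to address */
            u64 host_session_handle;  /* caller-chosen session id */
            u8  header_version;       /* MTL_GSC_HEADER_VERSION */
            u32 message_size;         /* payload_size + GSC_HDR_SIZE */
            /* remaining fields stay zero from the initial memset */
    };
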
xe_sriov_vf.c
128 * @xe: the &xe_device to initialize
130 void xe_sriov_vf_init_early(struct xe_device *xe) in xe_sriov_vf_init_early() argument
132 INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func); in xe_sriov_vf_init_early()
137 * @xe: the &xe_device struct instance
143 * Returns: 0 if the operation completed successfully, or a negative error
146 static int vf_post_migration_requery_guc(struct xe_device *xe) in vf_post_migration_requery_guc() argument
150 int err, ret = 0; in vf_post_migration_requery_guc()
152 for_each_gt(gt, xe, id) { in vf_post_migration_requery_guc()
162 * @xe: the &xe_device struct instance
167 static bool vf_post_migration_imminent(struct xe_device *xe) in vf_post_migration_imminent() argument
[all …]
xe_vm.c
60 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
66 -EAGAIN : 0; in xe_vma_userptr_check_repin()
73 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages() local
76 xe_assert(xe, xe_vma_is_userptr(vma)); in xe_vma_userptr_pin_pages()
114 return 0; in alloc_preempt_fences()
125 return 0; in alloc_preempt_fences()
139 if (timeout < 0 || q->lr.pfence->error == -ETIME) in wait_for_existing_preempt_fences()
147 return 0; in wait_for_existing_preempt_fences()
172 xe_assert(vm->xe, link != list); in arm_preempt_fences()
190 return 0; in add_preempt_fences()
[all …]
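
Per the comment above, xe_vma_userptr_check_repin() returns -EAGAIN when the userptr mapping was invalidated underneath us and a repin is recommended. A hedged sketch of the retry loop a caller could build on that contract (the loop itself is an assumption; only the two functions and the -EAGAIN convention come from the snippet):

    int err;

    do {
            err = xe_vma_userptr_pin_pages(uvma);    /* (re)pin backing pages */
            if (err)
                    break;                           /* real error: give up */
            err = xe_vma_userptr_check_repin(uvma);  /* -EAGAIN: raced, retry */
    } while (err == -EAGAIN);
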
xe_wait_user_fence.c
52 return passed ? 0 : 1; in do_compare()
58 static long to_jiffies_timeout(struct xe_device *xe, in to_jiffies_timeout() argument
70 if (args->timeout < 0) { in to_jiffies_timeout()
75 if (args->timeout == 0) in to_jiffies_timeout()
76 return 0; in to_jiffies_timeout()
104 struct xe_device *xe = to_xe_device(dev); in xe_wait_user_fence_ioctl() local
110 int err = 0; in xe_wait_user_fence_ioctl()
114 if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) || in xe_wait_user_fence_ioctl()
115 XE_IOCTL_DBG(xe, args->pad2) || in xe_wait_user_fence_ioctl()
116 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_wait_user_fence_ioctl()
[all …]
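
to_jiffies_timeout() above maps the ioctl's signed nanosecond timeout onto a scheduler wait: negative means wait forever, zero means do not block, and positive values become jiffies. A standalone re-creation under those rules (the tick rate and the round-up choice are assumptions; only the sign conventions come from the snippet):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HZ 250                         /* example tick rate */
    #define NSEC_PER_SEC 1000000000LL
    #define MAX_SCHEDULE_TIMEOUT LONG_MAX  /* "wait forever" sentinel */

    static long to_jiffies_timeout_sketch(int64_t timeout_ns)
    {
            if (timeout_ns < 0)
                    return MAX_SCHEDULE_TIMEOUT;  /* infinite wait */
            if (timeout_ns == 0)
                    return 0;                     /* poll, don't sleep */
            /* round up so the wait is never shorter than requested */
            return (long)((timeout_ns * HZ + NSEC_PER_SEC - 1) / NSEC_PER_SEC);
    }

    int main(void)
    {
            printf("%ld %ld %ld\n",
                   to_jiffies_timeout_sketch(-1),        /* LONG_MAX */
                   to_jiffies_timeout_sketch(0),         /* 0 */
                   to_jiffies_timeout_sketch(1000000));  /* 1 ms -> 1 jiffy */
            return 0;
    }
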
xe_oa.c
120 [XE_OA_FORMAT_A12] = { 0, 64, DRM_FMT(OAG) },
129 [XE_OA_FORMAT_PEC64u64] = { 1, 576, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
132 [XE_OA_FORMAT_PEC32u64_G1] = { 5, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
134 [XE_OA_FORMAT_PEC32u64_G2] = { 6, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
136 [XE_OA_FORMAT_PEC36u64_G1_32_G2_4] = { 3, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
137 [XE_OA_FORMAT_PEC36u64_G1_4_G2_32] = { 4, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
217 *(u64 *)report = 0; in oa_report_id_clear()
219 *report = 0; in oa_report_id_clear()
232 *(u64 *)&report[2] = 0; in oa_timestamp_clear()
234 report[1] = 0; in oa_timestamp_clear()
[all …]
xe_huc.c
50 struct xe_device *xe = gt_to_xe(gt); in huc_alloc_gsc_pkt() local
54 bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt), in huc_alloc_gsc_pkt()
63 return 0; in huc_alloc_gsc_pkt()
70 struct xe_device *xe = gt_to_xe(gt); in xe_huc_init() local
78 return 0; in xe_huc_init()
86 return 0; in xe_huc_init()
88 if (IS_SRIOV_VF(xe)) in xe_huc_init()
89 return 0; in xe_huc_init()
99 return 0; in xe_huc_init()
109 struct xe_device *xe = huc_to_xe(huc); in xe_huc_init_post_hwconfig() local
[all …]
xe_vsec.c
24 #define BMG_DEVICE_ID 0xE2F8
27 .length = 0x10,
31 .tbir = 0,
36 .length = 0x10,
40 .tbir = 0,
41 .offset = BMG_DISCOVERY_OFFSET + 0x60,
45 .length = 0x10,
49 .tbir = 0,
50 .offset = BMG_DISCOVERY_OFFSET + 0x78,
61 XE_VSEC_UNKNOWN = 0,
[all …]
xe_uc_fw.c
65 * - xe/wipplat_guc_<major>.<minor>.<patch>.bin
66 * - xe/wipplat_huc_<major>.<minor>.<patch>.bin
72 * 3) Platform officially supported by xe and out of force-probe. Using
77 * - xe/plat_guc_<major>.bin
78 * - xe/plat_huc.bin
88 * TODO: Currently there's no fallback on major version. That's because xe
110 fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
111 fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
122 fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
123 fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
[all …]
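
Reading the BATTLEMAGE rows above through the naming scheme in the DOC comment, the GuC entry presumably resolves to the major-versioned path xe/bmg_guc_70.bin, with 70.29.2 as the full version it is validated against, while the no_ver HuC entry maps to the unversioned xe/bmg_huc.bin form.
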
xe_drm_client.c
27 * The drm/xe driver implements the DRM client usage stats specification as
35 * pos: 0
39 * drm-driver: xe
42 * drm-total-system: 0
43 * drm-shared-system: 0
44 * drm-active-system: 0
45 * drm-resident-system: 0
46 * drm-purgeable-system: 0
48 * drm-shared-gtt: 0
49 * drm-active-gtt: 0
[all …]
/linux/drivers/gpu/drm/xe/tests/
xe_migrate.c
15 static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence, in sanity_fence_failed() argument
29 if (ret <= 0) { in sanity_fence_failed()
37 static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe, in run_sanity_job() argument
41 u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm); in run_sanity_job()
57 if (sanity_fence_failed(xe, fence, str, test)) in run_sanity_job()
62 return 0; in run_sanity_job()
70 } } while (0)
75 struct xe_device *xe = tile_to_xe(m->tile); in test_copy() local
76 u64 retval, expected = 0; in test_copy()
82 struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL, in test_copy()
[all …]
xe_kunit_helpers.c
36 struct xe_device *xe; in xe_kunit_helper_alloc_xe_device() local
38 xe = drm_kunit_helper_alloc_drm_device(test, dev, in xe_kunit_helper_alloc_xe_device()
41 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_alloc_xe_device()
42 return xe; in xe_kunit_helper_alloc_xe_device()
69 * Return: Always 0.
73 struct xe_device *xe; in xe_kunit_helper_xe_device_test_init() local
80 xe = xe_kunit_helper_alloc_xe_device(test, dev); in xe_kunit_helper_xe_device_test_init()
81 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_xe_device_test_init()
83 err = xe_pci_fake_device_init(xe); in xe_kunit_helper_xe_device_test_init()
84 KUNIT_ASSERT_EQ(test, err, 0); in xe_kunit_helper_xe_device_test_init()
[all …]
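
xe_kunit_helper_xe_device_test_init() above is built to slot in as a KUnit suite's .init hook, so every test case starts with a fake xe device already set up. A hedged sketch of a suite wired up that way (the suite, the test body, and the assumption that the helper stashes the device in test->priv are illustrative):

    /* Hypothetical KUnit suite using the helper above as per-test init. */
    static void xe_example_test(struct kunit *test)
    {
            struct xe_device *xe = test->priv;  /* assumed: set by the helper */

            KUNIT_EXPECT_NOT_ERR_OR_NULL(test, xe);
    }

    static struct kunit_case xe_example_cases[] = {
            KUNIT_CASE(xe_example_test),
            {}
    };

    static struct kunit_suite xe_example_suite = {
            .name = "xe_example",
            .init = xe_kunit_helper_xe_device_test_init,
            .test_cases = xe_example_cases,
    };

    kunit_test_suite(xe_example_suite);
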
xe_mocs.c
28 memset(arg, 0, sizeof(*arg)); in live_mocs_init()
50 KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); in read_l3cc_table()
52 for (i = 0; i < info->num_mocs_regs; i++) { in read_l3cc_table()
59 mocs_dbg(gt, "reg_val=0x%x\n", reg_val); in read_l3cc_table()
66 l3cc = reg_val & 0xffff; in read_l3cc_table()
68 mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n", in read_l3cc_table()
89 KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); in read_mocs_table()
91 for (i = 0; i < info->num_mocs_regs; i++) { in read_mocs_table()
100 mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n", in read_mocs_table()
104 "mocs reg 0x%x has incorrect val.\n", i); in read_mocs_table()
[all …]
/linux/drivers/gpu/drm/amd/include/asic_reg/umc/
umc_6_7_0_sh_mask.h
29 …C_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
30 …_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
31 …_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16
32 …_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18
33 …_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e
34 …_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
35 …_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26
36 …_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
37 …_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29
38 …_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
[all …]
/linux/drivers/gpu/drm/xe/display/
xe_hdcp_gsc.c
40 struct xe_device *xe = to_xe_device(display->drm); in intel_hdcp_gsc_check_status() local
41 struct xe_tile *tile = xe_device_get_root_tile(xe); in intel_hdcp_gsc_check_status()
48 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
53 xe_pm_runtime_get(xe); in intel_hdcp_gsc_check_status()
56 drm_dbg_kms(&xe->drm, in intel_hdcp_gsc_check_status()
67 xe_pm_runtime_put(xe); in intel_hdcp_gsc_check_status()
75 struct xe_device *xe = to_xe_device(display->drm); in intel_hdcp_gsc_initialize_message() local
78 int ret = 0; in intel_hdcp_gsc_initialize_message()
81 bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2, in intel_hdcp_gsc_initialize_message()
94 xe_map_memset(xe, &bo->vmap, 0, 0, bo->size); in intel_hdcp_gsc_initialize_message()
[all …]
xe_plane_initial.c
29 /* Early xe has no irq */ in intel_plane_initial_vblank_wait()
30 struct xe_device *xe = to_xe_device(crtc->base.dev); in intel_plane_initial_vblank_wait() local
35 timestamp = xe_mmio_read32(xe_root_tile_mmio(xe), pipe_frmtmstmp); in intel_plane_initial_vblank_wait()
37 …ret = xe_mmio_wait32_not(xe_root_tile_mmio(xe), pipe_frmtmstmp, ~0U, timestamp, 40000U, &timestamp… in intel_plane_initial_vblank_wait()
38 if (ret < 0) in intel_plane_initial_vblank_wait()
39 drm_warn(&xe->drm, "waiting for early vblank failed with %i\n", ret); in intel_plane_initial_vblank_wait()
47 struct xe_device *xe = to_xe_device(this->base.dev); in intel_reuse_initial_plane_obj() local
50 for_each_intel_crtc(&xe->drm, crtc) { in intel_reuse_initial_plane_obj()
74 initial_plane_bo(struct xe_device *xe, in initial_plane_bo() argument
77 struct xe_tile *tile0 = xe_device_get_root_tile(xe); in initial_plane_bo()
[all …]
/linux/arch/mips/math-emu/
ieee754dp.c
65 xm += 0x3 + ((xm >> 3) & 1); in ieee754dp_get_rounding()
66 /* xm += (xm&0x8)?0x4:0x3 */ in ieee754dp_get_rounding()
70 xm += 0x8; in ieee754dp_get_rounding()
74 xm += 0x8; in ieee754dp_get_rounding()
84 * xe is an unbiased exponent
87 union ieee754dp ieee754dp_format(int sn, int xe, u64 xm) in ieee754dp_format() argument
91 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */ in ieee754dp_format()
94 if (xe < DP_EMIN) { in ieee754dp_format()
96 int es = DP_EMIN - xe; in ieee754dp_format()
107 if (sn == 0) in ieee754dp_format()
[all …]
ieee754sp.c
65 xm += 0x3 + ((xm >> 3) & 1); in ieee754sp_get_rounding()
66 /* xm += (xm&0x8)?0x4:0x3 */ in ieee754sp_get_rounding()
70 xm += 0x8; in ieee754sp_get_rounding()
74 xm += 0x8; in ieee754sp_get_rounding()
84 * xe is an unbiased exponent
87 union ieee754sp ieee754sp_format(int sn, int xe, unsigned int xm) in ieee754sp_format() argument
91 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */ in ieee754sp_format()
94 if (xe < SP_EMIN) { in ieee754sp_format()
96 int es = SP_EMIN - xe; in ieee754sp_format()
107 if (sn == 0) in ieee754sp_format()
[all …]
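
Both get_rounding() helpers above (double and single precision) use the same round-to-nearest-even trick: with three extra low bits (guard/round/sticky) sitting below the result, adding 0x3 plus the result's own lowest bit makes a carry propagate exactly when the value must round up, and breaks exact ties toward an even mantissa. A standalone demonstration of that increment:

    #include <stdint.h>
    #include <stdio.h>

    /* Round a value carrying 3 extra low bits to nearest-even, using the
     * same increment as ieee754dp/sp_get_rounding() above. */
    static uint64_t round_nearest_even(uint64_t xm)
    {
            xm += 0x3 + ((xm >> 3) & 1);  /* xm += (xm & 0x8) ? 0x4 : 0x3 */
            return xm >> 3;               /* drop guard/round/sticky bits */
    }

    int main(void)
    {
            /* 0x14 = 10.100b: a tie on an even result stays at 2 */
            printf("%llu\n", (unsigned long long)round_nearest_even(0x14));
            /* 0x1c = 11.100b: a tie on an odd result rounds up to 4 */
            printf("%llu\n", (unsigned long long)round_nearest_even(0x1c));
            /* 0x15 = 10.101b: above the tie, rounds up to 3 */
            printf("%llu\n", (unsigned long long)round_nearest_even(0x15));
            return 0;
    }
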
