
Searched refs:gt (Results 1 – 25 of 337) sorted by relevance

/linux/drivers/gpu/drm/xe/
xe_gt_sriov_pf_control.c
56 static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd) in pf_send_vf_control_cmd() argument
60 xe_gt_assert(gt, vfid != PFID); in pf_send_vf_control_cmd()
61 xe_gt_sriov_dbg_verbose(gt, "sending VF%u control command %s\n", in pf_send_vf_control_cmd()
64 err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd); in pf_send_vf_control_cmd()
66 xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n", in pf_send_vf_control_cmd()
71 static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid) in pf_send_vf_pause() argument
73 return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE); in pf_send_vf_pause()
76 static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid) in pf_send_vf_resume() argument
78 return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME); in pf_send_vf_resume()
81 static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid) in pf_send_vf_stop() argument
[all …]
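
Note: the xe_gt_sriov_pf_control.c hits above show a common driver idiom, one central send routine plus thin per-command wrappers. Below is a minimal userspace C sketch of that shape; the command values and the send_to_guc() stub are hypothetical stand-ins, not the real GuC interface.

#include <stdio.h>

enum vf_cmd { VF_PAUSE = 1, VF_RESUME = 2, VF_STOP = 3 };

/* Stand-in for guc_action_vf_control_cmd(): pretend the firmware accepted it. */
static int send_to_guc(unsigned int vfid, enum vf_cmd cmd)
{
        printf("VF%u: command %d\n", vfid, cmd);
        return 0;
}

/* Central routine: every command funnels through here for error reporting. */
static int send_vf_control_cmd(unsigned int vfid, enum vf_cmd cmd)
{
        int err = send_to_guc(vfid, cmd);

        if (err)
                fprintf(stderr, "VF%u control command failed (%d)\n", vfid, err);
        return err;
}

/* Thin wrapper, like pf_send_vf_pause()/pf_send_vf_resume() above. */
static int send_vf_pause(unsigned int vfid)
{
        return send_vf_control_cmd(vfid, VF_PAUSE);
}

int main(void)
{
        return send_vf_pause(1);
}
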
xe_gt.c
65 struct xe_gt *gt = arg; in gt_fini() local
67 destroy_workqueue(gt->ordered_wq); in gt_fini()
72 struct xe_gt *gt; in xe_gt_alloc() local
75 gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL); in xe_gt_alloc()
76 if (!gt) in xe_gt_alloc()
79 gt->tile = tile; in xe_gt_alloc()
80 gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", in xe_gt_alloc()
83 err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt); in xe_gt_alloc()
87 return gt; in xe_gt_alloc()
90 void xe_gt_sanitize(struct xe_gt *gt) in xe_gt_sanitize() argument
[all …]
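
Note: xe_gt_alloc() above pairs the allocation with drmm_add_action_or_reset(), so the GT is torn down automatically with the device. A minimal sketch of that managed-cleanup idea, with a toy action list standing in for the DRM-managed (drmm_*) infrastructure:

#include <stdio.h>
#include <stdlib.h>

struct action { void (*fn)(void *); void *arg; };
static struct action actions[8];
static int nr_actions;

/* Like drmm_add_action_or_reset(): on failure, run the cleanup immediately. */
static int add_action_or_reset(void (*fn)(void *), void *arg)
{
        if (nr_actions == 8) {
                fn(arg);
                return -1;
        }
        actions[nr_actions].fn = fn;
        actions[nr_actions].arg = arg;
        nr_actions++;
        return 0;
}

/* Run at "device" teardown, newest action first. */
static void run_actions(void)
{
        while (nr_actions--)
                actions[nr_actions].fn(actions[nr_actions].arg);
}

struct gt { int id; };

static void gt_fini(void *arg)
{
        free(arg);
        puts("gt freed");
}

static struct gt *gt_alloc(void)
{
        struct gt *gt = calloc(1, sizeof(*gt));

        if (!gt || add_action_or_reset(gt_fini, gt))
                return NULL;
        return gt;
}

int main(void)
{
        struct gt *gt = gt_alloc();

        run_actions();
        return gt ? 0 : 1;
}
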
xe_gt_sriov_pf_policy.c
37 static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords) in pf_send_policy_klvs() argument
40 struct xe_tile *tile = gt_to_tile(gt); in pf_send_policy_klvs()
42 struct xe_guc *guc = &gt->uc.guc; in pf_send_policy_klvs()
67 static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs, in pf_push_policy_klvs() argument
72 xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords)); in pf_push_policy_klvs()
74 ret = pf_send_policy_klvs(gt, klvs, num_dwords); in pf_push_policy_klvs()
78 struct drm_printer p = xe_gt_info_printer(gt); in pf_push_policy_klvs()
80 xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n", in pf_push_policy_klvs()
89 static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value) in pf_push_policy_u32() argument
96 return pf_push_policy_klvs(gt, 1, klv, ARRAY_SIZE(klv)); in pf_push_policy_u32()
[all …]
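
Note: the policy code above pushes GuC KLVs (key/length/value records): a one-dword header carrying the key in the upper 16 bits and the payload length in dwords in the lower 16, followed by the value. A sketch of building such a record; the key and value are made up, and the exact bit layout should be checked against the GuC ABI headers.

#include <stdio.h>

#define KLV_KEY(k)      ((unsigned int)(k) << 16)      /* key in bits 31:16 */
#define KLV_LEN(n)      ((unsigned int)(n) & 0xffff)   /* length in bits 15:0 */

int main(void)
{
        unsigned int key = 0x9001;      /* hypothetical policy key */
        unsigned int klv[] = { KLV_KEY(key) | KLV_LEN(1), 0xdeadbeef };
        unsigned int num_dwords = sizeof(klv) / sizeof(klv[0]);

        for (unsigned int i = 0; i < num_dwords; i++)
                printf("klv[%u] = %#010x\n", i, klv[i]);
        return 0;
}
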
xe_gt_sriov_pf_config.c
60 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid) in pf_send_vf_cfg_reset() argument
62 struct xe_guc *guc = &gt->uc.guc; in pf_send_vf_cfg_reset()
74 static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords) in pf_send_vf_cfg_klvs() argument
77 struct xe_tile *tile = gt_to_tile(gt); in pf_send_vf_cfg_klvs()
79 struct xe_guc *guc = &gt->uc.guc; in pf_send_vf_cfg_klvs()
105 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, in pf_push_vf_cfg_klvs() argument
110 xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords)); in pf_push_vf_cfg_klvs()
112 ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords); in pf_push_vf_cfg_klvs()
116 struct drm_printer p = xe_gt_info_printer(gt); in pf_push_vf_cfg_klvs()
119 xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n", in pf_push_vf_cfg_klvs()
[all …]
xe_gt_mcr.c
251 static void init_steering_l3bank(struct xe_gt *gt) in init_steering_l3bank() argument
253 struct xe_mmio *mmio = &gt->mmio; in init_steering_l3bank()
255 if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { in init_steering_l3bank()
265 gt->steering[L3BANK].group_target = __ffs(mslice_mask); in init_steering_l3bank()
266 gt->steering[L3BANK].instance_target = in init_steering_l3bank()
268 } else if (gt_to_xe(gt)->info.platform == XE_DG2) { in init_steering_l3bank()
278 gt->steering[L3BANK].group_target = (bank >> 2) & 0x7; in init_steering_l3bank()
279 gt->steering[L3BANK].instance_target = bank & 0x3; in init_steering_l3bank()
284 gt->steering[L3BANK].group_target = 0; /* unused */ in init_steering_l3bank()
285 gt->steering[L3BANK].instance_target = __ffs(fuse); in init_steering_l3bank()
[all …]
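
Note: init_steering_l3bank() above decodes fuse registers into a steering target by taking the first enabled bank and splitting it into group/instance fields. A tiny sketch of that decode; the mask value is made up, and the DG2-style bit split is taken from the excerpt.

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int fuse = 0x58;               /* hypothetical L3 bank enable mask */
        unsigned int bank = ffs(fuse) - 1;      /* first enabled bank, like __ffs() */
        unsigned int group = (bank >> 2) & 0x7;
        unsigned int instance = bank & 0x3;

        printf("bank %u -> group %u, instance %u\n", bank, group, instance);
        return 0;
}
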
xe_gt_debugfs.c
79 struct xe_gt *gt = parent->d_inode->i_private; in xe_gt_debugfs_simple_show() local
85 return print(gt, &p); in xe_gt_debugfs_simple_show()
88 static int hw_engines(struct xe_gt *gt, struct drm_printer *p) in hw_engines() argument
90 struct xe_device *xe = gt_to_xe(gt); in hw_engines()
96 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); in hw_engines()
99 xe_force_wake_put(gt_to_fw(gt), fw_ref); in hw_engines()
103 for_each_hw_engine(hwe, gt, id) in hw_engines()
106 xe_force_wake_put(gt_to_fw(gt), fw_ref); in hw_engines()
112 static int powergate_info(struct xe_gt *gt, struct drm_printer *p) in powergate_info() argument
116 xe_pm_runtime_get(gt_to_xe(gt)); in powergate_info()
[all …]
xe_gt_sriov_vf.c
49 static int vf_reset_guc_state(struct xe_gt *gt) in vf_reset_guc_state() argument
51 struct xe_guc *guc = &gt->uc.guc; in vf_reset_guc_state()
56 xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err)); in vf_reset_guc_state()
93 static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor) in vf_minimum_guc_version() argument
95 struct xe_device *xe = gt_to_xe(gt); in vf_minimum_guc_version()
113 static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor) in vf_wanted_guc_version() argument
116 return vf_minimum_guc_version(gt, branch, major, minor); in vf_wanted_guc_version()
119 static int vf_handshake_with_guc(struct xe_gt *gt) in vf_handshake_with_guc() argument
121 struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version; in vf_handshake_with_guc()
122 struct xe_guc *guc = &gt->uc.guc; in vf_handshake_with_guc()
[all …]
xe_gt_sriov_pf_config.h
15 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid);
16 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size);
17 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt,
19 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt,
22 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid);
23 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs);
24 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
25 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
28 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid);
29 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs);
[all …]
xe_gt.h
22 #define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0) argument
31 int xe_gt_init_hwconfig(struct xe_gt *gt);
32 int xe_gt_init_early(struct xe_gt *gt);
33 int xe_gt_init(struct xe_gt *gt);
34 void xe_gt_mmio_init(struct xe_gt *gt);
35 void xe_gt_declare_wedged(struct xe_gt *gt);
36 int xe_gt_record_default_lrcs(struct xe_gt *gt);
48 void xe_gt_record_user_engines(struct xe_gt *gt);
50 void xe_gt_suspend_prepare(struct xe_gt *gt);
51 int xe_gt_suspend(struct xe_gt *gt);
[all …]
xe_gt_sriov_pf.c
32 static int pf_alloc_metadata(struct xe_gt *gt) in pf_alloc_metadata() argument
34 unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt); in pf_alloc_metadata()
36 gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs, in pf_alloc_metadata()
37 sizeof(*gt->sriov.pf.vfs), GFP_KERNEL); in pf_alloc_metadata()
38 if (!gt->sriov.pf.vfs) in pf_alloc_metadata()
52 int xe_gt_sriov_pf_init_early(struct xe_gt *gt) in xe_gt_sriov_pf_init_early() argument
56 err = pf_alloc_metadata(gt); in xe_gt_sriov_pf_init_early()
60 err = xe_gt_sriov_pf_service_init(gt); in xe_gt_sriov_pf_init_early()
64 err = xe_gt_sriov_pf_control_init(gt); in xe_gt_sriov_pf_init_early()
76 static void pf_enable_ggtt_guest_update(struct xe_gt *gt) in pf_enable_ggtt_guest_update() argument
[all …]
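
Note: pf_alloc_metadata() above sizes the array as 1 + num_vfs so that a 1-based VF number indexes it directly (slot 0 being the PF itself). A minimal sketch of that layout with placeholder contents:

#include <stdio.h>
#include <stdlib.h>

struct vf_metadata { unsigned int config; };

int main(void)
{
        unsigned int num_vfs = 4;
        struct vf_metadata *vfs = calloc(1 + num_vfs, sizeof(*vfs));

        if (!vfs)
                return 1;
        for (unsigned int vfid = 1; vfid <= num_vfs; vfid++)
                vfs[vfid].config = vfid;        /* vfid used as index, no off-by-one */
        printf("VF3 config = %u\n", vfs[3].config);
        free(vfs);
        return 0;
}
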
xe_hw_engine.c
279 hwe->gt = NULL; in hw_engine_fini()
295 xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base)); in xe_hw_engine_mmio_write32()
296 xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); in xe_hw_engine_mmio_write32()
300 xe_mmio_write32(&hwe->gt->mmio, reg, val); in xe_hw_engine_mmio_write32()
315 xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base)); in xe_hw_engine_mmio_read32()
316 xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); in xe_hw_engine_mmio_read32()
320 return xe_mmio_read32(&hwe->gt->mmio, reg); in xe_hw_engine_mmio_read32()
326 xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE); in xe_hw_engine_enable_ring()
329 xe_mmio_write32(&hwe->gt->mmio, RCU_MODE, in xe_hw_engine_enable_ring()
342 static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt, in xe_hw_engine_match_fixed_cslice_mode() argument
[all …]
xe_gt_sriov_pf_monitor.c
24 void xe_gt_sriov_pf_monitor_flr(struct xe_gt *gt, u32 vfid) in xe_gt_sriov_pf_monitor_flr() argument
28 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_monitor_flr()
29 xe_gt_sriov_pf_assert_vfid(gt, vfid); in xe_gt_sriov_pf_monitor_flr()
32 gt->sriov.pf.vfs[vfid].monitor.guc.events[e] = 0; in xe_gt_sriov_pf_monitor_flr()
35 static void pf_update_event_counter(struct xe_gt *gt, u32 vfid, in pf_update_event_counter() argument
38 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_update_event_counter()
39 xe_gt_assert(gt, e < XE_GUC_KLV_NUM_THRESHOLDS); in pf_update_event_counter()
41 gt->sriov.pf.vfs[vfid].monitor.guc.events[e]++; in pf_update_event_counter()
44 static int pf_handle_vf_threshold_event(struct xe_gt *gt, u32 vfid, u32 threshold) in pf_handle_vf_threshold_event() argument
54 xe_gt_sriov_notice(gt, "unknown threshold key %#x reported for %s\n", in pf_handle_vf_threshold_event()
[all …]
xe_gt_topology.c
19 load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...) in load_dss_mask() argument
25 if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS)) in load_dss_mask()
30 fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg)); in load_dss_mask()
37 load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type) in load_eu_mask() argument
39 struct xe_device *xe = gt_to_xe(gt); in load_eu_mask()
40 u32 reg_val = xe_mmio_read32(&gt->mmio, XELP_EU_ENABLE); in load_eu_mask()
129 load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) in load_l3_bank_mask() argument
131 struct xe_device *xe = gt_to_xe(gt); in load_l3_bank_mask()
132 u32 fuse3 = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3); in load_l3_bank_mask()
143 if (XE_WA(gt, no_media_l3)) in load_l3_bank_mask()
[all …]
xe_gsc.c
47 struct xe_gt *gt = gsc_to_gt(gsc); in memcpy_fw() local
48 struct xe_device *xe = gt_to_xe(gt); in memcpy_fw()
71 struct xe_gt *gt = gsc_to_gt(gsc); in emit_gsc_upload() local
78 bb = xe_bb_new(gt, 4, false); in emit_gsc_upload()
128 struct xe_gt *gt = gsc_to_gt(gsc); in query_compatibility_version() local
129 struct xe_tile *tile = gt_to_tile(gt); in query_compatibility_version()
130 struct xe_device *xe = gt_to_xe(gt); in query_compatibility_version()
142 xe_gt_err(gt, "failed to allocate bo for GSC version query\n"); in query_compatibility_version()
156 xe_gt_err(gt, in query_compatibility_version()
166 xe_gt_err(gt, "HuC: invalid GSC reply for version query (err=%d)\n", err); in query_compatibility_version()
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_gt_pm.c
28 static void user_forcewake(struct intel_gt *gt, bool suspend) in user_forcewake() argument
30 int count = atomic_read(&gt->user_wakeref); in user_forcewake()
37 wakeref = intel_gt_pm_get(gt); in user_forcewake()
39 GEM_BUG_ON(count > atomic_read(&gt->wakeref.count)); in user_forcewake()
40 atomic_sub(count, &gt->wakeref.count); in user_forcewake()
42 atomic_add(count, &gt->wakeref.count); in user_forcewake()
44 intel_gt_pm_put(gt, wakeref); in user_forcewake()
47 static void runtime_begin(struct intel_gt *gt) in runtime_begin() argument
50 write_seqcount_begin(&gt->stats.lock); in runtime_begin()
51 gt->stats.start = ktime_get(); in runtime_begin()
[all …]
intel_reset.c
158 static int i915_do_reset(struct intel_gt *gt, in i915_do_reset() argument
162 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in i915_do_reset()
187 static int g33_do_reset(struct intel_gt *gt, in g33_do_reset() argument
191 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in g33_do_reset()
197 static int g4x_do_reset(struct intel_gt *gt, in g4x_do_reset() argument
201 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in g4x_do_reset()
202 struct intel_uncore *uncore = gt->uncore; in g4x_do_reset()
213 GT_TRACE(gt, "Wait for media reset failed\n"); in g4x_do_reset()
221 GT_TRACE(gt, "Wait for render reset failed\n"); in g4x_do_reset()
234 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, in ilk_do_reset() argument
[all …]
intel_gt_mcr.c
108 void intel_gt_mcr_init(struct intel_gt *gt) in intel_gt_mcr_init() argument
110 struct drm_i915_private *i915 = gt->i915; in intel_gt_mcr_init()
114 spin_lock_init(&gt->mcr_lock); in intel_gt_mcr_init()
121 gt->info.mslice_mask = in intel_gt_mcr_init()
122 intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask, in intel_gt_mcr_init()
124 gt->info.mslice_mask |= in intel_gt_mcr_init()
125 (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) & in intel_gt_mcr_init()
128 if (!gt->info.mslice_mask) /* should be impossible! */ in intel_gt_mcr_init()
129 gt_warn(gt, "mslice mask all zero!\n"); in intel_gt_mcr_init()
132 if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) { in intel_gt_mcr_init()
[all …]
intel_gt.h
20 #define IS_GFX_GT_IP_RANGE(gt, from, until) ( \ argument
23 ((gt)->type != GT_MEDIA && \
24 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
25 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
34 #define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \ argument
37 ((gt) && (gt)->type == GT_MEDIA && \
38 MEDIA_VER_FULL((gt)->i915) >= (from) && \
39 MEDIA_VER_FULL((gt)->i915) <= (until)))
56 #define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \ argument
58 (IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
[all …]
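
Note: IS_GFX_GT_IP_RANGE() above is an inclusive range check against a packed major.release IP version. A sketch of the idea, assuming the kernel's IP_VER()-style packing of (ver << 8 | release); the sample versions are arbitrary.

#include <stdio.h>

#define IP_VER(ver, rel)        ((ver) << 8 | (rel))
#define IP_IN_RANGE(v, from, until)     ((v) >= (from) && (v) <= (until))

int main(void)
{
        unsigned int gfx = IP_VER(12, 70);      /* e.g. graphics IP 12.70 */

        printf("in 12.55..12.74: %d\n",
               IP_IN_RANGE(gfx, IP_VER(12, 55), IP_VER(12, 74)));
        return 0;
}
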
selftest_reset.c
18 __igt_reset_stolen(struct intel_gt *gt, in __igt_reset_stolen() argument
22 struct i915_ggtt *ggtt = gt->ggtt; in __igt_reset_stolen()
23 const struct resource *dsm = &gt->i915->dsm.stolen; in __igt_reset_stolen()
51 igt_global_reset_lock(gt); in __igt_reset_stolen()
52 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in __igt_reset_stolen()
54 err = igt_spinner_init(&spin, gt); in __igt_reset_stolen()
58 for_each_engine(engine, gt, id) { in __igt_reset_stolen()
89 i915_gem_get_pat_index(gt->i915, in __igt_reset_stolen()
98 if (!__drm_mm_interval_first(&gt->i915->mm.stolen, in __igt_reset_stolen()
114 intel_gt_reset(gt, mask, NULL); in __igt_reset_stolen()
[all …]
intel_gt_pm_irq.c
13 static void write_pm_imr(struct intel_gt *gt) in write_pm_imr() argument
15 struct drm_i915_private *i915 = gt->i915; in write_pm_imr()
16 struct intel_uncore *uncore = gt->uncore; in write_pm_imr()
17 u32 mask = gt->pm_imr; in write_pm_imr()
32 static void gen6_gt_pm_update_irq(struct intel_gt *gt, in gen6_gt_pm_update_irq() argument
40 lockdep_assert_held(gt->irq_lock); in gen6_gt_pm_update_irq()
42 new_val = gt->pm_imr; in gen6_gt_pm_update_irq()
46 if (new_val != gt->pm_imr) { in gen6_gt_pm_update_irq()
47 gt->pm_imr = new_val; in gen6_gt_pm_update_irq()
48 write_pm_imr(gt); in gen6_gt_pm_update_irq()
[all …]
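
Note: gen6_gt_pm_update_irq() above is a cached read-modify-write: compute the new mask from the shadow copy and only issue the MMIO write when the value actually changed. A sketch with a plain variable standing in for the register (the real code holds gt->irq_lock around this):

#include <stdio.h>

static unsigned int pm_imr = 0xff;      /* shadow of the interrupt mask register */

static void write_pm_imr(unsigned int val)
{
        printf("mmio write: %#x\n", val);       /* stand-in for the MMIO write */
}

static void pm_update_irq(unsigned int enable_bits, unsigned int disable_bits)
{
        unsigned int new_val = pm_imr;

        new_val &= ~enable_bits;        /* clearing a mask bit unmasks (enables) it */
        new_val |= disable_bits;

        if (new_val != pm_imr) {        /* skip the expensive write if unchanged */
                pm_imr = new_val;
                write_pm_imr(pm_imr);
        }
}

int main(void)
{
        pm_update_irq(0x01, 0);         /* enables one source: writes */
        pm_update_irq(0x01, 0);         /* no change: no register write */
        return 0;
}
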
intel_gt_sysfs_pm.c
29 int (func)(struct intel_gt *gt, u32 val), u32 val) in sysfs_gt_attribute_w_func() argument
31 struct intel_gt *gt; in sysfs_gt_attribute_w_func() local
39 for_each_gt(gt, i915, i) { in sysfs_gt_attribute_w_func()
40 ret = func(gt, val); in sysfs_gt_attribute_w_func()
45 gt = intel_gt_sysfs_get_drvdata(kobj, attr->name); in sysfs_gt_attribute_w_func()
46 ret = func(gt, val); in sysfs_gt_attribute_w_func()
54 u32 (func)(struct intel_gt *gt), in sysfs_gt_attribute_r_func() argument
57 struct intel_gt *gt; in sysfs_gt_attribute_r_func() local
67 for_each_gt(gt, i915, i) { in sysfs_gt_attribute_r_func()
68 u32 val = func(gt); in sysfs_gt_attribute_r_func()
[all …]
intel_gt_sysfs.c
49 static struct kobject *gt_get_parent_obj(struct intel_gt *gt) in gt_get_parent_obj() argument
51 return &gt->i915->drm.primary->kdev->kobj; in gt_get_parent_obj()
58 struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); in id_show() local
60 return sysfs_emit(buf, "%u\n", gt->info.id); in id_show()
81 void intel_gt_sysfs_register(struct intel_gt *gt) in intel_gt_sysfs_register() argument
91 if (gt_is_root(gt)) in intel_gt_sysfs_register()
92 intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt)); in intel_gt_sysfs_register()
95 if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type, in intel_gt_sysfs_register()
96 gt->i915->sysfs_gt, "gt%d", gt->info.id)) in intel_gt_sysfs_register()
99 gt->sysfs_defaults = kobject_create_and_add(".defaults", &gt->sysfs_gt); in intel_gt_sysfs_register()
[all …]
selftest_slpc.c
22 struct intel_gt *gt; member
53 static int slpc_set_freq(struct intel_gt *gt, u32 freq) in slpc_set_freq() argument
56 struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc; in slpc_set_freq()
98 static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power) in measure_power_at_freq() argument
102 err = slpc_set_freq(gt, *freq); in measure_power_at_freq()
105 *freq = intel_rps_read_actual_frequency(&gt->rps); in measure_power_at_freq()
106 *power = measure_power(&gt->rps, freq); in measure_power_at_freq()
183 static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine) in slpc_power() argument
185 struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc; in slpc_power()
197 if (!librapl_supported(gt->i915)) in slpc_power()
[all …]
/linux/drivers/gpu/drm/xe/tests/
xe_gt_sriov_pf_service_test.c
20 struct xe_gt *gt; in pf_service_test_init() local
28 gt = xe_device_get_gt(xe, 0); in pf_service_test_init()
29 pf_init_versions(gt); in pf_service_test_init()
36 KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major); in pf_service_test_init()
37 KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major); in pf_service_test_init()
38 KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major, in pf_service_test_init()
39 gt->sriov.pf.service.version.latest.major); in pf_service_test_init()
40 if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major) in pf_service_test_init()
41 KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor, in pf_service_test_init()
42 gt->sriov.pf.service.version.latest.minor); in pf_service_test_init()
[all …]
xe_mocs.c
23 static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt) in live_mocs_init() argument
30 flags = get_mocs_settings(gt_to_xe(gt), &arg->table); in live_mocs_init()
32 kunit_info(test, "gt %d", gt->info.id); in live_mocs_init()
33 kunit_info(test, "gt type %d", gt->info.type); in live_mocs_init()
41 static void read_l3cc_table(struct xe_gt *gt, in read_l3cc_table() argument
49 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); in read_l3cc_table()
54 if (regs_are_mcr(gt)) in read_l3cc_table()
55 reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1)); in read_l3cc_table()
57 reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i >> 1)); in read_l3cc_table()
59 mocs_dbg(gt, "reg_val=0x%x\n", reg_val); in read_l3cc_table()
[all …]
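
Note: read_l3cc_table() above indexes registers with i >> 1 because two 16-bit L3CC entries are packed per 32-bit LNCFCMOCS register, selected by i & 1. A sketch of that unpacking; the register contents are fabricated.

#include <stdio.h>
#include <stdint.h>

static const uint32_t lncfcmocs[2] = { 0x00300010, 0x00700050 }; /* fake registers */

static uint16_t read_l3cc(unsigned int i)
{
        uint32_t reg_val = lncfcmocs[i >> 1];   /* two entries per register */

        return (i & 1) ? reg_val >> 16 : reg_val & 0xffff;
}

int main(void)
{
        for (unsigned int i = 0; i < 4; i++)
                printf("l3cc[%u] = %#x\n", i, read_l3cc(i));
        return 0;
}
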
