/linux/drivers/gpu/drm/xe/ |
xe_gt_sriov_pf_control.c
     1  // SPDX-License-Identifier: MIT
     3   * Copyright © 2023-2024 Intel Corporation
    53          ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
    54          return ret > 0 ? -EPROTO : ret;
    57  static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
    61          xe_gt_assert(gt, vfid != PFID);
    62          xe_gt_sriov_dbg_verbose(gt, "sending VF%u control command %s\n",
    65          err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
    67          xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
    72  static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
        [all …]
|
xe_gt.h
     1  /* SPDX-License-Identifier: MIT */
     9  #include <linux/fault-inject.h>
    18          for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
    19                  for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
    22  #define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
    31  int xe_gt_init_early(struct xe_gt *gt);
    32  int xe_gt_init(struct xe_gt *gt);
    33  void xe_gt_mmio_init(struct xe_gt *gt);
    34  void xe_gt_declare_wedged(struct xe_gt *gt);
    35  int xe_gt_record_default_lrcs(struct xe_gt *gt);
        [all …]
|
xe_gt_ccs_mode.c
     1  // SPDX-License-Identifier: MIT
    17  static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
    20          int num_slices = hweight32(CCS_MASK(gt));
    21          struct xe_device *xe = gt_to_xe(gt);
    25          xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
    48          for (width = num_slices / num_engines; width; width--) {
    52                  for_each_hw_engine(hwe, gt, id) {
    53                          if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
    56                          if (hwe->logical_instance >= num_engines)
    59                          config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
        [all …]
|
xe_gt_sriov_pf_monitor.c
     1  // SPDX-License-Identifier: MIT
     3   * Copyright © 2023-2024 Intel Corporation
    17   * xe_gt_sriov_pf_monitor_flr - Cleanup VF data after VF FLR.
    18   * @gt: the &xe_gt
    24  void xe_gt_sriov_pf_monitor_flr(struct xe_gt *gt, u32 vfid)
    28          xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
    29          xe_gt_sriov_pf_assert_vfid(gt, vfid);
    32          gt->sriov.pf.vfs[vfid].monitor.guc.events[e] = 0;
    35  static void pf_update_event_counter(struct xe_gt *gt, u32 vfid,
    38          xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
        [all …]
|
xe_gt_sriov_pf_service.c
     1  // SPDX-License-Identifier: MIT
     3   * Copyright © 2023-2024 Intel Corporation
   125          regs = ERR_PTR(-ENOPKG);
   132  static int pf_alloc_runtime_info(struct xe_gt *gt)
   134          struct xe_device *xe = gt_to_xe(gt);
   139          xe_gt_assert(gt, IS_SRIOV_PF(xe));
   140          xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
   141          xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
   142          xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);
   151          values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
        [all …]
|
xe_force_wake.c
     1  // SPDX-License-Identifier: MIT
    27          fw->initialized_domains |= BIT(id);
    34          struct xe_force_wake_domain *domain = &fw->domains[id];
    36          domain->id = id;
    37          domain->reg_ctl = reg;
    38          domain->reg_ack = ack;
    39          domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
    40          domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
    45  void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
    47          struct xe_device *xe = gt_to_xe(gt);
        [all …]
|
xe_gt_sriov_pf_debugfs.c
     1  // SPDX-License-Identifier: MIT
     3   * Copyright © 2023-2024 Intel Corporation
    29   * ├── gt0           # d_inode->i_private = gt
    30   * │   ├── pf        # d_inode->i_private = gt
    31   * │   ├── vf1       # d_inode->i_private = VFID(1)
    33   * │   ├── vfN       # d_inode->i_private = VFID(N)
    38          return d->d_inode->i_private;
    43          return extract_priv(d->d_parent);
   137          struct xe_gt *gt = extract_gt(data); \
   138          struct xe_device *xe = gt_to_xe(gt); \
        [all …]
|
xe_gsc_proxy.c
     1  // SPDX-License-Identifier: MIT
    32   * is integrated in GT, the graphics driver needs to transfer the messages from
    38   * 1 - Xe submits a request to GSC asking for the message to CSME
    39   * 2 - GSC replies with the proxy header + payload for CSME
    40   * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
    41   * 4 - CSME replies with the proxy header + payload for GSC
    42   * 5 - Xe submits a request to GSC with the reply from CSME
    43   * 6 - GSC replies either with a new header + payload (same as step 2, so we
    68          struct xe_gt *gt = gsc_to_gt(gsc);
    69          u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
        [all …]
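Steps 1-6 quoted above describe the relay protocol between Xe, the GSC and CSME. As a rough illustration of that control flow only, here is a self-contained sketch; every type and helper below is a placeholder for this note, not an xe_gsc_proxy.c function:

	struct proxy_msg { int type; };                 /* stand-in for proxy header + payload */

	static struct proxy_msg gsc_request(void)                  /* steps 1-2: ask GSC for the CSME message */
	{ return (struct proxy_msg){ .type = 1 }; }

	static struct proxy_msg csme_exchange(struct proxy_msg m)  /* steps 3-4: relay via mei, collect the reply */
	{ (void)m; return (struct proxy_msg){ .type = 2 }; }

	static struct proxy_msg gsc_forward(struct proxy_msg m)    /* steps 5-6: hand CSME's reply back to GSC */
	{ (void)m; return (struct proxy_msg){ .type = 0 }; }       /* 0 = "end of sequence" marker in this sketch */

	int main(void)
	{
		for (;;) {
			struct proxy_msg to_csme = gsc_request();
			struct proxy_msg from_csme = csme_exchange(to_csme);
			struct proxy_msg next = gsc_forward(from_csme);

			if (next.type == 0)     /* GSC signalled the end of the proxy sequence */
				break;
		}
		return 0;
	}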
|
xe_gt_sriov_pf.h
     1  /* SPDX-License-Identifier: MIT */
     3   * Copyright © 2023-2024 Intel Corporation
    12  int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
    13  int xe_gt_sriov_pf_init(struct xe_gt *gt);
    14  int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
    15  void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
    16  void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
    17  void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
    18  void xe_gt_sriov_pf_restart(struct xe_gt *gt);
    20  static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
        [all …]
|
xe_pmu.c
     1  // SPDX-License-Identifier: MIT
    22   * Expose events/counters like GT-C6 residency, GT frequency and per-class-engine
    27   * $ ls -ld /sys/bus/event_source/devices/xe_*
    34   *      gt[60:63]               Selects gt for the event
    35   *      engine_class[20:27]     Selects engine-class for event
    36   *      engine_instance[12:19]  Selects the engine-instance for the event
    39   * For engine specific events (engine-*), gt, engine_class and engine_instance parameters must be
    42   * For gt specific events (gt-*) gt parameter must be passed. All other parameters will be 0.
    47   * $ perf list | grep gt-c6
    49   * To sample a specific event for a GT at regular intervals:
        [all …]
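The config layout quoted above (gt in bits 60:63, engine_class in 20:27, engine_instance in 12:19) is what a perf user packs into the event's config word. A minimal sketch of that packing, assuming the event selector itself sits in the bits below 12 (the excerpt does not show that field) and with an illustrative helper name:

	#include <stdint.h>

	static uint64_t xe_pmu_pack_config(uint64_t event_id, uint64_t gt,
					   uint64_t engine_class, uint64_t engine_instance)
	{
		return event_id |                     /* event selector (assumed low bits) */
		       (engine_instance << 12) |      /* engine_instance[12:19] */
		       (engine_class << 20) |         /* engine_class[20:27] */
		       (gt << 60);                    /* gt[60:63] */
	}

	/* e.g. xe_pmu_pack_config(c6_event, 0, 0, 0) for a gt-c6 event on gt 0,
	 * then place the result in perf_event_attr.config. */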
|
/linux/drivers/gpu/drm/i915/gt/ |
intel_gt.h
     1  /* SPDX-License-Identifier: MIT */
    17   * Check that the GT is a graphics GT and has an IP version within the
    20  #define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
    23          ((gt)->type != GT_MEDIA && \
    24           GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
    25           GRAPHICS_VER_FULL((gt)->i915) <= (until)))
    28   * Check that the GT is a media GT and has an IP version within the
    34  #define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
    37          ((gt) && (gt)->type == GT_MEDIA && \
    38           MEDIA_VER_FULL((gt)->i915) >= (from) && \
        [all …]
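The two range-check macros above gate code on a span of IP versions while also checking the GT type, so a graphics range can never match a media GT and vice versa. A hedged usage sketch; the version bounds and the helper being called are made up for illustration, while IP_VER() is the usual i915 way of packing the version/release pair that GRAPHICS_VER_FULL() reports:

	/* Apply a hypothetical tweak only on graphics IP 12.70 .. 12.74. */
	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
		apply_hypothetical_tweak(gt);   /* illustrative helper, not an i915 function */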
|
intel_gt_pm_irq.c
     1  // SPDX-License-Identifier: MIT
    13  static void write_pm_imr(struct intel_gt *gt)
    15          struct drm_i915_private *i915 = gt->i915;
    16          struct intel_uncore *uncore = gt->uncore;
    17          u32 mask = gt->pm_imr;
    32  static void gen6_gt_pm_update_irq(struct intel_gt *gt,
    40          lockdep_assert_held(gt->irq_lock);
    42          new_val = gt->pm_imr;
    46          if (new_val != gt->pm_imr) {
    47                  gt->pm_imr = new_val;
        [all …]
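gen6_gt_pm_update_irq() follows the usual cached-IMR pattern: under gt->irq_lock it folds the bits being enabled or disabled into the cached mask and only writes the register when the value actually changes (lines 42-47 above). A standalone sketch of just that mask arithmetic, with plain integers standing in for the uncore access:

	#include <stdint.h>

	/* Start from the cached mask, clear the bits being updated, then
	 * re-set those that remain masked (disabled). */
	static uint32_t pm_imr_fold(uint32_t cached_imr, uint32_t interrupt_mask,
				    uint32_t enabled_irq_mask)
	{
		uint32_t new_val = cached_imr;

		new_val &= ~interrupt_mask;
		new_val |= ~enabled_irq_mask & interrupt_mask;

		return new_val; /* caller writes the register only if new_val != cached_imr */
	}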
|
selftest_reset.c
     1  // SPDX-License-Identifier: MIT
    18  __igt_reset_stolen(struct intel_gt *gt,
    22          struct i915_ggtt *ggtt = gt->ggtt;
    23          const struct resource *dsm = &gt->i915->dsm.stolen;
    34          if (!drm_mm_node_allocated(&ggtt->error_capture))
    43                  return -ENOMEM;
    47                  err = -ENOMEM;
    51          igt_global_reset_lock(gt);
    52          wakeref = intel_runtime_pm_get(gt->uncore->rpm);
    54          err = igt_spinner_init(&spin, gt);
        [all …]
|
intel_gt_sysfs.c
     1  // SPDX-License-Identifier: MIT
    23          return !strncmp(kobj->name, "gt", 2);
    31           * has been called, whether it's called from gt/ or from
    35           * If the interface is called from gt/ then private data is
    49  static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
    51          return &gt->i915->drm.primary->kdev->kobj;
    58          struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
    60          return sysfs_emit(buf, "%u\n", gt->info.id);
    81  void intel_gt_sysfs_register(struct intel_gt *gt)
    88           * We generate the files only for gt 0
        [all …]
|
selftest_gt_pm.c
     1  // SPDX-License-Identifier: MIT
    20          return -1;
    32          return -1;
    41          struct drm_i915_private *i915 = engine->i915;
    61          cycles[i] = -read_timestamp(engine);
    81          struct intel_gt *gt = arg;
    87          if (!gt->clock_frequency) { /* unknown */
    92          if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
    95          wakeref = intel_gt_pm_get(gt);
    96          intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
        [all …]
|
intel_gt_mcr.h
     1  /* SPDX-License-Identifier: MIT */
    11  void intel_gt_mcr_init(struct intel_gt *gt);
    12  void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
    13  void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
    14  void intel_gt_mcr_lock_sanitize(struct intel_gt *gt);
    16  u32 intel_gt_mcr_read(struct intel_gt *gt,
    19  u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg);
    20  u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg);
    22  void intel_gt_mcr_unicast_write(struct intel_gt *gt,
    25  void intel_gt_mcr_multicast_write(struct intel_gt *gt,
        [all …]
|
/linux/drivers/gpu/drm/xe/tests/ |
xe_mocs.c
     1  // SPDX-License-Identifier: GPL-2.0 AND MIT
    23  static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
    30          flags = get_mocs_settings(gt_to_xe(gt), &arg->table);
    32          kunit_info(test, "gt %d", gt->info.id);
    33          kunit_info(test, "gt type %d", gt->info.type);
    34          kunit_info(test, "table size %d", arg->table.table_size);
    35          kunit_info(test, "table uc_index %d", arg->table.uc_index);
    36          kunit_info(test, "table num_mocs_regs %d", arg->table.num_mocs_regs);
    41  static void read_l3cc_table(struct xe_gt *gt,
    49          fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        [all …]
|
/linux/drivers/gpu/drm/i915/pxp/ |
intel_pxp_irq.c
     1  // SPDX-License-Identifier: MIT
     7  #include "gt/intel_gt_irq.h"
     8  #include "gt/intel_gt_regs.h"
     9  #include "gt/intel_gt_types.h"
    21   * intel_pxp_irq_handler - Handles PXP interrupts.
    27          struct intel_gt *gt;
    32          gt = pxp->ctrl_gt;
    34          lockdep_assert_held(gt->irq_lock);
    43                  pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED |
    48                  pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
        [all …]
|
/linux/drivers/gpu/drm/i915/selftests/ |
igt_reset.c
     2   * SPDX-License-Identifier: MIT
     9  #include "gt/intel_engine.h"
    10  #include "gt/intel_gt.h"
    14  void igt_global_reset_lock(struct intel_gt *gt)
    19          pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags);
    21          while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags))
    22                  wait_event(gt->reset.queue,
    23                             !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
    25          for_each_engine(engine, gt, id) {
    27                                          &gt->reset.flags))
        [all …]
|
/linux/drivers/gpu/drm/i915/gt/uc/ |
selftest_guc.c
     1  // SPDX-License-Identifier: MIT
     6  #include "gt/intel_gt_print.h"
    18          err = -ETIMEDOUT;
    34          ret = i915_sw_fence_await_dma_fence(&rq->submit,
    35                                              &from->fence, 0,
    51          struct intel_gt *gt = arg;
    59          if (!intel_has_gpu_reset(gt))
    62          wakeref = intel_runtime_pm_get(gt->uncore->rpm);
    63          engine = intel_selftest_find_any_engine(gt);
    70                  gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
        [all …]
|
intel_gsc_uc.c
     1  // SPDX-License-Identifier: MIT
     8  #include "gt/intel_gt.h"
     9  #include "gt/intel_gt_print.h"
    19          struct intel_gt *gt = gsc_uc_to_gt(gsc);
    24          wakeref = intel_runtime_pm_get(gt->uncore->rpm);
    26          spin_lock_irq(gt->irq_lock);
    27          actions = gsc->gsc_work_actions;
    28          gsc->gsc_work_actions = 0;
    29          spin_unlock_irq(gt->irq_lock);
    36          else if (ret != -EEXIST)
        [all …]
|
selftest_guc_multi_lrc.c
     1  // SPDX-License-Identifier: MIT
     6  #include "gt/intel_gt_print.h"
    10  #include "gt/intel_engine_heartbeat.h"
    20                  if (engines[j]->logical_mask & BIT(i)) {
    31  multi_lrc_create_parent(struct intel_gt *gt, u8 class,
    39          for_each_engine(engine, gt, id) {
    40                  if (engine->class != class)
    97                  if (++i == ce->parallel.number_children)
    99                                   &child_rq->fence.flags);
   108                  return ERR_PTR(-ENOMEM);
        [all …]
|
intel_guc_ads.c
     1  // SPDX-License-Identifier: MIT
     3   * Copyright © 2014-2019 Intel Corporation
     9  #include "gt/intel_engine_regs.h"
    10  #include "gt/intel_gt.h"
    11  #include "gt/intel_gt_mcr.h"
    12  #include "gt/intel_gt_regs.h"
    13  #include "gt/intel_lrc.h"
    14  #include "gt/shmem_utils.h"
    29   * +---------------------------------------+ <== base
    31   * +---------------------------------------+
        [all …]
|
intel_uc.c
     1  // SPDX-License-Identifier: MIT
     3   * Copyright © 2016-2019 Intel Corporation
     8  #include "gt/intel_gt.h"
     9  #include "gt/intel_gt_print.h"
    10  #include "gt/intel_reset.h"
    17  #include "gt/intel_rps.h"
    28          struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
    30          if (i915->params.enable_guc != -1)
    33          /* Don't enable GuC/HuC on pre-Gen12 */
    35                  i915->params.enable_guc = 0;
        [all …]
|
/linux/arch/riscv/kvm/ |
vcpu_timer.c
     1  // SPDX-License-Identifier: GPL-2.0
    13  #include <clocksource/timer-riscv.h>
    18  static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
    20          return get_cycles64() + gt->time_delta;
    24                                      struct kvm_guest_timer *gt,
    31          cycles_now = kvm_riscv_current_cycles(gt);
    33          cycles_delta = cycles - cycles_now;
    36          delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
    47          struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
    49          if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
        [all …]
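kvm_riscv_delta_cycles2ns() (line 36 above) converts a cycle delta into nanoseconds with a pre-computed multiply/shift pair (gt->nsec_mult, gt->nsec_shift), the same fixed-point trick clocksource code uses. A minimal standalone sketch of that conversion; the mult/shift values below are illustrative and assume ns ≈ (cycles * mult) >> shift for the timebase in use:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t cycles_to_ns(uint64_t cycles_delta, uint32_t mult, uint32_t shift)
	{
		return (cycles_delta * mult) >> shift;
	}

	int main(void)
	{
		/* Example: a 10 MHz timebase -> 100 ns per cycle (mult = 100, shift = 0). */
		printf("%llu ns\n", (unsigned long long)cycles_to_ns(250, 100, 0));
		return 0;
	}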
|