
Searched full:gt (Results 1 – 25 of 499) sorted by relevance


/linux/drivers/gpu/drm/xe/
xe_gt_sriov_pf_config.c
63 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid) in pf_send_vf_cfg_reset() argument
65 struct xe_guc *guc = &gt->uc.guc; in pf_send_vf_cfg_reset()
77 static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords) in pf_send_vf_buf_klvs() argument
79 struct xe_guc *guc = &gt->uc.guc; in pf_send_vf_buf_klvs()
88 static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, in pf_push_vf_buf_klvs() argument
93 ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords); in pf_push_vf_buf_klvs()
98 struct drm_printer p = xe_gt_info_printer(gt); in pf_push_vf_buf_klvs()
101 xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n", in pf_push_vf_buf_klvs()
109 struct drm_printer p = xe_gt_dbg_printer(gt); in pf_push_vf_buf_klvs()
113 xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n", in pf_push_vf_buf_klvs()
[all …]
xe_gt_sriov_pf_migration.c
32 static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt, unsigned int vfid) in pf_pick_gt_migration() argument
34 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_pick_gt_migration()
35 xe_gt_assert(gt, vfid != PFID); in pf_pick_gt_migration()
36 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in pf_pick_gt_migration()
38 return &gt->sriov.pf.vfs[vfid].migration; in pf_pick_gt_migration()
41 static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid, in pf_dump_mig_data() argument
46 struct drm_printer p = xe_gt_dbg_printer(gt); in pf_dump_mig_data()
54 static ssize_t pf_migration_ggtt_size(struct xe_gt *gt, unsigned int vfid) in pf_migration_ggtt_size() argument
56 if (!xe_gt_is_main_type(gt)) in pf_migration_ggtt_size()
59 return xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0); in pf_migration_ggtt_size()
[all …]
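
The pf_migration_ggtt_size() helper above queries the size of the GGTT migration blob by calling the save routine with a NULL buffer and zero length. Below is a standalone model of that query-then-save convention, assuming (as the helper's name suggests) that a NULL buffer makes the save routine return the required size; save_state() is a stand-in for illustration, not the xe function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Stand-in for the save routine: a NULL buffer is a size query, a real
 * buffer receives the data. Mirrors the convention implied by
 * pf_migration_ggtt_size(), but is not the driver's code. */
static ssize_t save_state(void *buf, size_t size)
{
	static const char state[] = "fake-vf-state";

	if (!buf)
		return sizeof(state);	/* size query */
	if (size < sizeof(state))
		return -1;

	memcpy(buf, state, sizeof(state));
	return sizeof(state);
}

int main(void)
{
	ssize_t need = save_state(NULL, 0);	/* first call: how big? */
	char *buf;

	if (need <= 0)
		return 1;
	buf = malloc((size_t)need);
	if (!buf)
		return 1;

	printf("saved %zd bytes\n", save_state(buf, (size_t)need));
	free(buf);
	return 0;
}
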
xe_gt_ccs_mode.c
17 static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) in __xe_gt_apply_ccs_mode() argument
20 int num_slices = hweight32(CCS_MASK(gt)); in __xe_gt_apply_ccs_mode()
21 struct xe_device *xe = gt_to_xe(gt); in __xe_gt_apply_ccs_mode()
25 xe_assert(xe, xe_gt_ccs_mode_enabled(gt)); in __xe_gt_apply_ccs_mode()
52 for_each_hw_engine(hwe, gt, id) { in __xe_gt_apply_ccs_mode()
62 while ((CCS_MASK(gt) & BIT(cslice)) == 0) in __xe_gt_apply_ccs_mode()
77 xe_mmio_write32(&gt->mmio, CCS_MODE, mode); in __xe_gt_apply_ccs_mode()
79 xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", in __xe_gt_apply_ccs_mode()
83 void xe_gt_apply_ccs_mode(struct xe_gt *gt) in xe_gt_apply_ccs_mode() argument
85 if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt))) in xe_gt_apply_ccs_mode()
[all …]
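
The __xe_gt_apply_ccs_mode() excerpt counts the available compute slices with hweight32(CCS_MASK(gt)) and skips fused-off slices while assigning engines to slices. Below is a simplified standalone model of that assignment loop, reduced to one slice per engine (the excerpt truncates the width handling); the names and layout are illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>

/* Each of num_engines engines is mapped to the next compute slice whose
 * bit is set in ccs_mask; slices with a clear bit are treated as fused
 * off and skipped, as in the while loop of __xe_gt_apply_ccs_mode(). */
static void assign_ccs_slices(uint32_t ccs_mask, unsigned int num_engines)
{
	unsigned int num_slices = (unsigned int)__builtin_popcount(ccs_mask);	/* hweight32() */
	unsigned int cslice = 0;

	printf("%u engines over %u slices\n", num_engines, num_slices);

	for (unsigned int engine = 0; engine < num_engines; engine++) {
		/* skip compute slices that are fused off */
		while ((ccs_mask & (1u << cslice)) == 0)
			cslice++;

		printf("engine %u -> compute slice %u\n", engine, cslice);
		cslice++;
	}
}

int main(void)
{
	/* slices 0 and 2 present, slice 1 fused off */
	assign_ccs_slices(0x5, 2);
	return 0;
}
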
xe_gt_sriov_pf_monitor.c
18 * @gt: the &xe_gt
24 void xe_gt_sriov_pf_monitor_flr(struct xe_gt *gt, u32 vfid) in xe_gt_sriov_pf_monitor_flr() argument
28 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_monitor_flr()
29 xe_gt_sriov_pf_assert_vfid(gt, vfid); in xe_gt_sriov_pf_monitor_flr()
32 gt->sriov.pf.vfs[vfid].monitor.guc.events[e] = 0; in xe_gt_sriov_pf_monitor_flr()
35 static void pf_update_event_counter(struct xe_gt *gt, u32 vfid, in pf_update_event_counter() argument
38 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_update_event_counter()
39 xe_gt_assert(gt, e < XE_GUC_KLV_NUM_THRESHOLDS); in pf_update_event_counter()
41 gt->sriov.pf.vfs[vfid].monitor.guc.events[e]++; in pf_update_event_counter()
44 static int pf_handle_vf_threshold_event(struct xe_gt *gt, u32 vfid, u32 threshold) in pf_handle_vf_threshold_event() argument
[all …]
xe_force_wake.c
45 void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) in xe_force_wake_init_gt() argument
47 struct xe_device *xe = gt_to_xe(gt); in xe_force_wake_init_gt()
49 fw->gt = gt; in xe_force_wake_init_gt()
63 void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) in xe_force_wake_init_engines() argument
67 if (xe_gt_is_main_type(gt)) in xe_force_wake_init_engines()
73 if (!(gt->info.engine_mask & BIT(i))) in xe_force_wake_init_engines()
82 if (!(gt->info.engine_mask & BIT(i))) in xe_force_wake_init_engines()
90 if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)) in xe_force_wake_init_engines()
96 static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake) in __domain_ctl() argument
98 if (IS_SRIOV_VF(gt_to_xe(gt))) in __domain_ctl()
[all …]
xe_gsc_proxy.c
32 * is integrated in GT, the graphics driver needs to transfer the messages from
68 struct xe_gt *gt = gsc_to_gt(gsc); in xe_gsc_proxy_init_done() local
69 u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)); in xe_gsc_proxy_init_done()
77 struct xe_gt *gt = gsc_to_gt(gsc); in xe_gsc_wait_for_proxy_init_done() local
80 return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE), in xe_gsc_wait_for_proxy_init_done()
88 struct xe_gt *gt = gsc_to_gt(gsc); in __gsc_proxy_irq_rmw() local
93 xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set); in __gsc_proxy_irq_rmw()
112 struct xe_gt *gt = gsc_to_gt(gsc); in proxy_send_to_csme() local
118 xe_gt_err(gt, "Failed to send CSME proxy message\n"); in proxy_send_to_csme()
124 xe_gt_err(gt, "Failed to receive CSME proxy message\n"); in proxy_send_to_csme()
[all …]
xe_gt_sriov_pf.h
12 int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
13 int xe_gt_sriov_pf_init(struct xe_gt *gt);
14 int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
15 void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
16 void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
17 void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
18 void xe_gt_sriov_pf_restart(struct xe_gt *gt);
20 static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) in xe_gt_sriov_pf_init_early() argument
25 static inline int xe_gt_sriov_pf_init(struct xe_gt *gt) in xe_gt_sriov_pf_init() argument
30 static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) in xe_gt_sriov_pf_init_hw() argument
[all …]
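
The xe_gt_sriov_pf.h excerpt pairs the real function declarations with static inline no-op stubs, so call sites build unchanged when PF support is compiled out. Below is a minimal standalone illustration of that declaration-or-stub pattern, using a made-up CONFIG_FOO_FEATURE switch and foo_init() rather than the driver's Kconfig symbol and functions.

#include <stdio.h>

struct foo_device { int id; };

#ifdef CONFIG_FOO_FEATURE
int foo_init(struct foo_device *dev);	/* real implementation built elsewhere */
#else
static inline int foo_init(struct foo_device *dev)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif

int main(void)
{
	struct foo_device dev = { .id = 0 };

	/* The call site is identical with or without CONFIG_FOO_FEATURE. */
	printf("foo_init() = %d\n", foo_init(&dev));
	return 0;
}
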
/linux/drivers/gpu/drm/i915/
Makefile
81 gt-y += \
82 gt/gen2_engine_cs.o \
83 gt/gen6_engine_cs.o \
84 gt/gen6_ppgtt.o \
85 gt/gen7_renderclear.o \
86 gt/gen8_engine_cs.o \
87 gt/gen8_ppgtt.o \
88 gt/intel_breadcrumbs.o \
89 gt/intel_context.o \
90 gt/intel_context_sseu.o \
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_gt.h
17 * Check that the GT is a graphics GT and has an IP version within the
20 #define IS_GFX_GT_IP_RANGE(gt, from, until) ( \ argument
23 ((gt)->type != GT_MEDIA && \
24 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
25 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
28 * Check that the GT is a media GT and has an IP version within the
34 #define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \ argument
37 ((gt) && (gt)->type == GT_MEDIA && \
38 MEDIA_VER_FULL((gt)->i915) >= (from) && \
39 MEDIA_VER_FULL((gt)->i915) <= (until)))
[all …]
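
IS_GFX_GT_IP_RANGE() and IS_MEDIA_GT_IP_RANGE() gate code on both the GT type (graphics vs. media) and a full IP version range. Below is a standalone model of the graphics-side check, with IP_VER() packing the major version in the high bits the way i915 does; the struct, helper, and version bounds in the example are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

#define IP_VER(ver, rel)	((ver) << 8 | (rel))

enum gt_type { GT_PRIMARY, GT_MEDIA };

struct gt {
	enum gt_type type;
	unsigned int graphics_ver_full;	/* stand-in for GRAPHICS_VER_FULL(i915) */
};

/* A GT qualifies when it is a non-media GT and its full IP version falls
 * inside [from, until], mirroring the macro shown in the excerpt. */
static bool is_gfx_gt_ip_range(const struct gt *gt,
			       unsigned int from, unsigned int until)
{
	return gt->type != GT_MEDIA &&
	       gt->graphics_ver_full >= from &&
	       gt->graphics_ver_full <= until;
}

int main(void)
{
	struct gt gt = { .type = GT_PRIMARY,
			 .graphics_ver_full = IP_VER(12, 70) };

	/* Arbitrary example range, not tied to any particular platform. */
	printf("%d\n", is_gfx_gt_ip_range(&gt, IP_VER(12, 0), IP_VER(12, 74)));
	return 0;
}
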
selftest_reset.c
18 __igt_reset_stolen(struct intel_gt *gt, in __igt_reset_stolen() argument
22 struct i915_ggtt *ggtt = gt->ggtt; in __igt_reset_stolen()
23 const struct resource *dsm = &gt->i915->dsm.stolen; in __igt_reset_stolen()
51 igt_global_reset_lock(gt); in __igt_reset_stolen()
52 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in __igt_reset_stolen()
54 err = igt_spinner_init(&spin, gt); in __igt_reset_stolen()
58 for_each_engine(engine, gt, id) { in __igt_reset_stolen()
89 i915_gem_get_pat_index(gt->i915, in __igt_reset_stolen()
98 if (!__drm_mm_interval_first(&gt->i915->mm.stolen, in __igt_reset_stolen()
114 intel_gt_reset(gt, mask, NULL); in __igt_reset_stolen()
[all …]
intel_gt_pm_irq.c
13 static void write_pm_imr(struct intel_gt *gt) in write_pm_imr() argument
15 struct drm_i915_private *i915 = gt->i915; in write_pm_imr()
16 struct intel_uncore *uncore = gt->uncore; in write_pm_imr()
17 u32 mask = gt->pm_imr; in write_pm_imr()
32 static void gen6_gt_pm_update_irq(struct intel_gt *gt, in gen6_gt_pm_update_irq() argument
40 lockdep_assert_held(gt->irq_lock); in gen6_gt_pm_update_irq()
42 new_val = gt->pm_imr; in gen6_gt_pm_update_irq()
46 if (new_val != gt->pm_imr) { in gen6_gt_pm_update_irq()
47 gt->pm_imr = new_val; in gen6_gt_pm_update_irq()
48 write_pm_imr(gt); in gen6_gt_pm_update_irq()
[all …]
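
gen6_gt_pm_update_irq() keeps a software copy of the PM interrupt mask in gt->pm_imr and, with gt->irq_lock held, only writes the hardware register when the recomputed value differs from the cache. Below is a standalone model of that write-only-on-change idiom; the mask arithmetic and write_hw_imr() are illustrative stand-ins, not the i915 uncore API.

#include <stdint.h>
#include <stdio.h>

struct fake_gt {
	uint32_t pm_imr;	/* cached copy of the interrupt mask register */
};

static void write_hw_imr(uint32_t val)
{
	printf("MMIO write IMR = 0x%08x\n", val);	/* models write_pm_imr() */
}

/* In the driver this runs under gt->irq_lock (lockdep_assert_held). */
static void pm_update_irq(struct fake_gt *gt,
			  uint32_t interrupt_mask, uint32_t enabled_mask)
{
	uint32_t new_val = gt->pm_imr;

	new_val &= ~interrupt_mask;			/* start with the touched bits unmasked */
	new_val |= ~enabled_mask & interrupt_mask;	/* re-mask the ones not enabled */

	if (new_val != gt->pm_imr) {
		gt->pm_imr = new_val;
		write_hw_imr(new_val);	/* hardware touched only on change */
	}
}

int main(void)
{
	struct fake_gt gt = { .pm_imr = 0xffffffff };

	pm_update_irq(&gt, 0x3, 0x1);	/* enable bit 0, keep bit 1 masked */
	pm_update_irq(&gt, 0x3, 0x1);	/* same request again: no MMIO write */
	return 0;
}
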
intel_gt_sysfs.c
23 return !strncmp(kobj->name, "gt", 2); in is_object_gt()
31 * has been called, whether it's called from gt/ or from in intel_gt_sysfs_get_drvdata()
35 * If the interface is called from gt/ then private data is in intel_gt_sysfs_get_drvdata()
49 static struct kobject *gt_get_parent_obj(struct intel_gt *gt) in gt_get_parent_obj() argument
51 return &gt->i915->drm.primary->kdev->kobj; in gt_get_parent_obj()
58 struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); in id_show() local
60 return sysfs_emit(buf, "%u\n", gt->info.id); in id_show()
81 void intel_gt_sysfs_register(struct intel_gt *gt) in intel_gt_sysfs_register() argument
88 * We generate the files only for gt 0 in intel_gt_sysfs_register()
91 if (gt_is_root(gt)) in intel_gt_sysfs_register()
[all …]
intel_gt_clock_utils.c
172 void intel_gt_init_clock_frequency(struct intel_gt *gt) in intel_gt_init_clock_frequency() argument
174 gt->clock_frequency = read_clock_frequency(gt->uncore); in intel_gt_init_clock_frequency()
177 if (GRAPHICS_VER(gt->i915) == 11) in intel_gt_init_clock_frequency()
178 gt->clock_period_ns = NSEC_PER_SEC / 13750000; in intel_gt_init_clock_frequency()
179 else if (gt->clock_frequency) in intel_gt_init_clock_frequency()
180 gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); in intel_gt_init_clock_frequency()
182 GT_TRACE(gt, in intel_gt_init_clock_frequency()
184 gt->clock_frequency / 1000, in intel_gt_init_clock_frequency()
185 gt->clock_period_ns, in intel_gt_init_clock_frequency()
186 div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX), in intel_gt_init_clock_frequency()
[all …]
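
intel_gt_init_clock_frequency() derives the per-tick period from the command-streamer timestamp frequency, with a hard-coded 13.75 MHz fallback on graphics version 11. Both directions of the conversion reduce to a scaled division; below is a small standalone sketch under that assumption. The helper names are made up and the rounding may differ from intel_gt_clock_interval_to_ns()/intel_gt_ns_to_clock_interval().

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

/* N ticks at freq_hz last N * NSEC_PER_SEC / freq_hz nanoseconds. */
static uint64_t clock_interval_to_ns(uint64_t freq_hz, uint64_t ticks)
{
	return ticks * NSEC_PER_SEC / freq_hz;
}

static uint64_t ns_to_clock_interval(uint64_t freq_hz, uint64_t ns)
{
	return ns * freq_hz / NSEC_PER_SEC;
}

int main(void)
{
	uint64_t freq = 13750000;	/* the GRAPHICS_VER == 11 fallback rate */

	printf("1 tick  = %llu ns\n",
	       (unsigned long long)clock_interval_to_ns(freq, 1));
	printf("1000 ns = %llu ticks\n",
	       (unsigned long long)ns_to_clock_interval(freq, 1000));
	return 0;
}
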
selftest_gt_pm.c
81 struct intel_gt *gt = arg; in live_gt_clocks() local
87 if (!gt->clock_frequency) { /* unknown */ in live_gt_clocks()
92 if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */ in live_gt_clocks()
95 wakeref = intel_gt_pm_get(gt); in live_gt_clocks()
96 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in live_gt_clocks()
98 for_each_engine(engine, gt, id) { in live_gt_clocks()
109 time = intel_gt_clock_interval_to_ns(engine->gt, cycles); in live_gt_clocks()
110 expected = intel_gt_ns_to_clock_interval(engine->gt, dt); in live_gt_clocks()
114 engine->gt->clock_frequency / 1000); in live_gt_clocks()
131 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in live_gt_clocks()
[all …]
intel_gt_mcr.h
11 void intel_gt_mcr_init(struct intel_gt *gt);
12 void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
13 void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
14 void intel_gt_mcr_lock_sanitize(struct intel_gt *gt);
16 u32 intel_gt_mcr_read(struct intel_gt *gt,
19 u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg);
20 u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg);
22 void intel_gt_mcr_unicast_write(struct intel_gt *gt,
25 void intel_gt_mcr_multicast_write(struct intel_gt *gt,
27 void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/device/
pci.c
80 { 0x106b, 0x0605, "GeForce GT 130" },
86 { 0x1043, 0x202d, "GeForce GT 220M" },
92 { 0x152d, 0x0850, "GeForce GT 240M LE" },
98 { 0x1043, 0x14a2, "GeForce GT 320M" },
99 { 0x1043, 0x14d2, "GeForce GT 320M" },
105 { 0x106b, 0x0633, "GeForce GT 120" },
111 { 0x106b, 0x0693, "GeForce GT 120" },
232 { 0x1025, 0x0692, "GeForce GT 620M" },
233 { 0x1025, 0x0725, "GeForce GT 620M" },
234 { 0x1025, 0x0728, "GeForce GT 620M" },
[all …]
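
The nouveau quirk table above maps PCI subsystem (vendor, device) pairs to board marketing names. Below is a standalone sketch of such a lookup, reusing a few entries from the excerpt; the helper and struct layout are illustrative, and nouveau's own matching is organized per chipset.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct board_name {
	uint16_t vendor;	/* PCI subsystem vendor */
	uint16_t device;	/* PCI subsystem device */
	const char *name;
};

static const struct board_name names[] = {
	{ 0x1025, 0x0692, "GeForce GT 620M" },
	{ 0x1043, 0x14a2, "GeForce GT 320M" },
	{ 0x106b, 0x0633, "GeForce GT 120" },
};

static const char *lookup_name(uint16_t vendor, uint16_t device)
{
	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		if (names[i].vendor == vendor && names[i].device == device)
			return names[i].name;
	return NULL;
}

int main(void)
{
	const char *n = lookup_name(0x1025, 0x0692);

	printf("%s\n", n ? n : "unknown board");
	return 0;
}
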
/linux/drivers/gpu/drm/xe/tests/
xe_mocs.c
23 static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt) in live_mocs_init() argument
30 flags = get_mocs_settings(gt_to_xe(gt), &arg->table); in live_mocs_init()
32 kunit_info(test, "gt %d", gt->info.id); in live_mocs_init()
33 kunit_info(test, "gt type %d", gt->info.type); in live_mocs_init()
41 static void read_l3cc_table(struct xe_gt *gt, in read_l3cc_table() argument
49 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); in read_l3cc_table()
51 xe_force_wake_put(gt_to_fw(gt), fw_ref); in read_l3cc_table()
57 if (regs_are_mcr(gt)) in read_l3cc_table()
58 reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1)); in read_l3cc_table()
60 reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i >> 1)); in read_l3cc_table()
[all …]
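
read_l3cc_table() wraps its reads in xe_force_wake_get()/xe_force_wake_put() and indexes the LNCFCMOCS register as i >> 1, i.e. two table entries per 32-bit register. Below is a standalone model of that packed indexing; the 16-bit field split is an assumption for illustration, and reg_read() stands in for the MMIO/MCR read done by the test.

#include <stdint.h>
#include <stdio.h>

static uint32_t reg_read(unsigned int reg_idx)
{
	/* fake register contents: high half = odd entry, low half = even */
	return ((2u * reg_idx + 1) << 16) | (2u * reg_idx);
}

/* Entry i lives in register i >> 1; even entries in the low half, odd
 * entries in the high half (assumed width of 16 bits per entry). */
static uint16_t l3cc_entry(unsigned int i)
{
	uint32_t reg_val = reg_read(i >> 1);

	return (i & 1) ? (uint16_t)(reg_val >> 16) : (uint16_t)(reg_val & 0xffff);
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("l3cc[%u] = %u\n", i, l3cc_entry(i));
	return 0;
}
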
/linux/drivers/gpu/drm/i915/pxp/
intel_pxp_irq.c
7 #include "gt/intel_gt_irq.h"
8 #include "gt/intel_gt_regs.h"
9 #include "gt/intel_gt_types.h"
27 struct intel_gt *gt; in intel_pxp_irq_handler() local
32 gt = pxp->ctrl_gt; in intel_pxp_irq_handler()
34 lockdep_assert_held(gt->irq_lock); in intel_pxp_irq_handler()
54 static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts) in __pxp_set_interrupts() argument
56 struct intel_uncore *uncore = gt->uncore; in __pxp_set_interrupts()
63 static inline void pxp_irq_reset(struct intel_gt *gt) in pxp_irq_reset() argument
65 spin_lock_irq(gt->irq_lock); in pxp_irq_reset()
[all …]
/linux/drivers/gpu/drm/i915/selftests/
igt_reset.c
9 #include "gt/intel_engine.h"
10 #include "gt/intel_gt.h"
14 void igt_global_reset_lock(struct intel_gt *gt) in igt_global_reset_lock() argument
19 pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags); in igt_global_reset_lock()
21 while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) in igt_global_reset_lock()
22 wait_event(gt->reset.queue, in igt_global_reset_lock()
23 !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in igt_global_reset_lock()
25 for_each_engine(engine, gt, id) { in igt_global_reset_lock()
27 &gt->reset.flags)) in igt_global_reset_lock()
28 wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id, in igt_global_reset_lock()
[all …]
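
igt_global_reset_lock() claims the reset path by looping on test_and_set_bit(I915_RESET_BACKOFF) and sleeping on gt->reset.queue until the bit clears, then repeats the same dance for each engine's reset bit. Below is a standalone sketch of that claim-a-flag-bit idiom using C11 atomics in place of the kernel's bitops and waitqueues; the busy-wait stands in for sleeping on the queue.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RESET_BACKOFF_BIT	0

/* Atomically set the bit and report whether it was already set,
 * mirroring test_and_set_bit(). */
static bool test_and_set_flag(atomic_ulong *flags, unsigned int bit)
{
	unsigned long mask = 1ul << bit;

	return atomic_fetch_or(flags, mask) & mask;
}

static void clear_flag(atomic_ulong *flags, unsigned int bit)
{
	atomic_fetch_and(flags, ~(1ul << bit));
}

int main(void)
{
	atomic_ulong reset_flags = 0;

	while (test_and_set_flag(&reset_flags, RESET_BACKOFF_BIT))
		;	/* the driver sleeps on gt->reset.queue here instead */

	printf("reset backoff claimed\n");
	clear_flag(&reset_flags, RESET_BACKOFF_BIT);
	return 0;
}
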
/linux/drivers/gpu/drm/i915/gt/uc/
selftest_guc.c
6 #include "gt/intel_gt_print.h"
51 struct intel_gt *gt = arg; in intel_guc_scrub_ctbs() local
59 if (!intel_has_gpu_reset(gt)) in intel_guc_scrub_ctbs()
62 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_guc_scrub_ctbs()
63 engine = intel_selftest_find_any_engine(gt); in intel_guc_scrub_ctbs()
70 gt_err(gt, "Failed to create context %d: %pe\n", i, ce); in intel_guc_scrub_ctbs()
91 gt_err(gt, "Failed to create request %d: %pe\n", i, rq); in intel_guc_scrub_ctbs()
101 gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret)); in intel_guc_scrub_ctbs()
109 intel_gt_retire_requests(gt); in intel_guc_scrub_ctbs()
113 intel_gt_handle_error(engine->gt, -1, 0, "selftest reset"); in intel_guc_scrub_ctbs()
[all …]
intel_gsc_uc.c
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
19 struct intel_gt *gt = gsc_uc_to_gt(gsc); in gsc_work() local
24 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in gsc_work()
26 spin_lock_irq(gt->irq_lock); in gsc_work()
29 spin_unlock_irq(gt->irq_lock); in gsc_work()
53 if (intel_uc_uses_huc(&gt->uc) && in gsc_work()
54 intel_huc_is_authenticated(&gt->uc.huc, INTEL_HUC_AUTH_BY_GUC)) in gsc_work()
55 intel_huc_auth(&gt->uc.huc, INTEL_HUC_AUTH_BY_GSC); in gsc_work()
60 gt_err(gt, "Proxy request received with GSC not loaded!\n"); in gsc_work()
[all …]
selftest_guc_multi_lrc.c
6 #include "gt/intel_gt_print.h"
10 #include "gt/intel_engine_heartbeat.h"
31 multi_lrc_create_parent(struct intel_gt *gt, u8 class, in multi_lrc_create_parent() argument
39 for_each_engine(engine, gt, id) { in multi_lrc_create_parent()
111 static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class) in __intel_guc_multi_lrc_basic() argument
117 parent = multi_lrc_create_parent(gt, class, 0); in __intel_guc_multi_lrc_basic()
119 gt_err(gt, "Failed creating contexts: %pe\n", parent); in __intel_guc_multi_lrc_basic()
122 gt_dbg(gt, "Not enough engines in class: %d\n", class); in __intel_guc_multi_lrc_basic()
129 gt_err(gt, "Failed creating requests: %pe\n", rq); in __intel_guc_multi_lrc_basic()
135 gt_err(gt, "Failed waiting on request: %pe\n", ERR_PTR(ret)); in __intel_guc_multi_lrc_basic()
[all …]
intel_guc_ads.c
9 #include "gt/intel_engine_regs.h"
10 #include "gt/intel_gt.h"
11 #include "gt/intel_gt_mcr.h"
12 #include "gt/intel_gt_regs.h"
13 #include "gt/intel_lrc.h"
14 #include "gt/shmem_utils.h"
163 struct intel_gt *gt = guc_to_gt(guc); in guc_policies_init() local
164 struct drm_i915_private *i915 = gt->i915; in guc_policies_init()
206 struct intel_gt *gt = guc_to_gt(guc); in intel_guc_global_policies_update() local
222 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) in intel_guc_global_policies_update()
[all …]
intel_uc.c
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
10 #include "gt/intel_reset.h"
17 #include "gt/intel_rps.h"
59 struct intel_gt *gt = uc_to_gt(uc); in __intel_uc_reset_hw() local
63 ret = i915_inject_probe_error(gt->i915, -ENXIO); in __intel_uc_reset_hw()
67 ret = intel_reset_guc(gt); in __intel_uc_reset_hw()
69 gt_err(gt, "Failed to reset GuC, ret = %d\n", ret); in __intel_uc_reset_hw()
73 guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); in __intel_uc_reset_hw()
74 gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET), in __intel_uc_reset_hw()
[all …]
/linux/arch/riscv/kvm/
vcpu_timer.c
18 static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt) in kvm_riscv_current_cycles() argument
20 return get_cycles64() + gt->time_delta; in kvm_riscv_current_cycles()
24 struct kvm_guest_timer *gt, in kvm_riscv_delta_cycles2ns() argument
31 cycles_now = kvm_riscv_current_cycles(gt); in kvm_riscv_delta_cycles2ns()
36 delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift; in kvm_riscv_delta_cycles2ns()
47 struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; in kvm_riscv_vcpu_hrtimer_expired() local
49 if (kvm_riscv_current_cycles(gt) < t->next_cycles) { in kvm_riscv_vcpu_hrtimer_expired()
50 delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); in kvm_riscv_vcpu_hrtimer_expired()
86 struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; in kvm_riscv_vcpu_update_hrtimer() local
94 delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t); in kvm_riscv_vcpu_update_hrtimer()
[all …]
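
kvm_riscv_delta_cycles2ns() converts a cycle delta to nanoseconds with a fixed-point multiply-and-shift, delta_ns = (cycles_delta * nsec_mult) >> nsec_shift, after offsetting host cycles by the per-VM time_delta. Below is a standalone sketch of that conversion; the mult/shift pair is an example scaled for a 10 MHz timebase (100 ns per cycle), not what KVM actually programs.

#include <stdint.h>
#include <stdio.h>

struct guest_timer {
	uint64_t time_delta;	/* guest cycles = host cycles + time_delta */
	uint32_t nsec_mult;
	uint32_t nsec_shift;
};

/* Fixed-point cycles-to-ns conversion: (delta * mult) >> shift. */
static uint64_t delta_cycles2ns(const struct guest_timer *gt,
				uint64_t cycles_now, uint64_t cycles_next)
{
	uint64_t delta = cycles_next - cycles_now;

	return (delta * gt->nsec_mult) >> gt->nsec_shift;
}

int main(void)
{
	/* 100 ns per cycle expressed as mult = 100 << 8, shift = 8 */
	struct guest_timer gt = { .nsec_mult = 100u << 8, .nsec_shift = 8 };

	printf("%llu ns\n",
	       (unsigned long long)delta_cycles2ns(&gt, 1000, 1500));
	return 0;
}
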
