xref: /linux/include/kvm/arm_arch_timer.h (revision d51c978b7d3e143381f871d28d8a0437d446b51b)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #ifndef __ASM_ARM_KVM_ARCH_TIMER_H
8 #define __ASM_ARM_KVM_ARCH_TIMER_H
9 
10 #include <linux/clocksource.h>
11 #include <linux/hrtimer.h>
12 
13 #include <linux/irqchip/arm-gic-v5.h>
14 
/*
 * Indices of the per-vcpu timer contexts.  The first NR_KVM_EL0_TIMERS
 * entries are the guest's EL0-visible timers; TIMER_HVTIMER and
 * TIMER_HPTIMER (the hyp virtual/physical timers) follow, sharing the
 * same array (see struct arch_timer_cpu::timers).
 */
enum kvm_arch_timers {
	TIMER_PTIMER,
	TIMER_VTIMER,
	NR_KVM_EL0_TIMERS,
	TIMER_HVTIMER = NR_KVM_EL0_TIMERS,	/* aliases the count of EL0 timers */
	TIMER_HPTIMER,
	NR_KVM_TIMERS
};
23 
/*
 * Timer registers addressable through kvm_arm_timer_read_sysreg() /
 * kvm_arm_timer_write_sysreg().
 */
enum kvm_arch_timer_regs {
	TIMER_REG_CNT,		/* counter value */
	TIMER_REG_CVAL,		/* compare value */
	TIMER_REG_TVAL,		/* timer value (down-counter view) */
	TIMER_REG_CTL,		/* control register */
	TIMER_REG_VOFF,		/* virtual offset */
};
31 
/*
 * Sources of the counter offset for one timer context.  The effective
 * offset is the sum of whichever pointers are non-NULL; see
 * timer_get_offset() below.
 */
struct arch_timer_offset {
	/*
	 * If set, pointer to one of the offsets in the kvm's offset
	 * structure. If NULL, assume a zero offset.
	 */
	u64	*vm_offset;
	/*
	 * If set, pointer to one of the offsets in the vcpu's sysreg
	 * array. If NULL, assume a zero offset.
	 */
	u64	*vcpu_offset;
};
44 
/* Timer state shared by all vcpus of a VM (reached via kvm->arch.timer_data). */
struct arch_timer_vm_data {
	/* Offset applied to the virtual timer/counter */
	u64	voffset;
	/* Offset applied to the physical timer/counter */
	u64	poffset;

	/* The PPI for each timer, global to the VM */
	u32	ppi[NR_KVM_TIMERS];
};
54 
/* Per-vcpu state of a single architected timer. */
struct arch_timer_context {
	/* Emulated Timer (may be unused) */
	struct hrtimer			hrtimer;
	/* Fractional nanoseconds carried across hrtimer programming */
	u64				ns_frac;

	/* Offset for this counter/timer */
	struct arch_timer_offset	offset;
	/*
	 * We have multiple paths which can save/restore the timer state onto
	 * the hardware, so we need some way of keeping track of where the
	 * latest state is.
	 */
	bool				loaded;

	/* Output level of the timer IRQ */
	struct {
		bool			level;
	} irq;

	/* Who am I? */
	enum kvm_arch_timers		timer_id;

	/* Duplicated state from arch_timer.c for convenience */
	u32				host_timer_irq;

	/* Is this a direct timer? */
	bool				direct;
};
83 
/*
 * Resolved mapping of the vcpu's timer contexts onto hardware-backed
 * ("direct") vs. hrtimer-emulated timers.  Filled in by get_timer_map().
 */
struct timer_map {
	struct arch_timer_context *direct_vtimer;
	struct arch_timer_context *direct_ptimer;
	struct arch_timer_context *emul_vtimer;
	struct arch_timer_context *emul_ptimer;
};

void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
92 
/* All per-vcpu timer state, embedded in the vcpu's arch struct. */
struct arch_timer_cpu {
	/* One context per timer; indexed by enum kvm_arch_timers */
	struct arch_timer_context timers[NR_KVM_TIMERS];

	/* Background timer used when the guest is not running */
	struct hrtimer			bg_timer;

	/* Is the timer enabled */
	bool			enabled;
};
102 
/* One-time host-wide init; @has_gic selects whether a GIC is available. */
int __init kvm_timer_hyp_init(bool has_gic);

/* vcpu lifecycle hooks (implemented in arch_timer.c). */
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);

/* Per-VM init of struct arch_timer_vm_data. */
void kvm_timer_init_vm(struct kvm *kvm);

/* KVM_{SET,GET,HAS}_DEVICE_ATTR handlers for the timer device group. */
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);

/* Raw physical counter read. */
u64 kvm_phys_timer_read(void);

/* World-switch hooks: save/restore timer state around guest entry/exit. */
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);

/* VHE-specific setup. */
void kvm_timer_init_vhe(void);
125 
/* Accessors for a vcpu's timer state and the individual timer contexts. */
#define vcpu_timer(v)	(&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
#define vcpu_hvtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

/* Map a timer context back to its index, owning vcpu, VM data and PPI. */
#define arch_timer_ctx_index(ctx)	((ctx)->timer_id)
#define timer_context_to_vcpu(ctx)	container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
#define timer_vm_data(ctx)		(&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
#define timer_irq(ctx)			(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
137 
/*
 * Encode PPI @i for VM @k's interrupt controller model: pre-GICv5 models
 * use the PPI number as-is; GICv5 packs it into the HWIRQ ID field and
 * tags it with the PPI HWIRQ type.
 *
 * Note: @i was previously passed unparenthesized into FIELD_PREP();
 * every argument use is now parenthesized for macro hygiene.
 */
#define get_vgic_ppi(k, i)						\
	(((k)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V5) ?	\
	 (i) :								\
	 (FIELD_PREP(GICV5_HWIRQ_ID, (i)) |				\
	  FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_PPI)))
141 
/* Read/write an emulated timer register (enum kvm_arch_timer_regs) of @tmr. */
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg);
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val);

/* Needed for tracing */
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);

/* CPU HP callbacks */
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);
157 
/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
#define CNTKCTL_VALID_BITS	(BIT(17) | GENMASK_ULL(9, 0))

/*
 * NOTE(review): presumably set at init when the host's CNTVOFF handling
 * is unreliable (CPU erratum) — confirm against arch_timer.c.
 */
DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);

/* True when the broken-CNTVOFF workaround is in effect on this system. */
static inline bool has_broken_cntvoff(void)
{
	return static_branch_unlikely(&broken_cntvoff_key);
}
167 
168 static inline bool has_cntpoff(void)
169 {
170 	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
171 }
172 
173 static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
174 {
175 	u64 offset = 0;
176 
177 	if (!ctxt)
178 		return 0;
179 
180 	if (ctxt->offset.vm_offset)
181 		offset += *ctxt->offset.vm_offset;
182 	if (ctxt->offset.vcpu_offset)
183 		offset += *ctxt->offset.vcpu_offset;
184 
185 	return offset;
186 }
187 
188 static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
189 {
190 	if (!ctxt->offset.vm_offset) {
191 		WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
192 		return;
193 	}
194 
195 	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
196 }
197 
198 #endif
199