xref: /linux/include/kvm/arm_arch_timer.h (revision 8fe30434a81d36715ab83fdb4a5e6c967d2e3ecf)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #ifndef __ASM_ARM_KVM_ARCH_TIMER_H
8 #define __ASM_ARM_KVM_ARCH_TIMER_H
9 
10 #include <linux/clocksource.h>
11 #include <linux/hrtimer.h>
12 
13 #include <linux/irqchip/arm-gic-v5.h>
14 
/*
 * Identifiers for the timers KVM emulates/context-switches per vCPU.
 *
 * Ordering matters: the first NR_KVM_EL0_TIMERS entries are the guest's
 * EL0-visible physical/virtual timers. The HV/HP entries that follow
 * (presumably the EL2 timers used for nested virt — confirm in
 * arch_timer.c) share the same timers[] array in struct arch_timer_cpu.
 */
enum kvm_arch_timers {
	TIMER_PTIMER,
	TIMER_VTIMER,
	NR_KVM_EL0_TIMERS,
	TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
	TIMER_HPTIMER,
	NR_KVM_TIMERS
};
23 
/*
 * Timer registers accessible through
 * kvm_arm_timer_{read,write}_sysreg(), mirroring the architectural
 * CNT*_CTL/CVAL/TVAL register naming.
 */
enum kvm_arch_timer_regs {
	TIMER_REG_CNT,		/* counter value */
	TIMER_REG_CVAL,		/* compare value */
	TIMER_REG_TVAL,		/* timer value */
	TIMER_REG_CTL,		/* control register */
	TIMER_REG_VOFF,		/* virtual offset — NOTE(review): presumably CNTVOFF; confirm */
};
31 
/*
 * Describes where the offset(s) applied to a counter/timer live.
 * Both contributions (VM-wide and per-vCPU) are summed by
 * timer_get_offset().
 */
struct arch_timer_offset {
	/*
	 * If set, pointer to one of the offsets in the kvm's offset
	 * structure. If NULL, assume a zero offset.
	 */
	u64	*vm_offset;
	/*
	 * If set, pointer to one of the offsets in the vcpu's sysreg
	 * array. If NULL, assume a zero offset.
	 */
	u64	*vcpu_offset;
};
44 
/*
 * VM-global timer state, reached from a timer context via
 * timer_vm_data(). Holds the counter offsets shared by all vCPUs and
 * the PPI assignment for each timer.
 */
struct arch_timer_vm_data {
	/* Offset applied to the virtual timer/counter */
	u64	voffset;
	/* Offset applied to the physical timer/counter */
	u64	poffset;

	/* The PPI for each timer, global to the VM */
	u32	ppi[NR_KVM_TIMERS];
};
54 
/*
 * Per-vCPU state for a single timer; NR_KVM_TIMERS of these are
 * embedded in struct arch_timer_cpu (see vcpu_get_timer()).
 */
struct arch_timer_context {
	/* Emulated Timer (may be unused) */
	struct hrtimer			hrtimer;
	/* NOTE(review): looks like a sub-ns remainder carried between
	 * hrtimer programmings — confirm in arch_timer.c */
	u64				ns_frac;

	/* Offset for this counter/timer */
	struct arch_timer_offset	offset;
	/*
	 * We have multiple paths which can save/restore the timer state onto
	 * the hardware, so we need some way of keeping track of where the
	 * latest state is.
	 */
	bool				loaded;

	/* Output level of the timer IRQ */
	struct {
		bool			level;
	} irq;

	/* Who am I? (index into arch_timer_cpu.timers[]) */
	enum kvm_arch_timers		timer_id;

	/* Duplicated state from arch_timer.c for convenience */
	u32				host_timer_irq;
};
80 
/*
 * Mapping of the vCPU's virtual/physical timers onto either a direct
 * (hardware-backed) or emulated (hrtimer-backed) context. Filled in by
 * get_timer_map().
 */
struct timer_map {
	struct arch_timer_context *direct_vtimer;
	struct arch_timer_context *direct_ptimer;
	struct arch_timer_context *emul_vtimer;
	struct arch_timer_context *emul_ptimer;
};

/* Compute the direct/emulated timer mapping for @vcpu into @map */
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
89 
/*
 * Per-vCPU timer state, embedded in the vcpu arch state and reached
 * via the vcpu_timer()/vcpu_get_timer() accessors below.
 */
struct arch_timer_cpu {
	struct arch_timer_context timers[NR_KVM_TIMERS];

	/* Background timer used when the guest is not running */
	struct hrtimer			bg_timer;

	/* Is the timer enabled */
	bool			enabled;
};
99 
/* One-time host-side timer setup, called during KVM initialization */
int __init kvm_timer_hyp_init(bool has_gic);
/* Per-vCPU lifecycle: enable/reset/init/teardown */
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
/* State synchronization with nested context and userspace */
void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);

/* Per-VM initialization of struct arch_timer_vm_data */
void kvm_timer_init_vm(struct kvm *kvm);

/* KVM_{SET,GET,HAS}_DEVICE_ATTR handlers for the timer device attrs */
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);

/* Read the host physical counter */
u64 kvm_phys_timer_read(void);

/* vcpu_load/vcpu_put hooks: move timer state on/off the hardware */
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);

/* VHE-specific one-time init */
void kvm_timer_init_vhe(void);
122 
/* Accessors from a vcpu to its timer state / individual timer contexts */
#define vcpu_timer(v)	(&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
#define vcpu_hvtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

/*
 * Navigate from a timer context back to its identity, owning vcpu
 * (via container_of over the embedded timers[] array), VM-global timer
 * data, and configured PPI.
 */
#define arch_timer_ctx_index(ctx)	((ctx)->timer_id)
#define timer_context_to_vcpu(ctx)	container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
#define timer_vm_data(ctx)		(&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
#define timer_irq(ctx)			(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
134 
/*
 * Translate a raw PPI number @i into the IRQ encoding expected by VM
 * @k's vGIC model: GICv5 packs the ID and the PPI type into separate
 * fields; every other model uses the raw value.
 *
 * Macro hygiene: parenthesize @i consistently in both branches (the
 * original passed it bare to FIELD_PREP in the GICv5 arm).
 */
#define get_vgic_ppi(k, i) (((k)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V5) ? \
			    (i) : (FIELD_PREP(GICV5_HWIRQ_ID, (i)) |	\
				   FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_PPI)))
138 
/* Emulated access to one register (treg) of one timer (tmr) of @vcpu */
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg);
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val);

/* Needed for tracing */
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);

/* CPU HP callbacks */
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);

/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
#define CNTKCTL_VALID_BITS	(BIT(17) | GENMASK_ULL(9, 0))

/*
 * NOTE(review): presumably set when the host's CNTVOFF handling needs a
 * workaround — confirm against where arch_timer.c enables this key.
 */
DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);
159 
/* True when the broken_cntvoff_key static branch has been enabled */
static inline bool has_broken_cntvoff(void)
{
	return static_branch_unlikely(&broken_cntvoff_key);
}
164 
/*
 * True when the physical counter offset (ECV CNTPOFF) is usable:
 * requires both VHE and the ARM64_HAS_ECV_CNTPOFF capability.
 */
static inline bool has_cntpoff(void)
{
	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}
169 
170 static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
171 {
172 	u64 offset = 0;
173 
174 	if (!ctxt)
175 		return 0;
176 
177 	if (ctxt->offset.vm_offset)
178 		offset += *ctxt->offset.vm_offset;
179 	if (ctxt->offset.vcpu_offset)
180 		offset += *ctxt->offset.vcpu_offset;
181 
182 	return offset;
183 }
184 
185 static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
186 {
187 	if (!ctxt->offset.vm_offset) {
188 		WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
189 		return;
190 	}
191 
192 	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
193 }
194 
195 #endif
196