xref: /linux/include/kvm/arm_vgic.h (revision 2ba9268dd603d23e17643437b2246acb6844953b)
1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  */
18 
19 #ifndef __ASM_ARM_KVM_VGIC_H
20 #define __ASM_ARM_KVM_VGIC_H
21 
22 #include <linux/kernel.h>
23 #include <linux/kvm.h>
24 #include <linux/irqreturn.h>
25 #include <linux/spinlock.h>
26 #include <linux/types.h>
27 
/* Number of IRQs exposed through the legacy KVM_CREATE_IRQCHIP interface */
#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
/* SGIs + PPIs are banked per VCPU; IRQs >= 32 are shared (SPIs) */
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_MAX_IRQS		1024
#define VGIC_V2_MAX_CPUS	8

/* Sanity checks... */
#if (KVM_MAX_VCPUS > 255)
#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
#endif

/*
 * The distributor bitmaps are carved up in 32-bit words, so the IRQ
 * count must be a multiple of 32 and must not exceed the architected
 * maximum of 1024.  (The messages previously said "VGIC_NR_IRQS",
 * which no longer exists after the rename to VGIC_NR_IRQS_LEGACY.)
 */
#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS_LEGACY must be a multiple of 32"
#endif

#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS_LEGACY must be <= VGIC_MAX_IRQS (1024)"
#endif
50 
/*
 * The GIC distributor registers describing interrupts have two parts:
 * - 32 per-CPU interrupts (SGI + PPI)
 * - a bunch of shared interrupts (SPI)
 */
struct vgic_bitmap {
	/*
	 * One bit of state per interrupt, split into a banked private
	 * part and a shared part:
	 *
	 * - One UL per VCPU for private interrupts (assumes UL is at
	 *   least 32 bits)
	 * - As many UL as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field, one UL per vcpu (the state for vcpu n is in
	 * private[n]). The shared interrupts are accessed via the
	 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
	 */
	unsigned long *private;
	unsigned long *shared;
};
70 
struct vgic_bytemap {
	/*
	 * One byte of state per interrupt (e.g. a priority), packed
	 * four-per-u32 and split like vgic_bitmap:
	 *
	 * - 8 u32 per VCPU for private interrupts
	 * - As many u32 as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field, (the state for vcpu n is in private[n*8] to
	 * private[n*8 + 7]). The shared interrupts are accessed via
	 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
	 * shared[(n-32)/4] word).
	 */
	u32 *private;
	u32 *shared;
};
85 
struct kvm_vcpu;

/* Which GIC hardware model the host-side implementation drives */
enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};
92 
/* Bits for vgic_lr.state below */
#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)
#define LR_EOI_INT		(1 << 2)	/* request maintenance irq on EOI */

/*
 * Hardware-independent decoding of one list register, converted
 * to/from the GICv2/GICv3 layouts by vgic_ops get_lr/set_lr.
 */
struct vgic_lr {
	u16	irq;		/* virtual interrupt number */
	u8	source;		/* NOTE(review): presumably SGI source CPU id — confirm */
	u8	state;		/* LR_STATE_* | LR_EOI_INT */
};
103 
/*
 * Hardware-independent view of the virtual machine control register;
 * field names mirror the GIC CPU interface registers of the same name
 * (CTLR, ABPR, BPR, PMR).  Accessed via vgic_ops get_vmcr/set_vmcr.
 */
struct vgic_vmcr {
	u32	ctlr;
	u32	abpr;
	u32	bpr;
	u32	pmr;
};
110 
/*
 * Hardware-specific backend operations, implemented once for GICv2 and
 * once for GICv3 (see vgic_v2_probe/vgic_v3_probe below).  All state
 * accesses go through the vcpu's shadow cpu-interface registers.
 */
struct vgic_ops {
	/* Read/write one list register in hw-independent form */
	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
	/* Empty-LR and EOI-status bitmaps, one bit per list register */
	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
	/* Returns INT_STATUS_* bits (see below) */
	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
	void	(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*enable)(struct kvm_vcpu *vcpu);
};
125 
/*
 * Host GIC hardware description, filled in at probe time and shared by
 * all VMs (const thereafter — see the "const struct vgic_params *"
 * usage below).
 */
struct vgic_params {
	/* vgic type */
	enum vgic_type	type;
	/* Physical address of vgic virtual cpu interface */
	phys_addr_t	vcpu_base;
	/* Number of list registers */
	u32		nr_lr;
	/* Interrupt number */
	unsigned int	maint_irq;
	/* Virtual control interface base address */
	void __iomem	*vctrl_base;
	/* Maximum number of VCPUs this GIC model can address */
	int		max_gic_vcpus;
	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool		can_emulate_gicv2;
};
141 
/*
 * Per-VM operations that depend on the *emulated* GIC model (the guest
 * may see a GICv2 or a GICv3 — cf. vgic_dist.vgic_model), as opposed to
 * vgic_ops, which depends on the *host* hardware.
 */
struct vgic_vm_ops {
	/* Dispatch a trapped access to the emulated distributor/redistributor */
	bool	(*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
			       struct kvm_exit_mmio *);
	bool	(*queue_sgi)(struct kvm_vcpu *, int irq);
	void	(*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
	int	(*init_model)(struct kvm *);
	int	(*map_resources)(struct kvm *, const struct vgic_params *);
};
150 
/*
 * Emulated distributor state, one instance per VM, shared by all VCPUs.
 * The bitmap/bytemap arrays are sized at runtime from nr_cpus/nr_irqs.
 */
struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
	/* NOTE(review): presumably serializes the mutable state below — confirm in vgic.c */
	spinlock_t		lock;
	/* Set once an in-kernel irqchip exists (see irqchip_in_kernel()) */
	bool			in_kernel;
	/* Tested by vgic_ready() */
	bool			ready;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32			vgic_model;

	/* Non-zero once initialized; tested by vgic_initialized() */
	int			nr_cpus;
	int			nr_irqs;

	/* Virtual control interface mapping */
	void __iomem		*vctrl_base;

	/* Distributor and vcpu interface mapping in the guest */
	phys_addr_t		vgic_dist_base;
	/* GICv2 and GICv3 use different mapped register blocks */
	union {
		phys_addr_t		vgic_cpu_base;
		phys_addr_t		vgic_redist_base;
	};

	/* Distributor enabled */
	u32			enabled;

	/* Interrupt enabled (one bit per IRQ) */
	struct vgic_bitmap	irq_enabled;

	/* Level-triggered interrupt external input is asserted */
	struct vgic_bitmap	irq_level;

	/*
	 * Interrupt state is pending on the distributor
	 */
	struct vgic_bitmap	irq_pending;

	/*
	 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
	 * interrupts.  Essentially holds the state of the flip-flop in
	 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
	 * Once set, it is only cleared for level-triggered interrupts on
	 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
	 */
	struct vgic_bitmap	irq_soft_pend;

	/* Level-triggered interrupt queued on VCPU interface */
	struct vgic_bitmap	irq_queued;

	/* Interrupt priority. Not used yet. */
	struct vgic_bytemap	irq_priority;

	/* Level/edge triggered */
	struct vgic_bitmap	irq_cfg;

	/*
	 * Source CPU per SGI and target CPU:
	 *
	 * Each byte represent a SGI observable on a VCPU, each bit of
	 * this byte indicating if the corresponding VCPU has
	 * generated this interrupt. This is a GICv2 feature only.
	 *
	 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
	 * the SGIs observable on VCPUn.
	 */
	u8			*irq_sgi_sources;

	/*
	 * Target CPU for each SPI:
	 *
	 * Array of available SPI, each byte indicating the target
	 * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32].
	 */
	u8			*irq_spi_cpu;

	/*
	 * Reverse lookup of irq_spi_cpu for faster compute pending:
	 *
	 * Array of bitmaps, one per VCPU, describing if IRQn is
	 * routed to a particular VCPU.
	 */
	struct vgic_bitmap	*irq_spi_target;

	/* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
	u32			*irq_spi_mpidr;

	/* Bitmap indicating which CPU has something pending */
	unsigned long		*irq_pending_on_cpu;

	/* Emulated-model-specific callbacks (GICv2 vs GICv3 guest) */
	struct vgic_vm_ops	vm_ops;
#endif
};
243 
/*
 * Shadow of the GICv2 virtual interface control registers, saved and
 * restored on world switch; field names mirror the GICH_* registers
 * of the same name.
 */
struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_misr;	/* Saved only */
	u64		vgic_eisr;	/* Saved only */
	u64		vgic_elrsr;	/* Saved only */
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];
};
253 
/*
 * Shadow of the GICv3 virtual interface control registers (ICH_* by
 * name), saved and restored on world switch.  Empty when the host has
 * no GICv3 support so the union in vgic_cpu costs nothing.
 */
struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
	u32		vgic_misr;	/* Saved only */
	u32		vgic_eisr;	/* Saved only */
	u32		vgic_elrsr;	/* Saved only */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];
#endif
};
267 
/* Per-VCPU vgic state */
struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
	/* per IRQ to LR mapping */
	u8		*vgic_irq_lr_map;

	/* Pending interrupts on this VCPU: private IRQs (SGI+PPI) live in
	 * the fixed-size bitmap, shared IRQs in the runtime allocation */
	DECLARE_BITMAP(	pending_percpu, VGIC_NR_PRIVATE_IRQS);
	unsigned long	*pending_shared;

	/* Bitmap of used/free list registers */
	DECLARE_BITMAP(	lr_used, VGIC_V2_MAX_LRS);

	/* Number of list registers on this CPU */
	int		nr_lr;

	/* CPU vif control registers for world switch; only the member
	 * matching the host GIC (vgic_params.type) is used */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};
#endif
};
290 
/* NOTE(review): presumably the "no LR allocated" sentinel stored in
 * vgic_cpu.vgic_irq_lr_map — confirm in vgic.c */
#define LR_EMPTY	0xff

/* Bits reported by vgic_ops.get_interrupt_status */
#define INT_STATUS_EOI		(1 << 0)
#define INT_STATUS_UNDERFLOW	(1 << 1)

struct kvm;
struct kvm_vcpu;
struct kvm_run;
struct kvm_exit_mmio;
300 
#ifdef CONFIG_KVM_ARM_VGIC
/* Set (write=true) or query a guest mapping address (distributor/cpuif) */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
/* Lifecycle of the in-kernel irqchip for one VM */
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
/* Called around guest entry/exit to move state between the distributor
 * emulation and the hardware list registers */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k)		((k)->arch.vgic.ready)

/* Probe the host GIC from DT and hand back its ops/params */
int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#else
static inline int vgic_v3_probe(struct device_node *vgic_node,
				const struct vgic_ops **ops,
				const struct vgic_params **params)
{
	return -ENODEV;
}
#endif
337 
#else
/*
 * Stubs for !CONFIG_KVM_ARM_VGIC: no in-kernel irqchip exists, so
 * lifecycle calls succeed as no-ops, queries report "nothing pending",
 * and vgic_initialized()/vgic_ready() report true so callers never
 * wait for a device that will never appear.
 */
static inline int kvm_vgic_hyp_init(void)
{
	return 0;
}

/* NOTE(review): no counterpart is declared in the CONFIG_KVM_ARM_VGIC
 * branch above — looks like a stale leftover; confirm before removing */
static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
	return 0;
}

static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	return -ENXIO;
}

static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	return 0;
}

static inline void kvm_vgic_destroy(struct kvm *kvm)
{
}

static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

/* NOTE(review): also undeclared in the enabled branch — see note above */
static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}

static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
				      unsigned int irq_num, bool level)
{
	return 0;
}

static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_exit_mmio *mmio)
{
	return false;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 0;
}

static inline bool vgic_initialized(struct kvm *kvm)
{
	return true;
}

static inline bool vgic_ready(struct kvm *kvm)
{
	return true;
}

static inline int kvm_vgic_get_max_vcpus(void)
{
	return KVM_MAX_VCPUS;
}
#endif
417 
418 #endif
419