// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic-mmio.h"
#include "vgic.h"

static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

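/* Zero all implemented list registers so we start from an empty state */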
void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

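/*
 * Build the GICH_HCR value for this vcpu: enable the virtual CPU
 * interface and request the maintenance interrupts we need to learn
 * about state we cannot observe directly (pending/active interrupts
 * that did not fit in the LRs, and changes to the group enables).
 */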
void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu,
			   struct ap_list_summary *als)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr = GICH_HCR_EN;

	if (irqs_pending_outside_lrs(als))
		cpuif->vgic_hcr |= GICH_HCR_NPIE;
	if (irqs_active_outside_lrs(als))
		cpuif->vgic_hcr |= GICH_HCR_LRENPIE;
	if (irqs_outside_lrs(als))
		cpuif->vgic_hcr |= GICH_HCR_UIE;

	cpuif->vgic_hcr |= (cpuif->vgic_vmcr & GICH_VMCR_ENABLE_GRP0_MASK) ?
		GICH_HCR_VGrp0DIE : GICH_HCR_VGrp0EIE;
	cpuif->vgic_hcr |= (cpuif->vgic_vmcr & GICH_VMCR_ENABLE_GRP1_MASK) ?
		GICH_HCR_VGrp1DIE : GICH_HCR_VGrp1EIE;
}

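/*
 * An LR requests an EOI maintenance interrupt once it is invalid
 * (neither pending nor active), has the EOI bit set, and does not
 * describe a HW interrupt.
 */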
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

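/* Fold a single LR value back into the corresponding struct vgic_irq */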
static void vgic_v2_fold_lr(struct kvm_vcpu *vcpu, u32 val)
{
	u32 cpuid, intid = val & GICH_LR_VIRTUALID;
	struct vgic_irq *irq;
	bool deactivated;

	/* Extract the source vCPU id from the LR */
	cpuid = FIELD_GET(GICH_LR_PHYSID_CPUID, val) & 7;

	/* Notify fds when the guest EOI'ed a level-triggered SPI */
	if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
		kvm_notify_acked_irq(vcpu->kvm, 0,
				     intid - VGIC_NR_PRIVATE_IRQS);

	irq = vgic_get_vcpu_irq(vcpu, intid);

	scoped_guard(raw_spinlock, &irq->irq_lock) {
		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);

		irq->on_lr = false;
	}

	vgic_put_irq(vcpu->kvm, irq);
}

static u32 vgic_v2_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq);

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	u32 eoicount = FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr);
	struct vgic_irq *irq;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	for (int lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++)
		vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]);

	/* See the GICv3 equivalent for the EOIcount handling rationale */
	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		u32 lr;

		if (!eoicount) {
			break;
		} else {
			guard(raw_spinlock)(&irq->irq_lock);

			if (!(likely(vgic_target_oracle(irq) == vcpu) &&
			      irq->active))
				continue;

			lr = vgic_v2_compute_lr(vcpu, irq) & ~GICH_LR_ACTIVE_BIT;
		}

		if (lr & GICH_LR_HW)
			writel_relaxed(FIELD_GET(GICH_LR_PHYSID_CPUID, lr),
				       kvm_vgic_global_state.gicc_base + GIC_CPU_DEACTIVATE);
		vgic_v2_fold_lr(vcpu, lr);
		eoicount--;
	}

	cpuif->used_lrs = 0;
}

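/*
 * Deactivate, on behalf of the guest, the interrupt described by @val
 * (GICC_DIR format: INTID plus source CPUID for SGIs). Only relevant
 * when EOIMode==1; with EOIMode==0 deactivation is handled via the LRs.
 */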
void vgic_v2_deactivate(struct kvm_vcpu *vcpu, u32 val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	struct kvm_vcpu *target_vcpu = NULL;
	bool mmio = false;
	struct vgic_irq *irq;
	unsigned long flags;
	u64 lr = 0;
	u8 cpuid;

	/* Snapshot CPUID, and remove it from the INTID */
	cpuid = FIELD_GET(GENMASK_ULL(12, 10), val);
	val &= ~GENMASK_ULL(12, 10);

	/* We only deal with DIR when EOIMode==1 */
	if (!(cpuif->vgic_vmcr & GICH_VMCR_EOI_MODE_MASK))
		return;

	/* Make sure we're in the same context as LR handling */
	local_irq_save(flags);

	irq = vgic_get_vcpu_irq(vcpu, val);
	if (WARN_ON_ONCE(!irq))
		goto out;

	/* See the corresponding v3 code for the rationale */
	scoped_guard(raw_spinlock, &irq->irq_lock) {
		target_vcpu = irq->vcpu;

		/* Not on any ap_list? */
		if (!target_vcpu)
			goto put;

		/*
		 * Urgh. We're deactivating something that we cannot
		 * observe yet... Big hammer time.
		 */
		if (irq->on_lr) {
			mmio = true;
			goto put;
		}

		/* SGI: check that the cpuid matches */
		if (val < VGIC_NR_SGIS && irq->active_source != cpuid) {
			target_vcpu = NULL;
			goto put;
		}

		/* (with a Dalek voice) DEACTIVATE!!!! */
		lr = vgic_v2_compute_lr(vcpu, irq) & ~GICH_LR_ACTIVE_BIT;
	}

	if (lr & GICH_LR_HW)
		writel_relaxed(FIELD_GET(GICH_LR_PHYSID_CPUID, lr),
			       kvm_vgic_global_state.gicc_base + GIC_CPU_DEACTIVATE);

	vgic_v2_fold_lr(vcpu, lr);

put:
	vgic_put_irq(vcpu->kvm, irq);

out:
	local_irq_restore(flags);

	if (mmio)
		vgic_mmio_write_cactive(vcpu, (val / 32) * 4, 4, BIT(val % 32));

	/* Force the ap_list to be pruned */
	if (target_vcpu)
		kvm_make_request(KVM_REQ_VGIC_PROCESS_UPDATE, target_vcpu);
}

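/*
 * Compute the LR value describing @irq without altering the IRQ state.
 * Must be called with the irq_lock held.
 */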
static u32 vgic_v2_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	WARN_ON(irq->on_lr);

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return 0;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			if (irq->source & ~BIT(src - 1))
				val |= GICH_LR_EOI;
		}
	}

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	return val;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = vgic_v2_compute_lr(vcpu, irq);

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;

	if (val & GICH_LR_PENDING_BIT) {
		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			irq->source &= ~BIT(src - 1);
			if (irq->source)
				irq->pending_latch = true;
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC.  We therefore lower the line
	 * level here, so that we can take new virtual IRQs.  See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	irq->on_lr = true;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

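/* Pack the generic struct vgic_vmcr into the GICH_VMCR register layout */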
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;

	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
			GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr  = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
			GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

void vgic_v2_reset(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

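/*
 * Finalise the guest-facing GICv2 frames: register the emulated CPU
 * interface MMIO device and, unless we have to trap GICV accesses,
 * map the hardware GICV region into the guest at the CPU interface
 * address.
 */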
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned int len;
	int ret = 0;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_debug("Need to set vgic cpu and dist addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_debug("VGIC CPU and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		return ret;
	}

	len = vgic_v2_init_cpuif_iodev(&dist->cpuif_iodev);
	dist->cpuif_iodev.base_addr = dist->vgic_cpu_base;
	dist->cpuif_iodev.iodev_type = IODEV_CPUIF;
	dist->cpuif_iodev.redist_vcpu = NULL;

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist->vgic_cpu_base,
				      len, &dist->cpuif_iodev.dev);
	if (ret)
		return ret;

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE - SZ_4K, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			return ret;
		}
	}

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (is_protected_kvm_enabled()) {
		kvm_err("GICv2 not supported in protected mode\n");
		return -ENXIO;
	}

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}
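
	/* GICH_VTR.ListRegs encodes the number of implemented LRs minus one */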
	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.gicc_base = info->gicc_base;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}

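/*
 * Copy the hardware LRs into the shadow copy and clear them in the
 * hardware. LRs that the GIC reports as empty (ELRSR) only need their
 * state bits cleared in the shadow copy.
 */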
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = cpu_if->used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

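/*
 * Save the GICH state on guest exit: VMCR, the in-use LRs and, when
 * LRENPIE was set, the EOICount accumulated by the guest. The vCPU
 * interface is then disabled until the next entry.
 */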
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

	if (!base)
		return;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);

	if (used_lrs)
		save_lrs(vcpu, base);

	if (cpu_if->vgic_hcr & GICH_HCR_LRENPIE) {
		u32 val = readl_relaxed(base + GICH_HCR);

		cpu_if->vgic_hcr &= ~GICH_HCR_EOICOUNT;
		cpu_if->vgic_hcr |= val & GICH_HCR_EOICOUNT;
	}

	writel_relaxed(0, base + GICH_HCR);
}

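/* Restore the GICH state (HCR and the in-use LRs) on guest entry */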
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (!base)
		return;

	writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);

	for (i = 0; i < used_lrs; i++)
		writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
}

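/* Write the shadow VMCR and APR back to the hardware on vcpu load */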
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

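/* Snapshot the active priorities register when the vcpu is put */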
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}
623