// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv3 MMIO handling functions
 */

#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at @offset bytes offset in data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
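
/*
 * Worked example (illustrative values): extract_bytes(0x1122334455667788ULL,
 * 2, 2) shifts the data right by 16 bits and masks the result down to
 * 16 bits, returning 0x5566.
 */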

/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}
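
/*
 * Worked example (illustrative values): update_64bit_reg(0x1111222233334444ULL,
 * 4, 4, 0xaaaabbbb) selects the upper word (lower = 32, upper = 63),
 * clears it and returns 0xaaaabbbb33334444.
 */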

bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}

bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return (kvm_vgic_global_state.has_gicv4_1 ||
		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
}
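
/*
 * In other words (a reading of the check above, not a normative statement):
 * GICv4.1 hosts can always inject MSIs directly, while plain GICv4 hosts
 * additionally need the guest to have an ITS for LPIs to be forwarded.
 */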

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 * Revision 3: GICR_CTLR.{IR,CES} are advertised on top of that (see
 *	       vgic_mmio_read_v3r_ctlr() below).
 */

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		if (vgic->nassgireq)
			value |= GICD_CTLR_nASSGIreq;
		break;
	case GICD_TYPER:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_TYPER2:
		if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
			value = GICD_TYPER2_nASSGIcap;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}
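
/*
 * Worked example for the GICD_TYPER.ITLinesNumber computation above
 * (illustrative values): with nr_spis = 224 there are 224 + 32 = 256
 * implemented interrupt IDs, and (256 >> 5) - 1 = 7 encodes
 * "32 * (7 + 1) interrupt lines" in the bottom field of the register.
 */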

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_CTLR: {
		bool was_enabled, is_hwsgi;

		mutex_lock(&vcpu->kvm->arch.config_lock);

		was_enabled = dist->enabled;
		is_hwsgi = dist->nassgireq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
			val &= ~GICD_CTLR_nASSGIreq;

		/* Dist stays enabled? nASSGIreq is RO */
		if (was_enabled && dist->enabled) {
			val &= ~GICD_CTLR_nASSGIreq;
			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
		}

		/* Switching HW SGIs? */
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		if (is_hwsgi != dist->nassgireq)
			vgic_v4_configure_vsgis(vcpu->kvm);

		if (kvm_vgic_global_state.has_gicv4_1 &&
		    was_enabled != dist->enabled)
			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
		else if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		break;
	}
	case GICD_TYPER:
	case GICD_TYPER2:
	case GICD_IIDR:
		/* This is at best for documentation purposes... */
		return;
	}
}

static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u32 reg;

	switch (addr & 0x0c) {
	case GICD_TYPER2:
		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
			return -EINVAL;
		return 0;
	case GICD_IIDR:
		reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
		if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
			return -EINVAL;

		reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
		switch (reg) {
		case KVM_VGIC_IMP_REV_2:
		case KVM_VGIC_IMP_REV_3:
			dist->implementation_rev = reg;
			return 0;
		default:
			return -EINVAL;
		}
	case GICD_CTLR:
		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1)
			val &= ~GICD_CTLR_nASSGIreq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		return 0;
	}

	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
	return 0;
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, intid);

	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}
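
/*
 * Illustrative example: a guest write of 0x00020100 to a GICD_IROUTERn
 * register sets Aff2 = 2, Aff1 = 1, Aff0 = 0 for that SPI, and the
 * interrupt is retargeted to the vcpu whose MPIDR carries that affinity.
 */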

bool vgic_lpis_enabled(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
}
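
/*
 * Note the exact comparison above: while a disable is in flight,
 * vgic_mmio_write_v3r_ctlr() below parks the ctlr value at GICR_CTLR_RWP,
 * so LPIs are already reported as disabled for the whole transition.
 */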

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	unsigned long val;

	val = atomic_read(&vgic_cpu->ctlr);
	if (vgic_get_implementation_rev(vcpu) >= KVM_VGIC_IMP_REV_3)
		val |= GICR_CTLR_IR | GICR_CTLR_CES;

	return val;
}

static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u32 ctlr;

	if (!vgic_has_its(vcpu->kvm))
		return;

	if (!(val & GICR_CTLR_ENABLE_LPIS)) {
		/*
		 * Don't disable if RWP is set, as there is already an
		 * ongoing disable. Funky guest...
		 */
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
					      GICR_CTLR_ENABLE_LPIS,
					      GICR_CTLR_RWP);
		if (ctlr != GICR_CTLR_ENABLE_LPIS)
			return;

		vgic_flush_pending_lpis(vcpu);
		vgic_its_invalidate_all_caches(vcpu->kvm);
		atomic_set_release(&vgic_cpu->ctlr, 0);
	} else {
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
					      GICR_CTLR_ENABLE_LPIS);
		if (ctlr != 0)
			return;

		vgic_enable_lpis(vcpu);
	}
}

static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;

	if (!rdreg)
		return false;

	if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
		return false;
	} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
		struct list_head *rd_regions = &vgic->rd_regions;
		gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;

		/*
		 * The rdist is the last one of its redist region:
		 * check that no other contiguous region starts right
		 * after it and already holds a rdist.
		 */
		list_for_each_entry(iter, rd_regions, list) {
			if (iter->base == end && iter->free_index > 0)
				return false;
		}
	}
	return true;
}
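
/*
 * Illustrative layout for the contiguity check above: with a first
 * region covering [0x0, 0x20000) (one redistributor) and a second one
 * starting at 0x20000, the rdist at the tail of the first region is
 * not "Last" once the second region contains at least one rdist, as
 * the GICR_TYPER.Last walk continues across contiguous regions.
 */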

static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);

	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	if (vgic_mmio_vcpu_rdist_is_last(vcpu))
		value |= GICR_TYPER_LAST;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len,
					 unsigned long val)
{
	int ret;

	ret = vgic_uaccess_write_spending(vcpu, addr, len, val);
	if (ret)
		return ret;

	return vgic_uaccess_write_cpending(vcpu, addr, len, ~val);
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_SameAsInner;
	}
}

u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}
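
/*
 * Usage sketch, taken from vgic_sanitise_propbaser() below: the helper
 * extracts one field, runs it through the per-field policy and writes
 * the result back in place:
 *
 *	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
 *				  GICR_PROPBASER_SHAREABILITY_SHIFT,
 *				  vgic_sanitise_shareability);
 */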

#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;

	return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}

static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_lpis_enabled(vcpu))
		return;

	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}
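
/*
 * The cmpxchg64() loop above makes the read-modify-write atomic against
 * concurrent 32-bit guest accesses to either half of PROPBASER without
 * taking a lock: if another writer sneaks in, the update is simply
 * recomputed from the fresh value.
 */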

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 value = vgic_cpu->pendbaser;

	value &= ~GICR_PENDBASER_PTZ;

	return extract_bytes(value, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_lpis_enabled(vcpu))
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}

static unsigned long vgic_mmio_read_sync(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len)
{
	return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
}

static void vgic_set_rdist_busy(struct kvm_vcpu *vcpu, bool busy)
{
	if (busy) {
		atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
		smp_mb__after_atomic();
	} else {
		smp_mb__before_atomic();
		atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
	}
}
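
/*
 * The barriers above pair with vgic_mmio_read_sync(): a guest polling
 * GICR_SYNCR observes the busy count raised before the invalidation
 * work starts, and dropped only once it has completed.
 */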

static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_irq *irq;
	u32 intid;

	/*
	 * If the guest wrote only to the upper 32bit part of the
	 * register, drop the write on the floor, as it is only for
	 * vPEs (which we don't support for obvious reasons).
	 *
	 * Also discard the access if LPIs are not enabled.
	 */
	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
		return;

	intid = lower_32_bits(val);
	if (intid < VGIC_MIN_LPI)
		return;

	vgic_set_rdist_busy(vcpu, true);

	irq = vgic_get_irq(vcpu->kvm, intid);
	if (irq) {
		vgic_its_inv_lpi(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_set_rdist_busy(vcpu, false);
}

static void vgic_mmio_write_invall(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	/* See vgic_mmio_write_invlpi() for the early return rationale */
	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
		return;

	vgic_set_rdist_busy(vcpu, true);
	vgic_its_invall(vcpu);
	vgic_set_rdist_busy(vcpu, false);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
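
/*
 * Illustrative expansion: for GICD_ISENABLER (1 bit per interrupt) the
 * macro emits two back-to-back regions, a 4-byte RAZ/WI one covering the
 * 32 private interrupts, followed by the real handlers for the SPI range.
 */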

static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
		NULL, vgic_mmio_uaccess_write_v3_misc,
		16, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rd_registers[] = {
	/* RD_base registers */
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
		NULL, vgic_mmio_uaccess_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
		vgic_mmio_read_raz, vgic_mmio_write_invlpi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
		vgic_mmio_read_raz, vgic_mmio_write_invall, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
		vgic_mmio_read_sync, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
	/* SGI_base registers */
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
		vgic_mmio_read_group, vgic_mmio_write_group, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
	int ret = 0;

	lockdep_assert_held(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		goto out_unlock;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set. Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		goto out_unlock;

	if (!vgic_v3_check_base(kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;

	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rd_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_unlock(&kvm->arch.config_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
	if (ret)
		return ret;

	/* Protected by slots_lock */
	rdreg->free_index++;
	return 0;

out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
}

static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long c;
	int ret = 0;

	lockdep_assert_held(&kvm->slots_lock);

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so iterate over the previous ones. */
		int i;

		for (i = 0; i < c; i++) {
			vcpu = kvm_get_vcpu(kvm, i);
			vgic_unregister_redist_iodev(vcpu);
		}
	}

	return ret;
}

/**
 * vgic_v3_alloc_redist_region - Allocate a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (i.e. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
				       gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	int nr_vcpus = atomic_read(&kvm->online_vcpus);
	size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE
			    : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE;
	int ret;

	/* Does the region wrap around the end of the address space? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);

		/* Don't mix single region and discrete redist regions */
		if (!count && rdreg->count)
			return -EINVAL;

		if (!count)
			return -EEXIST;

		if (index != rdreg->index + 1)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
		vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size);
	if (ret)
		goto free;

	rdreg->base = base;
	rdreg->count = count;
	rdreg->free_index = 0;
	rdreg->index = index;

	list_add_tail(&rdreg->list, rd_regions);
	return 0;
free:
	kfree(rdreg);
	return ret;
}

void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
	struct kvm_vcpu *vcpu;
	unsigned long c;

	lockdep_assert_held(&kvm->arch.config_lock);

	/* Garbage collect the region */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (vcpu->arch.vgic_cpu.rdreg == rdreg)
			vcpu->arch.vgic_cpu.rdreg = NULL;
	}

	list_del(&rdreg->list);
	kfree(rdreg);
}

int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	mutex_lock(&kvm->arch.config_lock);
	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
	mutex_unlock(&kvm->arch.config_lock);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU. Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret) {
		struct vgic_redist_region *rdreg;

		mutex_lock(&kvm->arch.config_lock);
		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		vgic_v3_free_redist_region(kvm, rdreg);
		mutex_unlock(&kvm->arch.config_lock);
		return ret;
	}

	return 0;
}

int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rd_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		return vgic_v3_has_cpu_sysregs_attr(vcpu, attr);
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
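
/*
 * Worked example (illustrative): SGI_AFFINITY_LEVEL(reg, 1) extracts the
 * Aff1 field from bits [23:16] of the ICC_SGI1R_EL1 value and shifts it
 * down to bits [15:8], its position within an MPIDR.
 */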

static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
{
	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, sgi);
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * An access targeting Group0 SGIs can only generate
	 * those, while an access targeting Group1 SGIs can
	 * generate interrupts of either group.
	 */
	if (!irq->group || allow_group1) {
		if (!irq->hw) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

	vgic_put_irq(vcpu->kvm, irq);
}

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 *
 * If the interrupt routing mode bit is not set, we iterate over the Aff0
 * bits and signal the VCPUs matching the provided Aff{3,2,1}.
 *
 * If this bit is set, we signal all VCPUs except the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	unsigned long target_cpus;
	u64 mpidr;
	u32 sgi, aff0;
	unsigned long c;

	sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);

	/* Broadcast */
	if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
		kvm_for_each_vcpu(c, c_vcpu, kvm) {
			/* Don't signal the calling VCPU */
			if (c_vcpu == vcpu)
				continue;

			vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
		}

		return;
	}

	/* We iterate over affinities to find the corresponding vcpus */
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
	target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);

	for_each_set_bit(aff0, &target_cpus, hweight_long(ICC_SGI1R_TARGET_LIST_MASK)) {
		c_vcpu = kvm_mpidr_to_vcpu(kvm, mpidr | aff0);
		if (c_vcpu)
			vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
	}
}
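
/*
 * Illustrative example: reg = (1ULL << ICC_SGI1R_SGI_ID_SHIFT) | 0x3
 * requests SGI 1 for the vcpus with MPIDR Aff3.Aff2.Aff1 = 0.0.0 and
 * Aff0 = 0 or 1 (TargetList bits 0 and 1 set).
 */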

int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rd_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
	};

	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}

int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u32 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}