// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kstrtox.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/string_choices.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;

void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
}

static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

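/*
 * Fold the state of the LRs back into the software model after a guest
 * exit: sync the active/pending bits into the corresponding struct
 * vgic_irq, notify any irqfds of guest EOIs, and handle resampling of
 * mapped level-triggered interrupts.
 */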
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_EL2_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;
		bool deactivated;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_vcpu_irq(vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

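/*
 * Pack the generic struct vgic_vmcr fields into the ICH_VMCR_EL2 shadow
 * format (vgic_v3_get_vmcr() below performs the reverse). Only a GICv2
 * guest gets to control the VAckCtl and VFIQEn bits; with SRE=1 they
 * have fixed values.
 */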
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
				ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			       ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = FIELD_GET(ICH_VTR_EL2_IDbits,
						    kvm_vgic_global_state.ich_vtr_el2);
	vcpu->arch.vgic_cpu.num_pri_bits = FIELD_GET(ICH_VTR_EL2_PRIbits,
						     kvm_vgic_global_state.ich_vtr_el2) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EL2_En;
}

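/*
 * Set up the trap bits in the vcpu's view of ICH_HCR_EL2: a GICv2 guest
 * has all its GICv3 sysreg accesses trapped, while a GICv3 guest only
 * gets the traps requested on the command line or required by errata
 * workarounds.
 */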
void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (!vgic_is_v3(vcpu->kvm))
		return;

	/* Hide GICv3 sysreg if necessary */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
				      ICH_HCR_EL2_TC);
		return;
	}

	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_EL2_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_EL2_TDIR;
}

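/*
 * Re-read the pending state of an LPI from the guest's pending table
 * and fold it into pending_latch. Once observed as set, the bit is
 * consumed, i.e. cleared in guest memory.
 */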
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++)
		free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}

static void map_all_vpes(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++)
		WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
						dist->its_vm.vpes[i]->irq));
}

/*
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * kvm lock and all vcpu lock must be held
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	unsigned long index;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(kvm);
		vlpi_avail = true;
	}

	xa_for_each(&dist->lpi_xa, index, irq) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(kvm);

	return ret;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		size_t sz = vgic_v3_rd_region_size(kvm, rdreg);

		if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
				       rdreg->base, SZ_64K, sz))
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

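/*
 * vgic_v3_rdist_region_from_index - Walk the region list and return the
 * redistributor region with the given index, or NULL when no such
 * region has been registered.
 */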
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %ld redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_debug("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_debug("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm))
		return -EBUSY;

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

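/*
 * vgic_v3_cpuif_trap is enabled when at least some GICv3 sysreg accesses
 * have to be trapped and emulated; vgic_v3_has_v2_compat is enabled when
 * the CPU interface is also capable of GICv2 (MMIO) operation.
 */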
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
DEFINE_STATIC_KEY_FALSE(vgic_v3_has_v2_compat);

static int __init early_group0_trap_cfg(char *buf)
{
	return kstrtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return kstrtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return kstrtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return kstrtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

static const struct midr_range broken_seis[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

static bool vgic_v3_broken_seis(void)
{
	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_SEIS) &&
		is_midr_in_range_list(broken_seis));
}

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %s\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 str_enabled_disabled(gicv4_enable));
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	/*
	 * Flip the static branch if the HW supports v2, even if we're
	 * not using it (such as in protected mode).
	 */
	if (has_v2)
		static_branch_enable(&vgic_v3_has_v2_compat);

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (vgic_v3_broken_seis()) {
		kvm_info("GICv3 with broken locally generated SEI\n");

		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_EL2_SEIS;
		group0_trap = true;
		group1_trap = true;
		if (ich_vtr_el2 & ICH_VTR_EL2_TDS)
			dir_trap = true;
		else
			common_trap = true;
	}

	if (group0_trap || group1_trap || common_trap || dir_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "",
			 dir_trap    ? "D"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

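/*
 * Load/put the GICv3 CPU interface state around vcpu_load()/vcpu_put():
 * VMCR and the APRs are restored and saved via the hypervisor, traps are
 * (de)activated on VHE, and the GICv4 vPE is made resident/non-resident.
 */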
"D" : ""); 734 static_branch_enable(&vgic_v3_cpuif_trap); 735 } 736 737 kvm_vgic_global_state.vctrl_base = NULL; 738 kvm_vgic_global_state.type = VGIC_V3; 739 kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS; 740 741 return 0; 742 } 743 744 void vgic_v3_load(struct kvm_vcpu *vcpu) 745 { 746 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 747 748 /* If the vgic is nested, perform the full state loading */ 749 if (vgic_state_is_nested(vcpu)) { 750 vgic_v3_load_nested(vcpu); 751 return; 752 } 753 754 if (likely(!is_protected_kvm_enabled())) 755 kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if); 756 757 if (has_vhe()) 758 __vgic_v3_activate_traps(cpu_if); 759 760 WARN_ON(vgic_v4_load(vcpu)); 761 } 762 763 void vgic_v3_put(struct kvm_vcpu *vcpu) 764 { 765 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 766 767 if (vgic_state_is_nested(vcpu)) { 768 vgic_v3_put_nested(vcpu); 769 return; 770 } 771 772 if (likely(!is_protected_kvm_enabled())) 773 kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if); 774 WARN_ON(vgic_v4_put(vcpu)); 775 776 if (has_vhe()) 777 __vgic_v3_deactivate_traps(cpu_if); 778 } 779