// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}
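
/*
 * For orientation, the GICH_LRn layout that the helpers below pack and
 * unpack looks roughly as follows; the authoritative encoding is the set
 * of GICH_LR_* definitions in <linux/irqchip/arm-gic.h>:
 *
 *   [31]    HW       - interrupt is backed by a physical interrupt
 *   [30]    Grp1     - group 1 interrupt
 *   [29]    Active   - GICH_LR_ACTIVE_BIT
 *   [28]    Pending  - GICH_LR_PENDING_BIT
 *   [27:23] Priority - top five bits of the 8-bit priority
 *   [19:10] PhysID   - physical INTID for HW interrupts; for SW SGIs,
 *                      bits [12:10] hold the source CPUID and bit [19]
 *                      requests an EOI maintenance interrupt (GICH_LR_EOI)
 *   [9:0]   VirtID   - the INTID presented to the guest
 */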

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;

		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 *
		 * Another case is when the interrupt requires a helping hand
		 * on deactivation (no HW deactivation, for example).
		 */
		if (vgic_irq_is_mapped_level(irq)) {
			bool resample = false;

			if (val & GICH_LR_PENDING_BIT) {
				irq->line_level = vgic_get_phys_line_level(irq);
				resample = !irq->line_level;
			} else if (vgic_irq_needs_resampling(irq) &&
				   !(irq->active || irq->pending_latch)) {
				resample = true;
			}

			if (resample)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
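
/*
 * As a rough worked example of the packing above (purely illustrative):
 * a pending, level-triggered, non-HW SPI with INTID 40, group 0 and a
 * guest priority of 0xa0 ends up as
 *
 *	40 | GICH_LR_EOI | GICH_LR_PENDING_BIT |
 *	((0xa0 >> 3) << GICH_LR_PRIORITY_SHIFT)
 *
 * i.e. the INTID in [9:0], an EOI maintenance interrupt requested, the
 * pending state bit set and the top five priority bits in [27:23].
 */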

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
	       GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
			GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
			GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
			GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		       GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		      GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		      GICH_VMCR_EOI_MODE_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
		      GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
		     GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
		      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
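
/*
 * Note that, like the LR priority field, the GICH_VMCR priority mask field
 * only holds the top five bits of the 8-bit priority mask, so (roughly
 * speaking) a guest PMR of 0xa0 round-trips through set/get above, while
 * 0xa7 reads back as 0xa0.
 */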

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		return ret;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		return ret;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			return ret;
		}
	}

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}
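
/*
 * GICH_ELRSRn has one bit per list register; broadly, a set bit means the
 * corresponding LR no longer holds live state, so only LRs whose bit is
 * clear need to be read back from the hardware below. Either way the LR
 * is cleared afterwards.
 */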
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = cpu_if->used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}

void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
}

void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	vgic_v2_vmcr_sync(vcpu);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}