// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
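
/*
 * Propagate an interrupt's current priority and group to the underlying
 * GICv4.1 vSGI configuration of its HW SGI; a failure is only reported
 * as a warning.
 */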
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
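
/*
 * GICD_ISENABLER: each set bit in the written value enables the
 * corresponding interrupt. HW-mapped SGIs additionally get their host
 * interrupt enabled, and mapped level interrupts resample the physical
 * line so that a state change made while the interrupt was disabled is
 * not lost.
 */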
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}
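
/*
 * Read accesses to the pending registers return the pending state. For a
 * HW SGI the pending bit is read back from the physical GIC, for a mapped
 * level interrupt the physical line is sampled, and for everything else
 * the software pending state is used.
 */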
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			val = irq_is_pending(irq);
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}
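
/*
 * GICD_ISPENDR: each set bit in the written value marks the corresponding
 * interrupt as pending. SGI bits are write-ignored on GICv2, and HW SGIs
 * have their pending state set directly in the physical GIC.
 */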
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}
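
/*
 * Update the active state of a single interrupt, propagating the change
 * to the physical GIC for HW interrupts, and queue the interrupt if it
 * is now active. GICv4.1 vSGIs have no active state to speak of, so the
 * virtual state is simply cleared for them.
 */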
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}
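
/*
 * GICD_IPRIORITYR: one byte of priority per interrupt. Only the upper
 * VGIC_PRI_BITS bits of each byte are stored, so the lower bits always
 * read as zero.
 */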
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
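
/*
 * Look up the handler for a given register offset. The region table is
 * sorted by reg_offset, so a binary search is sufficient.
 */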
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}
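
/*
 * Check that an access of the given width hits a region that supports it,
 * is suitably aligned and, for per-IRQ registers, targets an interrupt
 * that has actually been allocated.
 */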
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			     gpa_t addr, u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			      gpa_t addr, const u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, dev, offset, val);
}
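
/*
 * Dispatch a guest MMIO access from the KVM I/O bus to the matching
 * register handler, routing redistributor and ITS accesses to their
 * respective targets.
 */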
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

const struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}