/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H

#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <kvm/iodev.h>
#include <linux/list.h>
#include <linux/jump_label.h>

#include <linux/irqchip/arm-gic-v4.h>

#define VGIC_V3_MAX_CPUS	512
#define VGIC_V2_MAX_CPUS	8
#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
#define VGIC_MAX_SPI		1019
#define VGIC_MAX_RESERVED	1023
#define VGIC_MIN_LPI		8192
#define KVM_IRQCHIP_NUM_PINS	(1020 - 32)

#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
			 (irq) <= VGIC_MAX_SPI)

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

/* Same for all guests, as it depends only on the _host's_ GIC model */
struct vgic_global {
	/* type of the host GIC */
	enum vgic_type		type;

	/* Physical address of vgic virtual cpu interface */
	phys_addr_t		vcpu_base;

	/* GICV mapping, kernel VA */
	void __iomem		*vcpu_base_va;
	/* GICV mapping, HYP VA */
	void __iomem		*vcpu_hyp_va;

	/* virtual control interface mapping, kernel VA */
	void __iomem		*vctrl_base;
	/* virtual control interface mapping, HYP VA */
	void __iomem		*vctrl_hyp;

	/* Number of implemented list registers */
	int			nr_lr;

	/* Maintenance IRQ number */
	unsigned int		maint_irq;

	/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
	int			max_gic_vcpus;

	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool			can_emulate_gicv2;

	/* Hardware has GICv4? */
	bool			has_gicv4;
	bool			has_gicv4_1;

	/* Pseudo GICv3 from outer space */
	bool			no_hw_deactivation;

	/* GIC system register CPU interface */
	struct static_key_false gicv3_cpuif;

	u32			ich_vtr_el2;
};

extern struct vgic_global kvm_vgic_global_state;

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - lr)

enum vgic_irq_config {
	VGIC_CONFIG_EDGE = 0,
	VGIC_CONFIG_LEVEL
};

/*
 * Per-irq ops overriding some common behaviours.
 *
 * Always called in a non-preemptible section and the functions can use
 * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
 */
struct irq_ops {
	/* Per interrupt flags for special-cased interrupts */
	unsigned long flags;

#define VGIC_IRQ_SW_RESAMPLE	BIT(0)	/* Clear the active state for resampling */

	/*
	 * Callback provided by in-kernel devices that can tell us the
	 * state of the input level of a mapped level-triggered IRQ faster
	 * than peeking into the physical GIC.
	 */
	bool (*get_input_level)(int vintid);
};
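
/*
 * Illustrative sketch (hypothetical device, not an in-tree user): a device
 * backing a HW-mapped, level-triggered interrupt can pass an irq_ops when
 * mapping it. get_input_level lets the vgic query the line state without
 * peeking into the physical GIC, and VGIC_IRQ_SW_RESAMPLE asks for the
 * active state to be cleared so the line can be resampled in software.
 * The my_dev_* names below are made up for the example.
 *
 *	static bool my_dev_get_input_level(int vintid)
 *	{
 *		return my_dev_line_asserted(vintid);
 *	}
 *
 *	static struct irq_ops my_dev_irq_ops = {
 *		.flags		  = VGIC_IRQ_SW_RESAMPLE,
 *		.get_input_level  = my_dev_get_input_level,
 *	};
 *
 *	err = kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, &my_dev_irq_ops);
 */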

struct vgic_irq {
	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
	struct rcu_head rcu;
	struct list_head ap_list;

	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
					 * SPIs and LPIs: The VCPU whose ap_list
					 * this is queued on.
					 */

	struct kvm_vcpu *target_vcpu;	/* The VCPU that this interrupt should
					 * be sent to, as a result of the
					 * targets reg (v2) or the
					 * affinity reg (v3).
					 */

	u32 intid;			/* Guest visible INTID */
	bool line_level;		/* Level only */
	bool pending_latch;		/* The pending latch state used to calculate
					 * the pending state for both level
					 * and edge triggered IRQs. */
	bool active;			/* not used for LPIs */
	bool enabled;
	bool hw;			/* Tied to HW IRQ */
	struct kref refcount;		/* Used for LPIs */
	u32 hwintid;			/* HW INTID number */
	unsigned int host_irq;		/* linux irq corresponding to hwintid */
	union {
		u8 targets;			/* GICv2 target VCPUs mask */
		u32 mpidr;			/* GICv3 target VCPU */
	};
	u8 source;			/* GICv2 SGIs only */
	u8 active_source;		/* GICv2 SGIs only */
	u8 priority;
	u8 group;			/* 0 == group 0, 1 == group 1 */
	enum vgic_irq_config config;	/* Level or edge */

	struct irq_ops *ops;

	void *owner;			/* Opaque pointer to reserve an interrupt
					   for in-kernel devices. */
};

static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
{
	return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
}

struct vgic_register_region;
struct vgic_its;

enum iodev_type {
	IODEV_CPUIF,
	IODEV_DIST,
	IODEV_REDIST,
	IODEV_ITS
};

struct vgic_io_device {
	gpa_t base_addr;
	union {
		struct kvm_vcpu *redist_vcpu;
		struct vgic_its *its;
	};
	const struct vgic_register_region *regions;
	enum iodev_type iodev_type;
	int nr_regions;
	struct kvm_io_device dev;
};
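
/*
 * The embedded kvm_io_device is what actually gets registered on the KVM
 * MMIO bus; a handler recovers the containing vgic_io_device (and with it
 * the base address and region array) via container_of(). A minimal sketch,
 * using hypothetical example_* helpers rather than the real vgic-mmio code:
 *
 *	static int example_mmio_read(struct kvm_vcpu *vcpu,
 *				     struct kvm_io_device *dev,
 *				     gpa_t addr, int len, void *val)
 *	{
 *		struct vgic_io_device *iodev =
 *			container_of(dev, struct vgic_io_device, dev);
 *		gpa_t offset = addr - iodev->base_addr;
 *
 *		return example_dispatch_region(vcpu, iodev, offset, len, val);
 *	}
 */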

struct vgic_its {
	/* The base address of the ITS control register frame */
	gpa_t			vgic_its_base;

	bool			enabled;
	struct vgic_io_device	iodev;
	struct kvm_device	*dev;

	/* These registers correspond to GITS_BASER{0,1} */
	u64			baser_device_table;
	u64			baser_coll_table;

	/* Protects the command queue */
	struct mutex		cmd_lock;
	u64			cbaser;
	u32			creadr;
	u32			cwriter;

	/* migration ABI revision in use */
	u32			abi_rev;

	/* Protects the device and collection lists */
	struct mutex		its_lock;
	struct list_head	device_list;
	struct list_head	collection_list;

	/*
	 * Caches the (device_id, event_id) -> vgic_irq translation for
	 * LPIs that are mapped and enabled.
	 */
	struct xarray		translation_cache;
};

struct vgic_state_iter;

struct vgic_redist_region {
	u32 index;
	gpa_t base;
	u32 count;		/* number of redistributors or 0 if single region */
	u32 free_index;		/* index of the next free redistributor */
	struct list_head list;
};

struct vgic_dist {
	bool			in_kernel;
	bool			ready;
	bool			initialized;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32			vgic_model;

	/* Implementation revision as reported in the GICD_IIDR */
	u32			implementation_rev;
#define KVM_VGIC_IMP_REV_2	2 /* GICv2 restorable groups */
#define KVM_VGIC_IMP_REV_3	3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
#define KVM_VGIC_IMP_REV_LATEST	KVM_VGIC_IMP_REV_3

	/* Userspace can write to GICv2 IGROUPR */
	bool			v2_groups_user_writable;

	/* Do injected MSIs require an additional device ID? */
	bool			msis_require_devid;

	int			nr_spis;

	/* base addresses in guest physical address space: */
	gpa_t			vgic_dist_base;		/* distributor */
	union {
		/* either a GICv2 CPU interface */
		gpa_t			vgic_cpu_base;
		/* or a number of GICv3 redistributor regions */
		struct list_head	rd_regions;
	};

	/* distributor enabled */
	bool			enabled;

	/* Wants SGIs without active state */
	bool			nassgireq;

	struct vgic_irq		*spis;

	struct vgic_io_device	dist_iodev;

	bool			has_its;
	bool			table_write_in_progress;

	/*
	 * Contains the attributes and gpa of the LPI configuration table.
	 * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
	 * one address across all redistributors.
	 * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
	 */
	u64			propbaser;

#define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
	struct xarray		lpi_xa;

	/* used by vgic-debug */
	struct vgic_state_iter *iter;

	/*
	 * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
	 * array, the property table pointer as well as allocation
	 * data. This essentially ties the Linux IRQ core and ITS
	 * together, and avoids leaking KVM's data structures anywhere
	 * else.
	 */
	struct its_vm		its_vm;
};

struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];

	unsigned int	used_lrs;
};

struct vgic_v3_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];

	/*
	 * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
	 * pending table pointer, the its_vm pointer and a few other
	 * HW specific things. As for the its_vm structure, this is
	 * linking the Linux IRQ subsystem and the ITS together.
	 */
	struct its_vpe	its_vpe;

	unsigned int	used_lrs;
};

struct vgic_cpu {
	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};

	struct vgic_irq *private_irqs;

	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */

	/*
	 * List of IRQs that this VCPU should consider because they are either
	 * Active or Pending (hence the name: AP list), or because they recently
	 * were one of the two and need to be migrated off this list to another
	 * VCPU.
	 */
	struct list_head ap_list_head;

	/*
	 * Members below are used with GICv3 emulation only and represent
	 * parts of the redistributor.
	 */
	struct vgic_io_device	rd_iodev;
	struct vgic_redist_region *rdreg;
	u32 rdreg_index;
	atomic_t syncr_busy;

	/* Contains the attributes and gpa of the LPI pending tables. */
	u64 pendbaser;
	/* GICR_CTLR.{ENABLE_LPIS,RWP} */
	atomic_t ctlr;

	/* Cache guest priority bits */
	u32 num_pri_bits;

	/* Cache guest interrupt ID bits */
	u32 num_id_bits;
};

extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;

int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);

int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			unsigned int intid, bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
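
/*
 * Minimal usage sketch (hypothetical in-kernel device, not an in-tree
 * example): the device claims the interrupt with kvm_vgic_set_owner() and
 * then drives the line with kvm_vgic_inject_irq(), passing the same owner
 * token so the vgic can verify the caller. For an SPI no target VCPU is
 * passed (NULL below); for a PPI, vcpu selects whose private interrupt is
 * affected. MY_DEV_SPI_INTID and my_dev are made up for the example.
 *
 *	err = kvm_vgic_set_owner(vcpu, MY_DEV_SPI_INTID, my_dev);
 *	if (err)
 *		return err;
 *
 *	kvm_vgic_inject_irq(kvm, NULL, MY_DEV_SPI_INTID, true, my_dev);
 *	...
 *	kvm_vgic_inject_irq(kvm, NULL, MY_DEV_SPI_INTID, false, my_dev);
 */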

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);

void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	((k)->arch.vgic.initialized)
#define vgic_ready(k)		((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
				 ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))

bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);

void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum number of VCPUs a guest
 * can use.
 */
static inline int kvm_vgic_get_max_vcpus(void)
{
	return kvm_vgic_global_state.max_gic_vcpus;
}

/**
 * kvm_vgic_setup_default_irq_routing:
 * Set up a default flat GSI routing table mapping all SPIs
 */
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);

int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);

struct kvm_kernel_irq_routing_entry;

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
			       struct kvm_kernel_irq_routing_entry *irq_entry);

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
				 struct kvm_kernel_irq_routing_entry *irq_entry);

int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu);

/* CPU HP callbacks */
void kvm_vgic_cpu_up(void);
void kvm_vgic_cpu_down(void);

#endif /* __KVM_ARM_VGIC_H */