/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <kvm/iodev.h>

#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_MAX_IRQS		1024
#define VGIC_V2_MAX_CPUS	8
#define VGIC_V3_MAX_CPUS	255

#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
#endif

#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS must be <= 1024"
#endif

/*
 * The GIC distributor registers describing interrupts have two parts:
 * - 32 per-CPU interrupts (SGI + PPI)
 * - a bunch of shared interrupts (SPI)
 */
struct vgic_bitmap {
	/*
	 * - One UL per VCPU for private interrupts (assumes UL is at
	 *   least 32 bits)
	 * - As many UL as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field, one UL per vcpu (the state for vcpu n is in
	 * private[n]). The shared interrupts are accessed via the
	 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
	 */
	unsigned long *private;
	unsigned long *shared;
};

struct vgic_bytemap {
	/*
	 * - 8 u32 per VCPU for private interrupts
	 * - As many u32 as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field (the state for vcpu n is in private[n*8] to
	 * private[n*8 + 7]). The shared interrupts are accessed via
	 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
	 * shared[(n-32)/4] word).
	 */
	u32 *private;
	u32 *shared;
};
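
/*
 * A minimal sketch of how the split private/shared layout above is
 * indexed, assuming a hypothetical helper name (the real accessors live
 * in virt/kvm/arm/vgic.c and are not part of this header):
 */
static inline int vgic_bitmap_example_read(struct vgic_bitmap *map,
					   int vcpu_id, int irq)
{
	/* SGIs and PPIs (irq < 32): bit irq of this VCPU's private word */
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return !!(map->private[vcpu_id] & (1UL << irq));

	/* SPIs: bit (irq - 32) of the shared bitmap */
	irq -= VGIC_NR_PRIVATE_IRQS;
	return !!(map->shared[irq / (8 * sizeof(unsigned long))] &
		  (1UL << (irq % (8 * sizeof(unsigned long)))));
}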

struct kvm_vcpu;

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)
#define LR_EOI_INT		(1 << 2)
#define LR_HW			(1 << 3)

struct vgic_lr {
	unsigned irq:10;
	union {
		unsigned hwirq:10;
		unsigned source:3;
	};
	unsigned state:4;
};

struct vgic_vmcr {
	u32 ctlr;
	u32 abpr;
	u32 bpr;
	u32 pmr;
};

struct vgic_ops {
	struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
	void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
	void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
	u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
	void (*clear_eisr)(struct kvm_vcpu *vcpu);
	u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
	void (*enable_underflow)(struct kvm_vcpu *vcpu);
	void (*disable_underflow)(struct kvm_vcpu *vcpu);
	void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void (*enable)(struct kvm_vcpu *vcpu);
};

struct vgic_params {
	/* vgic type */
	enum vgic_type type;
	/* Physical address of vgic virtual cpu interface */
	phys_addr_t vcpu_base;
	/* Number of list registers */
	u32 nr_lr;
	/* Maintenance interrupt number */
	unsigned int maint_irq;
	/* Virtual control interface base address */
	void __iomem *vctrl_base;
	int max_gic_vcpus;
	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool can_emulate_gicv2;
};

struct vgic_vm_ops {
	bool (*queue_sgi)(struct kvm_vcpu *, int irq);
	void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
	int (*init_model)(struct kvm *);
	int (*map_resources)(struct kvm *, const struct vgic_params *);
};

struct vgic_io_device {
	gpa_t addr;
	int len;
	const struct vgic_io_range *reg_ranges;
	struct kvm_vcpu *redist_vcpu;
	struct kvm_io_device dev;
};

struct irq_phys_map {
	u32 virt_irq;
	u32 phys_irq;
	u32 irq;
	bool active;
};

struct irq_phys_map_entry {
	struct list_head entry;
	struct rcu_head rcu;
	struct irq_phys_map map;
};
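
/*
 * A minimal sketch of how the vgic_ops indirection is used, assuming a
 * hypothetical helper name (the real list-register handling lives in
 * virt/kvm/arm/vgic.c): read a list register through the GICv2/GICv3
 * specific backend, clear its pending/active state and write it back.
 */
static inline void vgic_example_retire_lr(const struct vgic_ops *ops,
					  struct kvm_vcpu *vcpu, int lr_nr)
{
	struct vgic_lr lr = ops->get_lr(vcpu, lr_nr);

	lr.state &= ~LR_STATE_MASK;	/* neither pending nor active */
	ops->set_lr(vcpu, lr_nr, lr);
	ops->sync_lr_elrsr(vcpu, lr_nr, lr);
}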

struct vgic_dist {
	spinlock_t lock;
	bool in_kernel;
	bool ready;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32 vgic_model;

	int nr_cpus;
	int nr_irqs;

	/* Virtual control interface mapping */
	void __iomem *vctrl_base;

	/* Distributor and vcpu interface mapping in the guest */
	phys_addr_t vgic_dist_base;
	/* GICv2 and GICv3 use different mapped register blocks */
	union {
		phys_addr_t vgic_cpu_base;
		phys_addr_t vgic_redist_base;
	};

	/* Distributor enabled */
	u32 enabled;

	/* Interrupt enabled (one bit per IRQ) */
	struct vgic_bitmap irq_enabled;

	/* Level-triggered interrupt external input is asserted */
	struct vgic_bitmap irq_level;

	/* Interrupt state is pending on the distributor */
	struct vgic_bitmap irq_pending;

	/*
	 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
	 * interrupts. Essentially holds the state of the flip-flop in
	 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
	 * Once set, it is only cleared for level-triggered interrupts on
	 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
	 */
	struct vgic_bitmap irq_soft_pend;

	/* Level-triggered interrupt queued on the VCPU interface */
	struct vgic_bitmap irq_queued;

	/* Interrupt was active when unqueued from the VCPU interface */
	struct vgic_bitmap irq_active;

	/* Interrupt priority. Not used yet. */
	struct vgic_bytemap irq_priority;

	/* Level/edge triggered */
	struct vgic_bitmap irq_cfg;

	/*
	 * Source CPU per SGI and target CPU:
	 *
	 * Each byte represents an SGI observable on a VCPU; each bit of
	 * this byte indicates whether the corresponding VCPU has
	 * generated this interrupt. This is a GICv2 feature only.
	 *
	 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
	 * the SGIs observable on VCPUn.
	 */
	u8 *irq_sgi_sources;

	/*
	 * Target CPU for each SPI:
	 *
	 * Array of available SPIs, each byte indicating the target
	 * VCPU for that SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
	 */
	u8 *irq_spi_cpu;

	/*
	 * Reverse lookup of irq_spi_cpu for faster computation of
	 * pending interrupts:
	 *
	 * Array of bitmaps, one per VCPU, describing whether IRQn is
	 * routed to a particular VCPU.
	 */
	struct vgic_bitmap *irq_spi_target;

	/* Target MPIDR for each IRQ (needed for GICv3 IROUTERn only) */
	u32 *irq_spi_mpidr;

	/* Bitmap indicating which CPU has something pending */
	unsigned long *irq_pending_on_cpu;

	/* Bitmap indicating which CPU has active IRQs */
	unsigned long *irq_active_on_cpu;

	struct vgic_vm_ops vm_ops;
	struct vgic_io_device dist_iodev;
	struct vgic_io_device *redist_iodevs;

	/* Virtual irq to hwirq mapping */
	spinlock_t irq_phys_map_lock;
	struct list_head irq_phys_map_list;
};

struct vgic_v2_cpu_if {
	u32 vgic_hcr;
	u32 vgic_vmcr;
	u32 vgic_misr;	/* Saved only */
	u64 vgic_eisr;	/* Saved only */
	u64 vgic_elrsr;	/* Saved only */
	u32 vgic_apr;
	u32 vgic_lr[VGIC_V2_MAX_LRS];
};

struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
	u32 vgic_hcr;
	u32 vgic_vmcr;
	u32 vgic_sre;	/* Restored only, change ignored */
	u32 vgic_misr;	/* Saved only */
	u32 vgic_eisr;	/* Saved only */
	u32 vgic_elrsr;	/* Saved only */
	u32 vgic_ap0r[4];
	u32 vgic_ap1r[4];
	u64 vgic_lr[VGIC_V3_MAX_LRS];
#endif
};

struct vgic_cpu {
	/* Per-IRQ to LR mapping */
	u8 *vgic_irq_lr_map;

	/* Pending/active/both interrupts on this VCPU */
	DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
	DECLARE_BITMAP(active_percpu, VGIC_NR_PRIVATE_IRQS);
	DECLARE_BITMAP(pend_act_percpu, VGIC_NR_PRIVATE_IRQS);

	/* Pending/active/both shared interrupts, dynamically sized */
	unsigned long *pending_shared;
	unsigned long *active_shared;
	unsigned long *pend_act_shared;

	/* Bitmap of used/free list registers */
	DECLARE_BITMAP(lr_used, VGIC_V2_MAX_LRS);

	/* Number of list registers on this CPU */
	int nr_lr;

	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if vgic_v2;
		struct vgic_v3_cpu_if vgic_v3;
	};

	/* Protected by the distributor's irq_phys_map_lock */
	struct list_head irq_phys_map_list;
};

#define LR_EMPTY	0xff

#define INT_STATUS_EOI		(1 << 0)
#define INT_STATUS_UNDERFLOW	(1 << 1)

struct kvm;
struct kvm_vcpu;
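
/*
 * A minimal sketch of how the maintenance interrupt status flags above
 * are meant to be consumed, assuming a hypothetical helper name (the
 * real handling is in virt/kvm/arm/vgic.c): query the backend through
 * vgic_ops and test for an underflow condition.
 */
static inline bool vgic_example_underflow(const struct vgic_ops *ops,
					  const struct kvm_vcpu *vcpu)
{
	return !!(ops->get_interrupt_status(vcpu) & INT_STATUS_UNDERFLOW);
}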

int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level);
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
			       struct irq_phys_map *map, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
					   int virt_irq, int irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k)		((k)->arch.vgic.ready)

int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#else
static inline int vgic_v3_probe(struct device_node *vgic_node,
				const struct vgic_ops **ops,
				const struct vgic_params **params)
{
	return -ENODEV;
}
#endif

#endif	/* __ASM_ARM_KVM_VGIC_H */