// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#include <trace/events/kvm.h>

#include "irq.h"

#include "ioapic.h"

#include "lapic.h"

#include "hyperv.h"
#include "x86.h"
#include "xen.h"

static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	struct kvm_pic *pic = kvm->arch.vpic;
	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
				  line_status);
}

int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
			     struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
	int r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;
	unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	unsigned int dest_vcpus = 0;

	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
		return r;

	if (irq->dest_mode == APIC_DEST_PHYSICAL &&
	    irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
		pr_info("apic: phys broadcast and lowest prio\n");
		irq->delivery_mode = APIC_DM_FIXED;
	}

	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					 irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_lowest_prio_delivery(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq, dest_map);
		} else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
			if (!kvm_vector_hashing_enabled()) {
				if (!lowest)
					lowest = vcpu;
				else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
					lowest = vcpu;
			} else {
				__set_bit(i, dest_vcpu_bitmap);
				dest_vcpus++;
			}
		}
	}

	if (dest_vcpus != 0) {
		int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
					      dest_vcpu_bitmap, KVM_MAX_VCPUS);

		lowest = kvm_get_vcpu(kvm, idx);
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq, dest_map);

	return r;
}

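/*
 * Translate an MSI routing entry (the address/data pair supplied by
 * userspace) into the struct kvm_lapic_irq consumed by the APIC delivery
 * code.  When the VM uses the x2APIC MSI format, the extended destination
 * ID is also taken from address_hi (see x86_msi_msg_get_destid()).
 */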
void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq)
{
	struct msi_msg msg = { .address_lo = e->msi.address_lo,
			       .address_hi = e->msi.address_hi,
			       .data = e->msi.data };

	trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
			      (u64)msg.address_hi << 32 : 0), msg.data);

	irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
	irq->vector = msg.arch_data.vector;
	irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
	irq->trig_mode = msg.arch_data.is_level;
	irq->delivery_mode = msg.arch_data.delivery_mode << 8;
	irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
	irq->level = 1;
	irq->shorthand = APIC_DEST_NOSHORT;
}
EXPORT_SYMBOL_GPL(kvm_set_msi_irq);

static inline bool kvm_msi_route_invalid(struct kvm *kvm,
					 struct kvm_kernel_irq_routing_entry *e)
{
	return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct kvm_lapic_irq irq;

	if (kvm_msi_route_invalid(kvm, e))
		return -EINVAL;

	if (!level)
		return -1;

	kvm_set_msi_irq(kvm, e, &irq);

	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}

#ifdef CONFIG_KVM_HYPERV
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	if (!level)
		return -1;

	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}
#endif

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_lapic_irq irq;
	int r;

	switch (e->type) {
#ifdef CONFIG_KVM_HYPERV
	case KVM_IRQ_ROUTING_HV_SINT:
		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
				       line_status);
#endif

	case KVM_IRQ_ROUTING_MSI:
		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;

		kvm_set_msi_irq(kvm, e, &irq);

		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
			return r;
		break;

#ifdef CONFIG_KVM_XEN
	case KVM_IRQ_ROUTING_XEN_EVTCHN:
		if (!level)
			return -1;

		return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
#endif
	default:
		break;
	}

	return -EWOULDBLOCK;
}

int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

	if (irq_source_id >= BITS_PER_LONG) {
		pr_warn("exhausted allocatable IRQ sources!\n");
		irq_source_id = -EFAULT;
		goto unlock;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		pr_err("IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
	kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
	mutex_unlock(&kvm->irq_lock);
}

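/*
 * IRQ mask notifiers.  Writers add and remove entries under irq_lock;
 * kvm_fire_mask_notifiers() walks the list under irq_srcu and invokes the
 * callbacks for the GSI mapped to the given chip/pin whenever that pin's
 * mask state changes.
 */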
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}

void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	/* We can't check irqchip_in_kernel() here as some callers are
	 * currently initializing the irqchip. Other callers should therefore
	 * check kvm_arch_can_set_irq_routing() before calling this function.
	 */
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		if (irqchip_split(kvm))
			return -EINVAL;
		e->irqchip.pin = ue->u.irqchip.pin;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_SLAVE:
			e->irqchip.pin += PIC_NUM_PINS / 2;
			fallthrough;
		case KVM_IRQCHIP_PIC_MASTER:
			if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
				return -EINVAL;
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_IOAPIC:
			if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
				return -EINVAL;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			return -EINVAL;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;

		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;
		break;
#ifdef CONFIG_KVM_HYPERV
	case KVM_IRQ_ROUTING_HV_SINT:
		e->set = kvm_hv_set_sint;
		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
		e->hv_sint.sint = ue->u.hv_sint.sint;
		break;
#endif
#ifdef CONFIG_KVM_XEN
	case KVM_IRQ_ROUTING_XEN_EVTCHN:
		return kvm_xen_setup_evtchn(kvm, e, ue);
#endif
	default:
		return -EINVAL;
	}

	return 0;
}

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu)
{
	int r = 0;
	unsigned long i;
	struct kvm_vcpu *vcpu;

	if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
		return true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
					 irq->dest_id, irq->dest_mode))
			continue;

		if (++r == 2)
			return false;

		*dest_vcpu = vcpu;
	}

	return r == 1;
}
EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);

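/*
 * Default GSI routing: legacy IRQs 0..15 are wired to both the PIC
 * (master/slave chosen by SELECT_PIC(), pin = irq % 8) and the IOAPIC,
 * while GSIs 16..23 are routed to the IOAPIC only.
 */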
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}

void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
	if (!irqchip_split(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
			    ulong *ioapic_handled_vectors)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kernel_irq_routing_entry *entry;
	struct kvm_irq_routing_table *table;
	u32 i, nr_ioapic_pins;
	int idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
			       kvm->arch.nr_reserved_ioapic_pins);
	for (i = 0; i < nr_ioapic_pins; ++i) {
		hlist_for_each_entry(entry, &table->map[i], link) {
			struct kvm_lapic_irq irq;

			if (entry->type != KVM_IRQ_ROUTING_MSI)
				continue;

			kvm_set_msi_irq(vcpu->kvm, entry, &irq);

			if (irq.trig_mode &&
			    (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
						 irq.dest_id, irq.dest_mode) ||
			     kvm_apic_pending_eoi(vcpu, irq.vector)))
				__set_bit(irq.vector, ioapic_handled_vectors);
		}
	}
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_arch_irq_routing_update(struct kvm *kvm)
{
#ifdef CONFIG_KVM_HYPERV
	kvm_hv_irq_routing_update(kvm);
#endif
}