Lines matching +full:lock +full:- +full:less (a full-text search for "lock-less") in arch/powerpc/kvm/book3s_xics.c, the KVM XICS interrupt-controller emulation; each match shows its source line number and enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
39 * Each ICS has a spin lock protecting the information about the IRQ
50 * - To speed up resends, keep a bitmap of "resend" set bits in the
53 * - Speed up server# -> ICP lookup (array ? hash table ?)
55 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
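The fragments above (source lines 39-55) describe the locking split this file uses: each ICS guards its per-source state with a raw spinlock, while the whole ICP state fits in one word and is updated lock-free, which is why the TODO only talks about making the ICS side lockless too. A minimal user-space model of that split, with simplified layout and hypothetical names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

/* ICP side: everything packed into one 64-bit word so any state
 * transition can be committed with a single compare-and-swap. */
struct icp_model {
	_Atomic uint64_t state_raw;
};

/* ICS side: per-source state guarded by a spinlock (arch_spin_lock
 * in the kernel; a pthread spinlock stands in here). */
struct ics_model {
	pthread_spinlock_t lock;
	struct {
		uint32_t server;
		uint8_t  priority;
		uint8_t  pq_state;	/* PQ_PRESENTED | PQ_QUEUED */
		uint8_t  resend;
		uint8_t  masked_pending;
	} irq_state[16];
};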
59 /* -- ICS routines -- */
81 return -EINVAL; in ics_deliver_irq()
83 state = &ics->irq_state[src]; in ics_deliver_irq()
84 if (!state->exists) in ics_deliver_irq()
85 return -EINVAL; in ics_deliver_irq()
96 if (!state->lsi && level == 0) /* noop for MSI */ in ics_deliver_irq()
100 pq_old = state->pq_state; in ics_deliver_irq()
101 if (state->lsi) { in ics_deliver_irq()
112 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); in ics_deliver_irq()
118 /* Record which CPU this arrived on for passed-through interrupts */ in ics_deliver_irq()
119 if (state->host_irq) in ics_deliver_irq()
120 state->intr_cpu = raw_smp_processor_id(); in ics_deliver_irq()
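The do/while at source lines 100-112 updates a source's P ("presented") and Q ("queued") bits without taking the ICS lock. A runnable sketch of the MSI case, assuming PQ_PRESENTED=1 and PQ_QUEUED=2 as in book3s_xics.h: accepting a message sets P and shifts the old P into Q, so a second message arriving before the EOI is remembered exactly once.

#include <stdatomic.h>

#define PQ_PRESENTED 1u		/* values as in book3s_xics.h */
#define PQ_QUEUED    2u

/* Lock-free MSI accept: set P, shift the previous P into Q.  Returns
 * the new state; the caller presents the interrupt to the ICP only
 * when it equals PQ_PRESENTED (P=1, Q=0). */
static unsigned int msi_accept(_Atomic unsigned int *pq_state)
{
	unsigned int pq_old, pq_new;

	do {
		pq_old = atomic_load(pq_state);
		pq_new = ((pq_old << 1) & 3u) | PQ_PRESENTED;
	} while (!atomic_compare_exchange_weak(pq_state, &pq_old, pq_new));

	return pq_new;
}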
131 struct ics_irq_state *state = &ics->irq_state[i]; in ics_check_resend()
132 if (state->resend) { in ics_check_resend()
133 XICS_DBG("resend %#x prio %#x\n", state->number, in ics_check_resend()
134 state->priority); in ics_check_resend()
135 icp_deliver_irq(xics, icp, state->number, true); in ics_check_resend()
148 arch_spin_lock(&ics->lock); in write_xive()
150 state->server = server; in write_xive()
151 state->priority = priority; in write_xive()
152 state->saved_priority = saved_priority; in write_xive()
154 if ((state->masked_pending || state->resend) && priority != MASKED) { in write_xive()
155 state->masked_pending = 0; in write_xive()
156 state->resend = 0; in write_xive()
160 arch_spin_unlock(&ics->lock); in write_xive()
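A reconstructed sketch of write_xive() around the fragments above (drawn from the surrounding kernel code, not verbatim): the routing update and the decision to re-deliver a source that was pending while masked happen under the ICS lock, and the caller performs the actual delivery after the lock is dropped.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define MASKED 0xff

struct ics_irq_model {
	uint32_t server;
	uint8_t  priority, saved_priority;
	uint8_t  masked_pending, resend;
};

/* Update a source's routing under the ICS lock; tell the caller to
 * re-deliver if the source was pending while masked (or flagged for
 * resend) and the new priority unmasks it. */
static bool write_xive_model(pthread_spinlock_t *lock,
			     struct ics_irq_model *state,
			     uint32_t server, uint8_t priority,
			     uint8_t saved_priority)
{
	bool deliver = false;

	pthread_spin_lock(lock);
	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}
	pthread_spin_unlock(lock);

	return deliver;
}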
168 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_set_xive()
175 return -ENODEV; in kvmppc_xics_set_xive()
179 return -EINVAL; in kvmppc_xics_set_xive()
180 state = &ics->irq_state[src]; in kvmppc_xics_set_xive()
184 return -EINVAL; in kvmppc_xics_set_xive()
188 state->masked_pending, state->resend); in kvmppc_xics_set_xive()
198 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_get_xive()
205 return -ENODEV; in kvmppc_xics_get_xive()
209 return -EINVAL; in kvmppc_xics_get_xive()
210 state = &ics->irq_state[src]; in kvmppc_xics_get_xive()
213 arch_spin_lock(&ics->lock); in kvmppc_xics_get_xive()
214 *server = state->server; in kvmppc_xics_get_xive()
215 *priority = state->priority; in kvmppc_xics_get_xive()
216 arch_spin_unlock(&ics->lock); in kvmppc_xics_get_xive()
224 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_int_on()
231 return -ENODEV; in kvmppc_xics_int_on()
235 return -EINVAL; in kvmppc_xics_int_on()
236 state = &ics->irq_state[src]; in kvmppc_xics_int_on()
238 icp = kvmppc_xics_find_server(kvm, state->server); in kvmppc_xics_int_on()
240 return -EINVAL; in kvmppc_xics_int_on()
242 if (write_xive(xics, ics, state, state->server, state->saved_priority, in kvmppc_xics_int_on()
243 state->saved_priority)) in kvmppc_xics_int_on()
251 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_int_off()
257 return -ENODEV; in kvmppc_xics_int_off()
261 return -EINVAL; in kvmppc_xics_int_off()
262 state = &ics->irq_state[src]; in kvmppc_xics_int_off()
264 write_xive(xics, ics, state, state->server, MASKED, state->priority); in kvmppc_xics_int_off()
269 /* -- ICP routines, including hcalls -- */
282 success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; in icp_try_update()
286 XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n", in icp_try_update()
287 icp->server_num, in icp_try_update()
290 XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n", in icp_try_update()
309 kvmppc_book3s_queue_irqprio(icp->vcpu, in icp_try_update()
312 kvmppc_fast_vcpu_kick(icp->vcpu); in icp_try_update()
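icp_try_update() (source line 282) commits an entire new ICP state with one cmpxchg64 of the raw word, deriving the EE output line from the new state just before the swap; on success it queues an interrupt and kicks the vCPU, as the lines above show. A user-space model with an illustrative field layout (the kernel's union kvmppc_icp_state differs in detail):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

union icp_state {
	uint64_t raw;
	struct {
		uint32_t xisr;		/* pending source, 0 = none */
		uint8_t  pending_pri;	/* its priority */
		uint8_t  cppr;		/* current processor priority */
		uint8_t  mfrr;		/* IPI request priority */
		uint8_t  out_ee:1;	/* computed EE output line */
		uint8_t  need_resend:1;
	} f;
};

static bool icp_try_update(_Atomic uint64_t *state,
			   union icp_state old, union icp_state new)
{
	/* The EE line asserts whenever the pending source is more
	 * favored than the current processor priority. */
	new.f.out_ee = (new.f.xisr != 0 && new.f.pending_pri < new.f.cppr);

	/* One CAS commits (or refuses) the whole transition; callers
	 * loop, re-reading the state, until it succeeds. */
	return atomic_compare_exchange_strong(state, &old.raw, new.raw);
}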
325 for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { in icp_check_resend()
326 struct kvmppc_ics *ics = xics->ics[icsid]; in icp_check_resend()
328 if (!test_and_clear_bit(icsid, icp->resend_map)) in icp_check_resend()
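icp_check_resend() scans the per-ICP map of ICSes flagged for resend; the atomic test-and-clear means a concurrently scanning CPU cannot resend the same ICS twice. A sketch with a single 64-bit map (the kernel sizes the map by max_icsid):

#include <stdatomic.h>
#include <stdint.h>

static void check_resend_model(_Atomic uint64_t *resend_map,
			       unsigned int max_icsid,
			       void (*resend_ics)(unsigned int icsid))
{
	for (unsigned int icsid = 0; icsid <= max_icsid; icsid++) {
		uint64_t bit = UINT64_C(1) << icsid;

		/* test_and_clear_bit analogue: only the caller that
		 * observes the bit set gets to resend this ICS */
		if (atomic_fetch_and(resend_map, ~bit) & bit)
			resend_ics(icsid);
	}
}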
343 icp->server_num); in icp_try_to_deliver()
346 old_state = new_state = READ_ONCE(icp->state); in icp_try_to_deliver()
398 * thus we may need to re-do the ICP lookup as well in icp_deliver_irq()
402 /* Get the ICS state and lock it */ in icp_deliver_irq()
408 state = &ics->irq_state[src]; in icp_deliver_irq()
410 /* Get a lock on the ICS */ in icp_deliver_irq()
412 arch_spin_lock(&ics->lock); in icp_deliver_irq()
415 if (!icp || state->server != icp->server_num) { in icp_deliver_irq()
416 icp = kvmppc_xics_find_server(xics->kvm, state->server); in icp_deliver_irq()
419 new_irq, state->server); in icp_deliver_irq()
425 if (!state->resend) in icp_deliver_irq()
429 state->resend = 0; in icp_deliver_irq()
446 if (state->priority == MASKED) { in icp_deliver_irq()
448 state->masked_pending = 1; in icp_deliver_irq()
459 * ics spin lock. in icp_deliver_irq()
462 * new guy. We cannot assume that the rejected interrupt is less in icp_deliver_irq()
468 if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) { in icp_deliver_irq()
473 arch_spin_unlock(&ics->lock); in icp_deliver_irq()
484 state->resend = 1; in icp_deliver_irq()
491 set_bit(ics->icsid, icp->resend_map); in icp_deliver_irq()
500 if (!icp->state.need_resend) { in icp_deliver_irq()
501 state->resend = 0; in icp_deliver_irq()
502 arch_spin_unlock(&ics->lock); in icp_deliver_irq()
509 arch_spin_unlock(&ics->lock); in icp_deliver_irq()
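The comment at source lines 459-462 is the crux of icp_deliver_irq(): displacing the ICP's pending interrupt hands it back as a reject, and because the rejected source may be more favored than anything else pending, it simply becomes the next delivery attempt. A self-contained model (names, layout, and the priority table are illustrative; the kernel also checks MFRR and flags failed deliveries for resend):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static uint8_t source_prio[1024];	/* stand-in for per-ICS state */

union icp_state {
	uint64_t raw;
	struct { uint32_t xisr; uint8_t pending_pri, cppr; } f;
};

/* Try to make irq the pending interrupt; on success *reject holds the
 * displaced source (0 if none). */
static bool try_to_deliver(_Atomic uint64_t *state, uint32_t irq,
			   uint8_t prio, uint32_t *reject)
{
	union icp_state old, new;
	bool ok;

	do {
		old.raw = atomic_load(state);
		new = old;
		*reject = 0;
		ok = prio < new.f.cppr && prio < new.f.pending_pri;
		if (ok) {
			*reject = new.f.xisr;
			new.f.xisr = irq;
			new.f.pending_pri = prio;
		}
	} while (!atomic_compare_exchange_weak(state, &old.raw, new.raw));

	return ok;
}

static void deliver_irq(_Atomic uint64_t *state, uint32_t new_irq)
{
	uint32_t reject;

	/* Keep going as long as delivery succeeds but displaces someone:
	 * the displaced interrupt is not necessarily less favored. */
	while (try_to_deliver(state, new_irq, source_prio[new_irq], &reject)
	       && reject)
		new_irq = reject;
	/* on failure the kernel flags the source and its ICS for resend */
}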
549 old_state = new_state = READ_ONCE(icp->state); in icp_down_cppr()
589 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_h_xirr()
593 kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); in kvmppc_h_xirr()
603 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_xirr()
614 XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr); in kvmppc_h_xirr()
623 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_h_ipi()
630 vcpu->vcpu_id, server, mfrr); in kvmppc_h_ipi()
632 icp = vcpu->arch.icp; in kvmppc_h_ipi()
633 local = icp->server_num == server; in kvmppc_h_ipi()
635 icp = kvmppc_xics_find_server(vcpu->kvm, server); in kvmppc_h_ipi()
645 * reject. If the MFRR is being made less favored then in kvmppc_h_ipi()
646 * there might be a previously-rejected interrupt needing in kvmppc_h_ipi()
651 * If the CPPR is less favored, then we might be replacing in kvmppc_h_ipi()
660 * made less favored than its earlier value, there might be in kvmppc_h_ipi()
661 * a previously-rejected interrupt needing to be resent. in kvmppc_h_ipi()
667 * whenever the MFRR is made less favored. in kvmppc_h_ipi()
670 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_ipi()
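The long comment above (source lines 645-667) boils down to two independent checks computed inside H_IPI's compare-and-swap loop: an MFRR now more favored than CPPR may displace the pending interrupt (a reject), and an MFRR made less favored must hand back need_resend so previously refused interrupts get another try. A sketch of one loop iteration (XICS_IPI is 2, as in the kernel's xics.h; the struct layout is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define XICS_IPI 2

struct icp_fields {
	uint32_t xisr;
	uint8_t  pending_pri, cppr, mfrr;
	bool     need_resend;
};

/* One iteration of H_IPI's CAS loop: compute the new ICP state plus
 * the side effects the caller must perform if the CAS wins. */
static void set_mfrr_once(struct icp_fields *st, uint8_t mfrr,
			  uint32_t *reject, bool *resend)
{
	uint8_t old_mfrr = st->mfrr;

	st->mfrr = mfrr;
	*reject = 0;
	*resend = false;

	/* More favored than CPPR: the IPI can be presented now, and it
	 * may displace a less favored pending interrupt. */
	if (mfrr < st->cppr && mfrr <= st->pending_pri) {
		*reject = st->xisr;
		st->pending_pri = mfrr;
		st->xisr = XICS_IPI;
	}

	/* Made less favored: a previously rejected interrupt may now be
	 * deliverable, so hand the resend flag back to the caller. */
	if (mfrr > old_mfrr) {
		*resend = st->need_resend;
		st->need_resend = false;
	}
}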
709 icp = vcpu->arch.icp; in kvmppc_h_ipoll()
710 if (icp->server_num != server) { in kvmppc_h_ipoll()
711 icp = kvmppc_xics_find_server(vcpu->kvm, server); in kvmppc_h_ipoll()
715 state = READ_ONCE(icp->state); in kvmppc_h_ipoll()
724 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_h_cppr()
725 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_h_cppr()
728 XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr); in kvmppc_h_cppr()
737 if (cppr > icp->state.cppr) in kvmppc_h_cppr()
739 else if (cppr == icp->state.cppr) in kvmppc_h_cppr()
753 kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); in kvmppc_h_cppr()
756 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_cppr()
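XICS priorities run backwards from the numeric value: 0 is most favored, 0xff (MASKED) least. That is why the fast path above sends a numerically larger (less favored) CPPR through icp_down_cppr(), which may allow resends, while a more favored CPPR may force the currently pending interrupt to be rejected back to its ICS. A condensed sketch:

#include <stdint.h>

#define MASKED 0xffu

/* Smaller value = more favored.  Sketch of the two H_CPPR paths. */
static void h_cppr_model(uint8_t *cppr, uint8_t *pending_pri,
			 uint32_t *xisr, uint8_t new_cppr, uint32_t *reject)
{
	*reject = 0;
	if (new_cppr > *cppr) {
		/* Less favored: the slow path (icp_down_cppr) lowers
		 * CPPR and may trigger resends of refused interrupts. */
		*cppr = new_cppr;
	} else if (new_cppr < *cppr) {
		/* More favored: a pending interrupt that no longer
		 * beats CPPR must be rejected back to its ICS. */
		*cppr = new_cppr;
		if (new_cppr <= *pending_pri) {
			*reject = *xisr;
			*xisr = 0;
			*pending_pri = MASKED;
		}
	}
	/* equal: nothing to do */
}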
779 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in ics_eoi()
780 struct kvmppc_icp *icp = vcpu->arch.icp; in ics_eoi()
799 state = &ics->irq_state[src]; in ics_eoi()
801 if (state->lsi) in ics_eoi()
802 pq_new = state->pq_state; in ics_eoi()
805 pq_old = state->pq_state; in ics_eoi()
807 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); in ics_eoi()
812 kvm_notify_acked_irq(vcpu->kvm, 0, irq); in ics_eoi()
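EOI undoes the accept-time shift: for an MSI the P/Q word moves Q down into P (the pq_old >> 1 above), so a message that arrived during handling is presented next, while an LSI keeps its pq_state since P should stay set as long as the line is asserted. Sketch of the MSI side:

#include <stdatomic.h>

#define PQ_PRESENTED 1u
#define PQ_QUEUED    2u

/* MSI EOI: shift Q into P.  Returns the new state; the caller
 * re-delivers the source when PQ_PRESENTED is still set. */
static unsigned int msi_eoi(_Atomic unsigned int *pq_state)
{
	unsigned int pq_old, pq_new;

	do {
		pq_old = atomic_load(pq_state);
		pq_new = pq_old >> 1;
	} while (!atomic_compare_exchange_weak(pq_state, &pq_old, pq_new));

	return pq_new;
}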
819 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_h_eoi()
820 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_h_eoi()
823 XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr); in kvmppc_h_eoi()
850 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_xics_rm_complete()
851 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_xics_rm_complete()
854 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt); in kvmppc_xics_rm_complete()
856 if (icp->rm_action & XICS_RM_KICK_VCPU) { in kvmppc_xics_rm_complete()
857 icp->n_rm_kick_vcpu++; in kvmppc_xics_rm_complete()
858 kvmppc_fast_vcpu_kick(icp->rm_kick_target); in kvmppc_xics_rm_complete()
860 if (icp->rm_action & XICS_RM_CHECK_RESEND) { in kvmppc_xics_rm_complete()
861 icp->n_rm_check_resend++; in kvmppc_xics_rm_complete()
862 icp_check_resend(xics, icp->rm_resend_icp); in kvmppc_xics_rm_complete()
864 if (icp->rm_action & XICS_RM_NOTIFY_EOI) { in kvmppc_xics_rm_complete()
865 icp->n_rm_notify_eoi++; in kvmppc_xics_rm_complete()
866 kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq); in kvmppc_xics_rm_complete()
869 icp->rm_action = 0; in kvmppc_xics_rm_complete()
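The XICS_RM_* bits record work a real-mode hcall handler could not finish there (kicking vCPUs and kvm_notify_acked_irq() need virtual mode); kvmppc_xics_rm_complete() replays them as the lines above show. A sketch of the flag protocol (flag values and callback signatures are illustrative):

#include <stdint.h>

#define RM_KICK_VCPU    0x1	/* illustrative stand-ins for XICS_RM_* */
#define RM_CHECK_RESEND 0x2
#define RM_NOTIFY_EOI   0x4

struct rm_pending {
	uint32_t rm_action;	/* set in real mode ... */
	uint32_t rm_eoied_irq;	/* ... along with its arguments */
};

/* Virtual-mode completion: replay everything real mode deferred,
 * then clear the actions. */
static void rm_complete(struct rm_pending *p,
			void (*kick)(void), void (*check_resend)(void),
			void (*notify_eoi)(uint32_t irq))
{
	if (p->rm_action & RM_KICK_VCPU)
		kick();
	if (p->rm_action & RM_CHECK_RESEND)
		check_resend();
	if (p->rm_action & RM_NOTIFY_EOI)
		notify_eoi(p->rm_eoied_irq);
	p->rm_action = 0;
}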
877 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_xics_hcall()
882 if (!xics || !vcpu->arch.icp) in kvmppc_xics_hcall()
885 /* These requests don't have real-mode implementations at present */ in kvmppc_xics_hcall()
898 if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_xics_hcall()
923 /* -- Initialisation code etc. -- */
933 pimap->n_mapped); in xics_debugfs_irqmap()
934 for (i = 0; i < pimap->n_mapped; i++) { in xics_debugfs_irqmap()
936 pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq); in xics_debugfs_irqmap()
942 struct kvmppc_xics *xics = m->private; in xics_debug_show()
943 struct kvm *kvm = xics->kvm; in xics_debug_show()
960 xics_debugfs_irqmap(m, kvm->arch.pimap); in xics_debug_show()
965 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_debug_show()
971 state.raw = READ_ONCE(icp->state.raw); in xics_debug_show()
973 icp->server_num, state.xisr, in xics_debug_show()
976 t_rm_kick_vcpu += icp->n_rm_kick_vcpu; in xics_debug_show()
977 t_rm_notify_eoi += icp->n_rm_notify_eoi; in xics_debug_show()
978 t_rm_check_resend += icp->n_rm_check_resend; in xics_debug_show()
979 t_check_resend += icp->n_check_resend; in xics_debug_show()
980 t_reject += icp->n_reject; in xics_debug_show()
983 seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n", in xics_debug_show()
989 struct kvmppc_ics *ics = xics->ics[icsid]; in xics_debug_show()
998 arch_spin_lock(&ics->lock); in xics_debug_show()
1001 struct ics_irq_state *irq = &ics->irq_state[i]; in xics_debug_show()
1004 irq->number, irq->server, irq->priority, in xics_debug_show()
1005 irq->saved_priority, irq->pq_state, in xics_debug_show()
1006 irq->resend, irq->masked_pending); in xics_debug_show()
1009 arch_spin_unlock(&ics->lock); in xics_debug_show()
1019 xics->dentry = debugfs_create_file("xics", 0444, xics->kvm->debugfs_dentry, in xics_debugfs_init()
1033 mutex_lock(&kvm->lock); in kvmppc_xics_create_ics()
1035 /* ICS already exists - somebody else got here first */ in kvmppc_xics_create_ics()
1036 if (xics->ics[icsid]) in kvmppc_xics_create_ics()
1044 ics->icsid = icsid; in kvmppc_xics_create_ics()
1047 ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i; in kvmppc_xics_create_ics()
1048 ics->irq_state[i].priority = MASKED; in kvmppc_xics_create_ics()
1049 ics->irq_state[i].saved_priority = MASKED; in kvmppc_xics_create_ics()
1052 xics->ics[icsid] = ics; in kvmppc_xics_create_ics()
1054 if (icsid > xics->max_icsid) in kvmppc_xics_create_ics()
1055 xics->max_icsid = icsid; in kvmppc_xics_create_ics()
1058 mutex_unlock(&kvm->lock); in kvmppc_xics_create_ics()
1059 return xics->ics[icsid]; in kvmppc_xics_create_ics()
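kvmppc_xics_create_ics() is create-if-absent under kvm->lock: the existence re-check after taking the mutex makes the losing racer return the winner's ICS, and a new ICS starts with every source masked. A user-space sketch of the pattern (sizes and the ID shift are illustrative stand-ins for KVMPPC_XICS_ICS_SHIFT and friends):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define MASKED 0xffu
#define ICS_SHIFT 10			/* illustrative */
#define IRQS_PER_ICS (1u << ICS_SHIFT)

struct ics_model {
	unsigned int icsid;
	struct { uint32_t number; uint8_t priority, saved_priority; }
		irq_state[IRQS_PER_ICS];
};

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ics_model *ics_table[32];

/* Whoever takes the mutex first creates the ICS with every source
 * masked; everyone else sees the existing one. */
static struct ics_model *create_ics(unsigned int icsid)
{
	pthread_mutex_lock(&big_lock);
	if (!ics_table[icsid]) {
		struct ics_model *ics = calloc(1, sizeof(*ics));

		if (ics) {
			ics->icsid = icsid;
			for (unsigned int i = 0; i < IRQS_PER_ICS; i++) {
				ics->irq_state[i].number =
					(icsid << ICS_SHIFT) | i;
				ics->irq_state[i].priority = MASKED;
				ics->irq_state[i].saved_priority = MASKED;
			}
			ics_table[icsid] = ics;
		}
	}
	pthread_mutex_unlock(&big_lock);
	return ics_table[icsid];
}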
1066 if (!vcpu->kvm->arch.xics) in kvmppc_xics_create_icp()
1067 return -ENODEV; in kvmppc_xics_create_icp()
1069 if (kvmppc_xics_find_server(vcpu->kvm, server_num)) in kvmppc_xics_create_icp()
1070 return -EEXIST; in kvmppc_xics_create_icp()
1074 return -ENOMEM; in kvmppc_xics_create_icp()
1076 icp->vcpu = vcpu; in kvmppc_xics_create_icp()
1077 icp->server_num = server_num; in kvmppc_xics_create_icp()
1078 icp->state.mfrr = MASKED; in kvmppc_xics_create_icp()
1079 icp->state.pending_pri = MASKED; in kvmppc_xics_create_icp()
1080 vcpu->arch.icp = icp; in kvmppc_xics_create_icp()
1082 XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id); in kvmppc_xics_create_icp()
1089 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_xics_get_icp()
1094 state = icp->state; in kvmppc_xics_get_icp()
1103 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_xics_set_icp()
1104 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_xics_set_icp()
1113 return -ENOENT; in kvmppc_xics_set_icp()
1124 return -EINVAL; in kvmppc_xics_set_icp()
1127 return -EINVAL; in kvmppc_xics_set_icp()
1130 return -EINVAL; in kvmppc_xics_set_icp()
1133 return -EINVAL; in kvmppc_xics_set_icp()
1146 kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); in kvmppc_xics_set_icp()
1153 * matter). We do handle resends due to CPPR becoming less in kvmppc_xics_set_icp()
1159 old_state = READ_ONCE(icp->state); in kvmppc_xics_set_icp()
1188 return -ENOENT; in xics_get_source()
1190 irqp = &ics->irq_state[idx]; in xics_get_source()
1192 arch_spin_lock(&ics->lock); in xics_get_source()
1193 ret = -ENOENT; in xics_get_source()
1194 if (irqp->exists) { in xics_get_source()
1195 val = irqp->server; in xics_get_source()
1196 prio = irqp->priority; in xics_get_source()
1199 prio = irqp->saved_priority; in xics_get_source()
1202 if (irqp->lsi) { in xics_get_source()
1204 if (irqp->pq_state & PQ_PRESENTED) in xics_get_source()
1206 } else if (irqp->masked_pending || irqp->resend) in xics_get_source()
1209 if (irqp->pq_state & PQ_PRESENTED) in xics_get_source()
1212 if (irqp->pq_state & PQ_QUEUED) in xics_get_source()
1217 arch_spin_unlock(&ics->lock); in xics_get_source()
1221 ret = -EFAULT; in xics_get_source()
1238 return -ENOENT; in xics_set_source()
1242 ics = kvmppc_xics_create_ics(xics->kvm, xics, irq); in xics_set_source()
1244 return -ENOMEM; in xics_set_source()
1246 irqp = &ics->irq_state[idx]; in xics_set_source()
1248 return -EFAULT; in xics_set_source()
1253 kvmppc_xics_find_server(xics->kvm, server) == NULL) in xics_set_source()
1254 return -EINVAL; in xics_set_source()
1257 arch_spin_lock(&ics->lock); in xics_set_source()
1258 irqp->server = server; in xics_set_source()
1259 irqp->saved_priority = prio; in xics_set_source()
1262 irqp->priority = prio; in xics_set_source()
1263 irqp->resend = 0; in xics_set_source()
1264 irqp->masked_pending = 0; in xics_set_source()
1265 irqp->lsi = 0; in xics_set_source()
1266 irqp->pq_state = 0; in xics_set_source()
1268 irqp->lsi = 1; in xics_set_source()
1271 irqp->pq_state |= PQ_PRESENTED; in xics_set_source()
1273 irqp->pq_state |= PQ_QUEUED; in xics_set_source()
1274 irqp->exists = 1; in xics_set_source()
1275 arch_spin_unlock(&ics->lock); in xics_set_source()
1279 icp_deliver_irq(xics, NULL, irqp->number, false); in xics_set_source()
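xics_set_source() restores one interrupt source from the packed u64 written through the KVM device attribute: fields are validated, everything is rewritten under the ICS lock, and a source saved with an interrupt still pending is re-delivered once the lock is dropped so nothing is lost across migration. A condensed sketch with hypothetical bit masks and field offsets; the real layout is given by the uapi KVM_XICS_* definitions:

#include <stdbool.h>
#include <stdint.h>

#define SRC_MASKED    (1ull << 32)	/* hypothetical packing */
#define SRC_LEVEL     (1ull << 33)
#define SRC_PRESENTED (1ull << 34)
#define SRC_QUEUED    (1ull << 35)
#define SRC_PENDING   (1ull << 36)

struct src_model {
	uint32_t server;
	uint8_t  priority, saved_priority;
	bool     lsi;
	unsigned int pq_state;		/* bit 0 = P, bit 1 = Q */
};

static void restore_source(struct src_model *s, uint64_t val,
			   void (*deliver)(struct src_model *))
{
	uint8_t prio = (val >> 24) & 0xff;	/* offsets illustrative */

	/* done under the ICS lock in the kernel */
	s->server = val & 0xffffff;
	s->saved_priority = prio;
	s->priority = (val & SRC_MASKED) ? 0xff : prio;
	s->lsi = !!(val & SRC_LEVEL);
	s->pq_state = 0;
	if (val & SRC_PRESENTED)
		s->pq_state |= 1;
	if (val & SRC_QUEUED)
		s->pq_state |= 2;

	/* lock dropped; re-present anything pending at save time */
	if (val & SRC_PENDING)
		deliver(s);
}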
1287 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_set_irq()
1290 return -ENODEV; in kvmppc_xics_set_irq()
1296 struct kvmppc_xics *xics = dev->private; in xics_set_attr()
1298 switch (attr->group) { in xics_set_attr()
1300 return xics_set_source(xics, attr->attr, attr->addr); in xics_set_attr()
1302 return -ENXIO; in xics_set_attr()
1307 struct kvmppc_xics *xics = dev->private; in xics_get_attr()
1309 switch (attr->group) { in xics_get_attr()
1311 return xics_get_source(xics, attr->attr, attr->addr); in xics_get_attr()
1313 return -ENXIO; in xics_get_attr()
1318 switch (attr->group) { in xics_has_attr()
1320 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && in xics_has_attr()
1321 attr->attr < KVMPPC_XICS_NR_IRQS) in xics_has_attr()
1325 return -ENXIO; in xics_has_attr()
1329 * Called when device fd is closed. kvm->lock is held.
1333 struct kvmppc_xics *xics = dev->private; in kvmppc_xics_release()
1335 struct kvm *kvm = xics->kvm; in kvmppc_xics_release()
1349 debugfs_remove(xics->dentry); in kvmppc_xics_release()
1356 * Take vcpu->mutex to ensure that no one_reg get/set ioctl in kvmppc_xics_release()
1358 * Holding the vcpu->mutex also means that execution is in kvmppc_xics_release()
1360 * can execute again, vcpu->arch.icp and vcpu->arch.irq_type in kvmppc_xics_release()
1364 mutex_lock(&vcpu->mutex); in kvmppc_xics_release()
1366 mutex_unlock(&vcpu->mutex); in kvmppc_xics_release()
1370 kvm->arch.xics = NULL; in kvmppc_xics_release()
1372 for (i = 0; i <= xics->max_icsid; i++) { in kvmppc_xics_release()
1373 kfree(xics->ics[i]); in kvmppc_xics_release()
1374 xics->ics[i] = NULL; in kvmppc_xics_release()
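The comment at source lines 1356-1360 explains the teardown ordering: taking each vcpu->mutex guarantees no vCPU is midway through an XICS hcall or a one_reg access while its ICP is freed, and by the time the vCPU runs again the pointers are already cleared. Sketch:

#include <pthread.h>
#include <stdlib.h>

struct vcpu_model {
	pthread_mutex_t mutex;
	void *icp;		/* vcpu->arch.icp */
	int irq_type;
};

/* Free each vCPU's ICP with that vCPU's mutex held: execution cannot
 * be inside an XICS hcall, and when the vCPU next runs, icp and
 * irq_type are already reset. */
static void release_all_icps(struct vcpu_model *vcpus, int n)
{
	for (int i = 0; i < n; i++) {
		pthread_mutex_lock(&vcpus[i].mutex);
		free(vcpus[i].icp);
		vcpus[i].icp = NULL;
		vcpus[i].irq_type = 0;	/* KVMPPC_IRQ_DEFAULT analogue */
		pthread_mutex_unlock(&vcpus[i].mutex);
	}
}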
1387 struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device; in kvmppc_xics_get_device()
1403 struct kvm *kvm = dev->kvm; in kvmppc_xics_create()
1408 if (kvm->arch.xics) in kvmppc_xics_create()
1409 return -EEXIST; in kvmppc_xics_create()
1413 return -ENOMEM; in kvmppc_xics_create()
1415 dev->private = xics; in kvmppc_xics_create()
1416 xics->dev = dev; in kvmppc_xics_create()
1417 xics->kvm = kvm; in kvmppc_xics_create()
1418 kvm->arch.xics = xics; in kvmppc_xics_create()
1424 xics->real_mode = ENABLE_REALMODE; in kvmppc_xics_create()
1425 xics->real_mode_dbg = DEBUG_REALMODE; in kvmppc_xics_create()
1434 struct kvmppc_xics *xics = dev->private; in kvmppc_xics_init()
1440 .name = "kvm-xics",
1452 struct kvmppc_xics *xics = dev->private; in kvmppc_xics_connect_vcpu()
1453 int r = -EBUSY; in kvmppc_xics_connect_vcpu()
1455 if (dev->ops != &kvm_xics_ops) in kvmppc_xics_connect_vcpu()
1456 return -EPERM; in kvmppc_xics_connect_vcpu()
1457 if (xics->kvm != vcpu->kvm) in kvmppc_xics_connect_vcpu()
1458 return -EPERM; in kvmppc_xics_connect_vcpu()
1459 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) in kvmppc_xics_connect_vcpu()
1460 return -EBUSY; in kvmppc_xics_connect_vcpu()
1464 vcpu->arch.irq_type = KVMPPC_IRQ_XICS; in kvmppc_xics_connect_vcpu()
1471 if (!vcpu->arch.icp) in kvmppc_xics_free_icp()
1473 kfree(vcpu->arch.icp); in kvmppc_xics_free_icp()
1474 vcpu->arch.icp = NULL; in kvmppc_xics_free_icp()
1475 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; in kvmppc_xics_free_icp()
1481 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_set_mapped()
1489 ics->irq_state[idx].host_irq = host_irq; in kvmppc_xics_set_mapped()
1490 ics->irq_state[idx].intr_cpu = -1; in kvmppc_xics_set_mapped()
1497 struct kvmppc_xics *xics = kvm->arch.xics; in kvmppc_xics_clr_mapped()
1505 ics->irq_state[idx].host_irq = 0; in kvmppc_xics_clr_mapped()