xref: /linux/arch/powerpc/kvm/book3s_hv_rm_xics.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/* -- ICS routines -- */
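/*
 * Walk all interrupt sources of @ics and redeliver any that still have
 * their resend flag set. The ICS lock is dropped around the call to
 * icp_rm_deliver_irq() because that function takes the same lock itself.
 */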
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

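/*
 * Post a real-mode host action for @vcpu to the first hardware thread of
 * host core @hcore: stash the vcpu pointer in that core's rm_data slot,
 * set the PPC_MSG_RM_HOST_ACTION message and raise an IPI so that the
 * host side (kvmppc_xics_ipi_action() below) performs the action.
 */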
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current core in the core map and go
 * around in a circle until we get back to it, looking for a core that
 * is running in host context and that hasn't already been targeted for
 * another rm_host_ops action.
 *
 * In the future we could consider using a fairer algorithm (one that
 * distributes the IPIs better).
 *
 * Returns -1 if no suitable core could be found in the host.
 * Otherwise, returns the core id that has been reserved for use.
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++)  {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
						old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

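/*
 * Wrap-around search over all host cores: first scan (my_core, max),
 * then, if nothing was found, scan [0, my_core). The second call passes
 * start = -1 so that grab_next_hostcore() begins at core 0.
 */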
static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

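/*
 * Make the target VCPU notice a pending external interrupt. Kicking
 * ourselves only needs LPCR[MER] to be set. A VCPU loaded on another
 * core gets an IPI directly; otherwise we try to redirect the kick to a
 * free host core, and as a last resort defer it via rm_action so it is
 * completed in virtual mode.
 */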
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded; if not, find an available host
	 * core to post to and wake the VCPU. If we can't find one,
	 * record the kick in rm_action so that we eventually return
	 * H_TOO_HARD and complete it in virtual mode.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

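/*
 * Try to commit one ICP state transition with a single 64-bit cmpxchg.
 * Callers use this as the tail of an optimistic retry loop:
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state from old_state ...
 *	} while (!icp_rm_try_update(icp, old_state, new_state));
 *
 * The helper recomputes out_ee from the new state before the cmpxchg
 * and, when the update succeeds and an interrupt should be signalled,
 * kicks the target VCPU.
 */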
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

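/*
 * If real-mode debugging is enabled or any work has been deferred via
 * rm_action, return H_TOO_HARD so the hypercall is redone in virtual
 * mode, where the deferred work is then completed.
 */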
static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

static void icp_rm_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

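/*
 * Try to record @irq as the pending interrupt of @icp. Delivery only
 * succeeds if @priority is more favored (numerically lower) than the
 * current CPPR, the MFRR and any already-pending priority. On success,
 * any interrupt displaced from XISR is returned in *@reject so the
 * caller can redeliver it; on failure, need_resend is set instead.
 */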
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

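/*
 * Deliver @new_irq to the ICP of its target server, or redeliver an
 * interrupt that was previously rejected. If the delivery displaces a
 * less favored pending interrupt, we loop and deliver that one in turn;
 * if delivery is not possible we flag the source and the ICS for resend.
 */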
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we set the
		 * resend map bit and mark the ICS state as needing a resend.
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that the pending interrupt is either already an
		 * IPI (in which case we don't care to override it), more
		 * favored than us, or non-existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}


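/*
 * Real-mode H_XIRR: accept the most favored pending interrupt, if any.
 * The value placed in GPR4 is the 32-bit XIRR, i.e. the previous CPPR in
 * the top byte and the accepted XISR in the low 24 bits; CPPR is raised
 * to the pending priority and XISR is cleared in the same atomic update.
 */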
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

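/*
 * Real-mode H_IPI (Set_MFRR): write @mfrr into the MFRR of the ICP
 * identified by @server. A value more favored than the target's CPPR
 * turns into a pending IPI, possibly rejecting a less favored pending
 * interrupt; making the MFRR less favored instead triggers a resend
 * check. Returns H_PARAMETER if @server does not exist.
 */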
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

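/*
 * Real-mode H_CPPR: set the processor's current priority. Moving to a
 * less favored CPPR is Down_CPPR and is handled by icp_rm_down_cppr();
 * moving to a more favored CPPR may reject the currently pending
 * interrupt, which icp_rm_deliver_irq() then attempts to redeliver.
 */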
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}

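/*
 * Real-mode H_EOI: the top byte of @xirr restores the CPPR (Down_CPPR)
 * and the low 24 bits identify the source being EOI'd. Sources that are
 * still asserted (typically level-triggered ones) are redelivered
 * immediately.
 */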
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt; this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}
 bail:
	return check_too_hard(xics, icp);
}

/*  --- Non-real mode XICS-related built-in routines ---  */

/**
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

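/*
 * Host-context handler for PPC_MSG_RM_HOST_ACTION IPIs. Runs the action
 * that real mode posted for this core via icp_send_hcore_msg(), then
 * clears rm_data before rm_action so that real mode sees a consistent,
 * reusable slot.
 */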
void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
							rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}
751