xref: /titanic_51/usr/src/uts/i86xpv/os/evtchn.c (revision 0bc46f0d82f5e2ab983b9daff3aa7c9abb447ff2)
1843e1988Sjohnlev /*
2843e1988Sjohnlev  * CDDL HEADER START
3843e1988Sjohnlev  *
4843e1988Sjohnlev  * The contents of this file are subject to the terms of the
5843e1988Sjohnlev  * Common Development and Distribution License (the "License").
6843e1988Sjohnlev  * You may not use this file except in compliance with the License.
7843e1988Sjohnlev  *
8843e1988Sjohnlev  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9843e1988Sjohnlev  * or http://www.opensolaris.org/os/licensing.
10843e1988Sjohnlev  * See the License for the specific language governing permissions
11843e1988Sjohnlev  * and limitations under the License.
12843e1988Sjohnlev  *
13843e1988Sjohnlev  * When distributing Covered Code, include this CDDL HEADER in each
14843e1988Sjohnlev  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15843e1988Sjohnlev  * If applicable, add the following below this CDDL HEADER, with the
16843e1988Sjohnlev  * fields enclosed by brackets "[]" replaced with your own identifying
17843e1988Sjohnlev  * information: Portions Copyright [yyyy] [name of copyright owner]
18843e1988Sjohnlev  *
19843e1988Sjohnlev  * CDDL HEADER END
20843e1988Sjohnlev  */
21843e1988Sjohnlev 
22843e1988Sjohnlev /*
23349b53ddSStuart Maybee  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24843e1988Sjohnlev  * Use is subject to license terms.
25843e1988Sjohnlev  */
26843e1988Sjohnlev 
27843e1988Sjohnlev /*
28843e1988Sjohnlev  * evtchn.c
29843e1988Sjohnlev  *
30843e1988Sjohnlev  * Communication via hypervisor event channels.
31843e1988Sjohnlev  *
32843e1988Sjohnlev  * Copyright (c) 2002-2005, K A Fraser
33843e1988Sjohnlev  *
34843e1988Sjohnlev  * This file may be distributed separately from the Linux kernel, or
35843e1988Sjohnlev  * incorporated into other software packages, subject to the following license:
36843e1988Sjohnlev  *
37843e1988Sjohnlev  * Permission is hereby granted, free of charge, to any person obtaining a copy
38843e1988Sjohnlev  * of this source file (the "Software"), to deal in the Software without
39843e1988Sjohnlev  * restriction, including without limitation the rights to use, copy, modify,
40843e1988Sjohnlev  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
41843e1988Sjohnlev  * and to permit persons to whom the Software is furnished to do so, subject to
42843e1988Sjohnlev  * the following conditions:
43843e1988Sjohnlev  *
44843e1988Sjohnlev  * The above copyright notice and this permission notice shall be included in
45843e1988Sjohnlev  * all copies or substantial portions of the Software.
46843e1988Sjohnlev  *
47843e1988Sjohnlev  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
48843e1988Sjohnlev  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
49843e1988Sjohnlev  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
50843e1988Sjohnlev  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
51843e1988Sjohnlev  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
52843e1988Sjohnlev  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
53843e1988Sjohnlev  * IN THE SOFTWARE.
54843e1988Sjohnlev  */
55843e1988Sjohnlev 
56843e1988Sjohnlev /* some parts derived from netbsd's hypervisor_machdep.c 1.2.2.2 */
57843e1988Sjohnlev 
58843e1988Sjohnlev /*
59843e1988Sjohnlev  *
60843e1988Sjohnlev  * Copyright (c) 2004 Christian Limpach.
61843e1988Sjohnlev  * All rights reserved.
62843e1988Sjohnlev  *
63843e1988Sjohnlev  * Redistribution and use in source and binary forms, with or without
64843e1988Sjohnlev  * modification, are permitted provided that the following conditions
65843e1988Sjohnlev  * are met:
66843e1988Sjohnlev  * 1. Redistributions of source code must retain the above copyright
67843e1988Sjohnlev  *    notice, this list of conditions and the following disclaimer.
68843e1988Sjohnlev  * 2. Redistributions in binary form must reproduce the above copyright
69843e1988Sjohnlev  *    notice, this list of conditions and the following disclaimer in the
70843e1988Sjohnlev  *    documentation and/or other materials provided with the distribution.
71843e1988Sjohnlev  * 3. This section intentionally left blank.
72843e1988Sjohnlev  * 4. The name of the author may not be used to endorse or promote products
73843e1988Sjohnlev  *    derived from this software without specific prior written permission.
74843e1988Sjohnlev  *
75843e1988Sjohnlev  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
76843e1988Sjohnlev  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
77843e1988Sjohnlev  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
78843e1988Sjohnlev  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
79843e1988Sjohnlev  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
80843e1988Sjohnlev  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
81843e1988Sjohnlev  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
82843e1988Sjohnlev  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
83843e1988Sjohnlev  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
84843e1988Sjohnlev  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
85843e1988Sjohnlev  */
86843e1988Sjohnlev /*
87843e1988Sjohnlev  * Section 3 of the above license was updated in response to bug 6379571.
88843e1988Sjohnlev  */
89843e1988Sjohnlev 
90843e1988Sjohnlev #include <sys/types.h>
91843e1988Sjohnlev #include <sys/hypervisor.h>
92843e1988Sjohnlev #include <sys/machsystm.h>
93843e1988Sjohnlev #include <sys/mutex.h>
94843e1988Sjohnlev #include <sys/evtchn_impl.h>
95843e1988Sjohnlev #include <sys/ddi_impldefs.h>
96843e1988Sjohnlev #include <sys/avintr.h>
97843e1988Sjohnlev #include <sys/cpuvar.h>
98843e1988Sjohnlev #include <sys/smp_impldefs.h>
99843e1988Sjohnlev #include <sys/archsystm.h>
100843e1988Sjohnlev #include <sys/sysmacros.h>
101843e1988Sjohnlev #include <sys/cmn_err.h>
102843e1988Sjohnlev #include <sys/promif.h>
103843e1988Sjohnlev #include <sys/debug.h>
104843e1988Sjohnlev #include <sys/psm.h>
105843e1988Sjohnlev #include <sys/privregs.h>
106843e1988Sjohnlev #include <sys/trap.h>
107843e1988Sjohnlev #include <sys/atomic.h>
108843e1988Sjohnlev #include <sys/cpu.h>
109843e1988Sjohnlev #include <sys/psw.h>
110843e1988Sjohnlev #include <sys/traptrace.h>
111843e1988Sjohnlev #include <sys/stack.h>
112843e1988Sjohnlev #include <sys/x_call.h>
113843e1988Sjohnlev #include <xen/public/physdev.h>
114843e1988Sjohnlev 
115843e1988Sjohnlev /*
116843e1988Sjohnlev  * This file manages our association between hypervisor event channels and
117843e1988Sjohnlev  * Solaris's IRQs.  This is a one-to-one mapping, with the exception of
118843e1988Sjohnlev  * IPI IRQs, for which there is one event channel per CPU participating
119843e1988Sjohnlev  * in the IPI, and the clock VIRQ which also has an event channel per cpu
120843e1988Sjohnlev  * and the IRQ for /dev/xen/evtchn. The IRQ types are:
121843e1988Sjohnlev  *
122843e1988Sjohnlev  * IRQT_VIRQ:
123843e1988Sjohnlev  *	The hypervisor's standard virtual IRQ, used for the clock timer, for
124843e1988Sjohnlev  *	example.  This code allows any cpu to bind to one of these, although
125843e1988Sjohnlev  *	some are treated specially (i.e. VIRQ_DEBUG).
126843e1988Sjohnlev  *	Event channel binding is done via EVTCHNOP_bind_virq.
127843e1988Sjohnlev  *
128843e1988Sjohnlev  * IRQT_PIRQ:
129843e1988Sjohnlev  *	These associate a physical IRQ with an event channel via
130843e1988Sjohnlev  *	EVTCHNOP_bind_pirq.
131843e1988Sjohnlev  *
132843e1988Sjohnlev  * IRQT_IPI:
133843e1988Sjohnlev  *	A cross-call IRQ. Maps to "ncpus" event channels, each of which is
134843e1988Sjohnlev  *	bound to exactly one of the vcpus.  We do not currently support
135843e1988Sjohnlev  *	unbinding of IPIs (since Solaris doesn't need it). Uses
136843e1988Sjohnlev  *	EVTCHNOP_bind_ipi.
137843e1988Sjohnlev  *
138843e1988Sjohnlev  * IRQT_EVTCHN:
139843e1988Sjohnlev  *	A "normal" binding to an event channel, typically used by the frontend
 *	drivers to bind to their backend event channel.
141843e1988Sjohnlev  *
142843e1988Sjohnlev  * IRQT_DEV_EVTCHN:
143843e1988Sjohnlev  *	This is a one-time IRQ used by /dev/xen/evtchn. Unlike other IRQs, we
144843e1988Sjohnlev  *	have a one-IRQ to many-evtchn mapping. We only track evtchn->irq for
145843e1988Sjohnlev  *	these event channels, which are managed via ec_irq_add/rm_evtchn().
146843e1988Sjohnlev  *	We enforce that IRQT_DEV_EVTCHN's representative evtchn (->ii_evtchn)
147843e1988Sjohnlev  *	is zero, and make any calls to irq_evtchn() an error, to prevent
148843e1988Sjohnlev  *	accidentally attempting to use the illegal evtchn 0.
149843e1988Sjohnlev  *
150843e1988Sjohnlev  * Suspend/resume
151843e1988Sjohnlev  *
152843e1988Sjohnlev  *	During a suspend/resume cycle, we need to tear down the event channels.
153843e1988Sjohnlev  *	All other mapping data is kept. The drivers will remove their own event
154843e1988Sjohnlev  *	channels via xendev on receiving a DDI_SUSPEND.  This leaves us with
155843e1988Sjohnlev  *	the IPIs and VIRQs, which we handle in ec_suspend() and ec_resume()
156843e1988Sjohnlev  *	below.
157843e1988Sjohnlev  *
158843e1988Sjohnlev  * CPU binding
159843e1988Sjohnlev  *
160843e1988Sjohnlev  *	When an event channel is bound to a CPU, we set a bit in a mask present
161843e1988Sjohnlev  *	in the machcpu (evt_affinity) to indicate that this CPU can accept this
162843e1988Sjohnlev  *	event channel.  For both IPIs and VIRQs, this binding is fixed at
163843e1988Sjohnlev  *	allocation time and we never modify it.  All other event channels are
164843e1988Sjohnlev  *	bound via the PSM either as part of add_avintr(), or interrupt
165843e1988Sjohnlev  *	redistribution (xen_psm_dis/enable_intr()) as a result of CPU
166843e1988Sjohnlev  *	offline/online.
167843e1988Sjohnlev  *
168843e1988Sjohnlev  * Locking
169843e1988Sjohnlev  *
170843e1988Sjohnlev  *	Updates are done holding the ec_lock.  The xen_callback_handler()
171843e1988Sjohnlev  *	routine reads the mapping data in a lockless fashion.  Additionally
172843e1988Sjohnlev  *	suspend takes ec_lock to prevent update races during a suspend/resume
173843e1988Sjohnlev  *	cycle.  The IPI info is also examined without the lock; this is OK
174843e1988Sjohnlev  *	since we only ever change IPI info during initial setup and resume.
175843e1988Sjohnlev  */
176843e1988Sjohnlev 
177843e1988Sjohnlev #define	IRQ_IS_CPUPOKE(irq) (ipi_info[XC_CPUPOKE_PIL].mi_irq == (irq))
178843e1988Sjohnlev 
179843e1988Sjohnlev #define	EVTCHN_MASKED(ev) \
180843e1988Sjohnlev 	(HYPERVISOR_shared_info->evtchn_mask[(ev) >> EVTCHN_SHIFT] & \
181843e1988Sjohnlev 	(1ul << ((ev) & ((1ul << EVTCHN_SHIFT) - 1))))
182843e1988Sjohnlev 
183843e1988Sjohnlev static short evtchn_to_irq[NR_EVENT_CHANNELS];
184843e1988Sjohnlev static cpuset_t evtchn_cpus[NR_EVENT_CHANNELS];
185843e1988Sjohnlev static int	evtchn_owner[NR_EVENT_CHANNELS];
186843e1988Sjohnlev #ifdef DEBUG
187843e1988Sjohnlev static kthread_t *evtchn_owner_thread[NR_EVENT_CHANNELS];
188843e1988Sjohnlev #endif
189843e1988Sjohnlev 
190843e1988Sjohnlev static irq_info_t irq_info[NR_IRQS];
191843e1988Sjohnlev static mec_info_t ipi_info[MAXIPL];
192843e1988Sjohnlev static mec_info_t virq_info[NR_VIRQS];
193843e1988Sjohnlev 
194843e1988Sjohnlev /*
195843e1988Sjohnlev  * See the locking description above.
196843e1988Sjohnlev  */
197843e1988Sjohnlev kmutex_t ec_lock;
198843e1988Sjohnlev 
199843e1988Sjohnlev /*
200843e1988Sjohnlev  * Bitmap indicating which PIRQs require the hypervisor to be notified
201843e1988Sjohnlev  * on unmask.
202843e1988Sjohnlev  */
203843e1988Sjohnlev static unsigned long pirq_needs_eoi[NR_PIRQS / (sizeof (unsigned long) * NBBY)];
204843e1988Sjohnlev 
205843e1988Sjohnlev static int ec_debug_irq = INVALID_IRQ;
206843e1988Sjohnlev int ec_dev_irq = INVALID_IRQ;
207843e1988Sjohnlev 
208843e1988Sjohnlev int
209843e1988Sjohnlev xen_bind_virq(unsigned int virq, processorid_t cpu, int *port)
210843e1988Sjohnlev {
211843e1988Sjohnlev 	evtchn_bind_virq_t bind;
212843e1988Sjohnlev 	int err;
213843e1988Sjohnlev 
214843e1988Sjohnlev 	bind.virq = virq;
215843e1988Sjohnlev 	bind.vcpu = cpu;
216843e1988Sjohnlev 	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind)) == 0)
217843e1988Sjohnlev 		*port = bind.port;
218843e1988Sjohnlev 	else
219843e1988Sjohnlev 		err = xen_xlate_errcode(err);
220843e1988Sjohnlev 	return (err);
221843e1988Sjohnlev }
222843e1988Sjohnlev 
223843e1988Sjohnlev int
224843e1988Sjohnlev xen_bind_interdomain(int domid, int remote_port, int *port)
225843e1988Sjohnlev {
226843e1988Sjohnlev 	evtchn_bind_interdomain_t bind;
227843e1988Sjohnlev 	int err;
228843e1988Sjohnlev 
229843e1988Sjohnlev 	bind.remote_dom  = domid;
230843e1988Sjohnlev 	bind.remote_port = remote_port;
231843e1988Sjohnlev 	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
232843e1988Sjohnlev 	    &bind)) == 0)
233843e1988Sjohnlev 		*port = bind.local_port;
234843e1988Sjohnlev 	else
235843e1988Sjohnlev 		err = xen_xlate_errcode(err);
236843e1988Sjohnlev 	return (err);
237843e1988Sjohnlev }
238843e1988Sjohnlev 
239843e1988Sjohnlev int
240843e1988Sjohnlev xen_alloc_unbound_evtchn(int domid, int *evtchnp)
241843e1988Sjohnlev {
242843e1988Sjohnlev 	evtchn_alloc_unbound_t alloc;
243843e1988Sjohnlev 	int err;
244843e1988Sjohnlev 
245843e1988Sjohnlev 	alloc.dom = DOMID_SELF;
246843e1988Sjohnlev 	alloc.remote_dom = domid;
247843e1988Sjohnlev 
248843e1988Sjohnlev 	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
249843e1988Sjohnlev 	    &alloc)) == 0) {
250843e1988Sjohnlev 		*evtchnp = alloc.port;
251843e1988Sjohnlev 		/* ensure evtchn is masked till we're ready to use it */
252843e1988Sjohnlev 		(void) ec_mask_evtchn(*evtchnp);
253843e1988Sjohnlev 	} else {
254843e1988Sjohnlev 		err = xen_xlate_errcode(err);
255843e1988Sjohnlev 	}
256843e1988Sjohnlev 
257843e1988Sjohnlev 	return (err);
258843e1988Sjohnlev }
259843e1988Sjohnlev 
260843e1988Sjohnlev static int
261843e1988Sjohnlev xen_close_evtchn(int evtchn)
262843e1988Sjohnlev {
263843e1988Sjohnlev 	evtchn_close_t close;
264843e1988Sjohnlev 	int err;
265843e1988Sjohnlev 
266843e1988Sjohnlev 	close.port = evtchn;
267843e1988Sjohnlev 	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
268843e1988Sjohnlev 	if (err)
269843e1988Sjohnlev 		err = xen_xlate_errcode(err);
270843e1988Sjohnlev 	return (err);
271843e1988Sjohnlev }
272843e1988Sjohnlev 
273843e1988Sjohnlev static int
274843e1988Sjohnlev xen_bind_ipi(processorid_t cpu)
275843e1988Sjohnlev {
276843e1988Sjohnlev 	evtchn_bind_ipi_t bind;
277843e1988Sjohnlev 
278843e1988Sjohnlev 	ASSERT(MUTEX_HELD(&ec_lock));
279843e1988Sjohnlev 
280843e1988Sjohnlev 	bind.vcpu = cpu;
281843e1988Sjohnlev 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind) != 0)
282843e1988Sjohnlev 		panic("xen_bind_ipi() failed");
283843e1988Sjohnlev 	return (bind.port);
284843e1988Sjohnlev }
285843e1988Sjohnlev 
286843e1988Sjohnlev /* Send future instances of this interrupt to other vcpu. */
287843e1988Sjohnlev static void
288843e1988Sjohnlev xen_bind_vcpu(int evtchn, int cpu)
289843e1988Sjohnlev {
290843e1988Sjohnlev 	evtchn_bind_vcpu_t bind;
291843e1988Sjohnlev 
292843e1988Sjohnlev 	ASSERT(MUTEX_HELD(&ec_lock));
293843e1988Sjohnlev 
294843e1988Sjohnlev 	bind.port = evtchn;
295843e1988Sjohnlev 	bind.vcpu = cpu;
296843e1988Sjohnlev 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind) != 0)
297843e1988Sjohnlev 		panic("xen_bind_vcpu() failed");
298843e1988Sjohnlev }
299843e1988Sjohnlev 
300843e1988Sjohnlev static int
301843e1988Sjohnlev xen_bind_pirq(int pirq)
302843e1988Sjohnlev {
303843e1988Sjohnlev 	evtchn_bind_pirq_t bind;
304843e1988Sjohnlev 	int ret;
305843e1988Sjohnlev 
306843e1988Sjohnlev 	bind.pirq = pirq;
307843e1988Sjohnlev 	bind.flags = BIND_PIRQ__WILL_SHARE;
308843e1988Sjohnlev 	if ((ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind)) != 0)
309843e1988Sjohnlev 		panic("xen_bind_pirq() failed (err %d)", ret);
310843e1988Sjohnlev 	return (bind.port);
311843e1988Sjohnlev }
312843e1988Sjohnlev 
313843e1988Sjohnlev /* unmask an evtchn and send upcall to appropriate vcpu if pending bit is set */
314843e1988Sjohnlev static void
315843e1988Sjohnlev xen_evtchn_unmask(int evtchn)
316843e1988Sjohnlev {
317843e1988Sjohnlev 	evtchn_unmask_t unmask;
318843e1988Sjohnlev 
319843e1988Sjohnlev 	unmask.port = evtchn;
320843e1988Sjohnlev 	if (HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask) != 0)
321843e1988Sjohnlev 		panic("xen_evtchn_unmask() failed");
322843e1988Sjohnlev }
323843e1988Sjohnlev 
/*
 * Propagate the evtchn_cpus[] binding set for 'evtchn' into each CPU's
 * per-cpu affinity mask (mcpu_evt_pend->evt_affinity): the bit is set
 * on CPUs in the binding set and cleared on all others.  Caller holds
 * ec_lock; the cpu_list walk itself is lockless, with preemption
 * disabled so the walker cannot migrate mid-traversal.
 */
static void
update_evtchn_affinity(int evtchn)
{
	cpu_t *cp;
	struct xen_evt_data *cpe;

	/* evtchn must already be associated with an IRQ */
	ASSERT(evtchn_to_irq[evtchn] != INVALID_IRQ);
	ASSERT(MUTEX_HELD(&ec_lock));

	/*
	 * Use lockless search of cpu_list, similar to mutex_vector_enter().
	 */
	kpreempt_disable();
	cp = cpu_list;
	do {
		cpe = cp->cpu_m.mcpu_evt_pend;
		if (CPU_IN_SET(evtchn_cpus[evtchn], cp->cpu_id))
			SET_EVTCHN_BIT(evtchn, cpe->evt_affinity);
		else
			CLEAR_EVTCHN_BIT(evtchn, cpe->evt_affinity);
	} while ((cp = cp->cpu_next) != cpu_list);
	kpreempt_enable();
}
347843e1988Sjohnlev 
/*
 * Replace the CPU binding set for 'evtchn' with 'cpus' and push the
 * change out to every CPU's affinity mask.  The evtchn must already be
 * mapped to an IRQ.
 */
static void
bind_evtchn_to_cpuset(int evtchn, cpuset_t cpus)
{
	ASSERT(evtchn_to_irq[evtchn] != INVALID_IRQ);

	/* zero-then-OR implements cpuset assignment */
	CPUSET_ZERO(evtchn_cpus[evtchn]);
	CPUSET_OR(evtchn_cpus[evtchn], cpus);
	update_evtchn_affinity(evtchn);
}
357843e1988Sjohnlev 
/*
 * Empty the CPU binding set for 'evtchn', clearing its bit in every
 * CPU's affinity mask.
 */
static void
clear_evtchn_affinity(int evtchn)
{
	CPUSET_ZERO(evtchn_cpus[evtchn]);
	update_evtchn_affinity(evtchn);
}
364843e1988Sjohnlev 
/*
 * Record the irq <-> evtchn association for an already-typed irq_info
 * slot.  IPIs and VIRQs have one event channel per cpu, so their
 * evtchn goes into the per-type mec_info table slot for 'cpu' and the
 * irq stores only the table index; all other types carry the evtchn
 * directly in the irq_info entry.
 */
static void
alloc_irq_evtchn(int irq, int index, int evtchn, int cpu)
{
	irq_info_t *irqp = &irq_info[irq];

	switch (irqp->ii_type) {
	case IRQT_IPI:
		ipi_info[index].mi_evtchns[cpu] = evtchn;
		irqp->ii_u.index = index;
		break;
	case IRQT_VIRQ:
		virq_info[index].mi_evtchns[cpu] = evtchn;
		irqp->ii_u.index = index;
		break;
	default:
		irqp->ii_u.evtchn = evtchn;
		break;
	}

	evtchn_to_irq[evtchn] = irq;

	/*
	 * If a CPU is not specified, we expect to bind it to a CPU later via
	 * the PSM.
	 */
	if (cpu != -1) {
		cpuset_t tcpus;
		CPUSET_ONLY(tcpus, cpu);
		bind_evtchn_to_cpuset(evtchn, tcpus);
	}
}
396843e1988Sjohnlev 
397843e1988Sjohnlev static int
398843e1988Sjohnlev alloc_irq(int type, int index, int evtchn, int cpu)
399843e1988Sjohnlev {
400843e1988Sjohnlev 	int irq;
401843e1988Sjohnlev 	irq_info_t *irqp;
402843e1988Sjohnlev 
403843e1988Sjohnlev 	ASSERT(MUTEX_HELD(&ec_lock));
404843e1988Sjohnlev 	ASSERT(type != IRQT_IPI || cpu != -1);
405843e1988Sjohnlev 
406843e1988Sjohnlev 	for (irq = 0; irq < NR_IRQS; irq++) {
407843e1988Sjohnlev 		if (irq_info[irq].ii_type == IRQT_UNBOUND)
408843e1988Sjohnlev 			break;
409843e1988Sjohnlev 	}
410843e1988Sjohnlev 
411843e1988Sjohnlev 	if (irq == NR_IRQS)
412843e1988Sjohnlev 		panic("No available IRQ to bind to: increase NR_IRQS!\n");
413843e1988Sjohnlev 
414843e1988Sjohnlev 	irqp = &irq_info[irq];
415843e1988Sjohnlev 
416843e1988Sjohnlev 	irqp->ii_type = type;
417843e1988Sjohnlev 	/*
418843e1988Sjohnlev 	 * Set irq/has_handler field to zero which means handler not installed
419843e1988Sjohnlev 	 */
420843e1988Sjohnlev 	irqp->ii_u2.has_handler = 0;
421843e1988Sjohnlev 
422843e1988Sjohnlev 	alloc_irq_evtchn(irq, index, evtchn, cpu);
423843e1988Sjohnlev 	return (irq);
424843e1988Sjohnlev }
425843e1988Sjohnlev 
426843e1988Sjohnlev static int
427843e1988Sjohnlev irq_evtchn(irq_info_t *irqp)
428843e1988Sjohnlev {
429843e1988Sjohnlev 	int evtchn;
430843e1988Sjohnlev 
431843e1988Sjohnlev 	ASSERT(irqp->ii_type != IRQT_DEV_EVTCHN);
432843e1988Sjohnlev 
433843e1988Sjohnlev 	switch (irqp->ii_type) {
434843e1988Sjohnlev 	case IRQT_IPI:
435843e1988Sjohnlev 		ASSERT(irqp->ii_u.index != 0);
436843e1988Sjohnlev 		evtchn = ipi_info[irqp->ii_u.index].mi_evtchns[CPU->cpu_id];
437843e1988Sjohnlev 		break;
438843e1988Sjohnlev 	case IRQT_VIRQ:
439843e1988Sjohnlev 		evtchn = virq_info[irqp->ii_u.index].mi_evtchns[CPU->cpu_id];
440843e1988Sjohnlev 		break;
441843e1988Sjohnlev 	default:
442843e1988Sjohnlev 		evtchn = irqp->ii_u.evtchn;
443843e1988Sjohnlev 		break;
444843e1988Sjohnlev 	}
445843e1988Sjohnlev 
446843e1988Sjohnlev 	return (evtchn);
447843e1988Sjohnlev }
448843e1988Sjohnlev 
/*
 * Return nonzero if 'irq' is a PIRQ that behaves as edge-triggered,
 * i.e. the hypervisor did not flag it as needing an explicit EOI
 * (see pirq_query_unmask()).
 * NOTE(review): the bit test indexes pirq_needs_eoi[] by the irq number
 * directly rather than IRQ_TO_PIRQ(irq); this matches end_pirq() but
 * assumes the irq<->pirq mapping is an identity -- confirm against the
 * IRQ_TO_PIRQ definition.
 */
int
ec_is_edge_pirq(int irq)
{
	return (irq_info[irq].ii_type == IRQT_PIRQ &&
	    !TEST_EVTCHN_BIT(irq, &pirq_needs_eoi[0]));
}
455349b53ddSStuart Maybee 
/*
 * Close the event channel *evtchnp with the hypervisor and drop all of
 * our local state for it (CPU affinity bits, evtchn->irq mapping),
 * then zero the caller's handle.  Caller holds ec_lock.
 */
static void
unbind_evtchn(ushort_t *evtchnp)
{
	int err;

	ASSERT(MUTEX_HELD(&ec_lock));

	ASSERT(*evtchnp != 0);	/* evtchn 0 is never a valid binding */

	err = xen_close_evtchn(*evtchnp);
	ASSERT(err == 0);
	clear_evtchn_affinity(*evtchnp);
	evtchn_to_irq[*evtchnp] = INVALID_IRQ;
	*evtchnp = 0;
}
471843e1988Sjohnlev 
472843e1988Sjohnlev static void
473843e1988Sjohnlev pirq_unmask_notify(int pirq)
474843e1988Sjohnlev {
475843e1988Sjohnlev 	struct physdev_eoi eoi;
476843e1988Sjohnlev 
477843e1988Sjohnlev 	if (TEST_EVTCHN_BIT(pirq, &pirq_needs_eoi[0])) {
478843e1988Sjohnlev 		eoi.irq = pirq;
479843e1988Sjohnlev 		(void) HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
480843e1988Sjohnlev 	}
481843e1988Sjohnlev }
482843e1988Sjohnlev 
483843e1988Sjohnlev static void
484843e1988Sjohnlev pirq_query_unmask(int pirq)
485843e1988Sjohnlev {
486843e1988Sjohnlev 	struct physdev_irq_status_query irq_status;
487843e1988Sjohnlev 
488843e1988Sjohnlev 	irq_status.irq = pirq;
489843e1988Sjohnlev 	(void) HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
490843e1988Sjohnlev 	CLEAR_EVTCHN_BIT(pirq, &pirq_needs_eoi[0]);
491843e1988Sjohnlev 	if (irq_status.flags & XENIRQSTAT_needs_eoi)
492843e1988Sjohnlev 		SET_EVTCHN_BIT(pirq, &pirq_needs_eoi[0]);
493843e1988Sjohnlev }
494843e1988Sjohnlev 
/*
 * Finish servicing a PIRQ interrupt: re-enable its event channel and,
 * when the hypervisor requires it, send the EOI notification.
 */
static void
end_pirq(int irq)
{
	int evtchn = irq_evtchn(&irq_info[irq]);

	/*
	 * If it is an edge-triggered interrupt we have already unmasked
	 */
	if (TEST_EVTCHN_BIT(irq, &pirq_needs_eoi[0])) {
		ec_unmask_evtchn(evtchn);
		pirq_unmask_notify(IRQ_TO_PIRQ(irq));
	}
}
508843e1988Sjohnlev 
/*
 * Bind an event channel to a vcpu: future events on 'evtchn' are
 * delivered to 'cpu'.  Takes ec_lock around the hypercall.
 */
void
ec_bind_vcpu(int evtchn, int cpu)
{
	mutex_enter(&ec_lock);
	xen_bind_vcpu(evtchn, cpu);
	mutex_exit(&ec_lock);
}
519843e1988Sjohnlev 
/*
 * Set up a physical device irq to be associated with an event channel.
 * On first setup the PIRQ is bound, typed, given an IPL, bound to the
 * CPU set in *cpusp and enabled.  If the PIRQ was already bound (a
 * shared IRQ), only the IPL is raised if needed and *cpusp is updated
 * to report the existing CPU binding back to the caller.
 */
void
ec_setup_pirq(int irq, int ipl, cpuset_t *cpusp)
{
	int evtchn;
	irq_info_t *irqp = &irq_info[irq];

	/*
	 * Test if this PIRQ is already bound to an evtchn,
	 * which means it is a shared IRQ and we don't want to
	 * bind and do some initial setup that has already been
	 * done for this irq on a previous trip through this code.
	 */
	if (irqp->ii_u.evtchn == INVALID_EVTCHN) {
		evtchn = xen_bind_pirq(irq);

		/* learn whether this PIRQ needs an explicit EOI on unmask */
		pirq_query_unmask(IRQ_TO_PIRQ(irq));

		irqp->ii_type = IRQT_PIRQ;
		irqp->ii_u.evtchn = evtchn;

		evtchn_to_irq[evtchn] = irq;
		irqp->ii_u2.ipl = ipl;
		ec_set_irq_affinity(irq, *cpusp);
		ec_enable_irq(irq);
		pirq_unmask_notify(IRQ_TO_PIRQ(irq));
	} else {
		ASSERT(irqp->ii_u2.ipl != 0);
		cmn_err(CE_NOTE, "!IRQ%d is shared", irq);
		/* a shared IRQ runs at the highest IPL requested so far */
		if (ipl > irqp->ii_u2.ipl)
			irqp->ii_u2.ipl = ipl;
		*cpusp = evtchn_cpus[irqp->ii_u.evtchn];
	}
}
556843e1988Sjohnlev 
/*
 * Tear down 'irq': close its event channel(s) with the hypervisor and
 * clear the irq_info entry.  A PIRQ entry keeps its IRQT_PIRQ type so
 * the slot remains reserved for the physical irq.
 */
void
ec_unbind_irq(int irq)
{
	irq_info_t *irqp = &irq_info[irq];
	mec_info_t *virqp;
	int drop_lock = 0;
	int type, i;

	/*
	 * Nasty, but we need this during suspend: the caller may already
	 * hold ec_lock, so only take (and later drop) it if we don't.
	 */
	if (mutex_owner(&ec_lock) != curthread) {
		mutex_enter(&ec_lock);
		drop_lock = 1;
	}

	type = irqp->ii_type;

	ASSERT((type == IRQT_EVTCHN) || (type == IRQT_PIRQ) ||
	    (type == IRQT_VIRQ));

	if ((type == IRQT_EVTCHN) || (type == IRQT_PIRQ)) {
		/* There's only one event channel associated with this irq */
		unbind_evtchn(&irqp->ii_u.evtchn);
	} else if (type == IRQT_VIRQ) {
		/*
		 * Each cpu on the system can have its own event channel
		 * associated with a virq.  Unbind them all.
		 */
		virqp = &virq_info[irqp->ii_u.index];
		for (i = 0; i < NCPU; i++) {
			if (virqp->mi_evtchns[i] != 0)
				unbind_evtchn(&virqp->mi_evtchns[i]);
		}
		/* Mark the virq structure as invalid. */
		virqp->mi_irq = INVALID_IRQ;
	}

	bzero(irqp, sizeof (*irqp));
	/* Re-reserve PIRQ. */
	if (type == IRQT_PIRQ)
		irqp->ii_type = IRQT_PIRQ;

	if (drop_lock)
		mutex_exit(&ec_lock);
}
603843e1988Sjohnlev 
/*
 * Rebind an event channel for delivery to a CPU.  No-op for IPI, VIRQ
 * and /dev/xen/evtchn IRQs, whose bindings are fixed at allocation
 * time.  'dest' must contain at least one CPU; the first CPU found in
 * the set becomes the delivery target.
 */
void
ec_set_irq_affinity(int irq, cpuset_t dest)
{
	int evtchn, tcpu;
	irq_info_t *irqp = &irq_info[irq];

	mutex_enter(&ec_lock);

	ASSERT(irq < NR_IRQS);
	ASSERT(irqp->ii_type != IRQT_UNBOUND);

	/*
	 * Binding is done at allocation time for these types, so we should
	 * never modify them.
	 */
	if (irqp->ii_type == IRQT_IPI || irqp->ii_type == IRQT_VIRQ ||
	    irqp->ii_type == IRQT_DEV_EVTCHN) {
		mutex_exit(&ec_lock);
		return;
	}

	CPUSET_FIND(dest, tcpu);
	ASSERT(tcpu != CPUSET_NOTINSET);

	evtchn = irq_evtchn(irqp);

	/* tell the hypervisor first, then update our local mapping state */
	xen_bind_vcpu(evtchn, tcpu);

	bind_evtchn_to_cpuset(evtchn, dest);

	mutex_exit(&ec_lock);

	/*
	 * Now send the new target processor a NOP IPI.
	 * It will check for any pending interrupts, and so service any that
	 * got delivered to the wrong processor by mistake.
	 */
	if (ncpus > 1)
		poke_cpu(tcpu);
}
647843e1988Sjohnlev 
648843e1988Sjohnlev int
649843e1988Sjohnlev ec_set_irq_priority(int irq, int pri)
650843e1988Sjohnlev {
651843e1988Sjohnlev 	irq_info_t *irqp;
652843e1988Sjohnlev 
653843e1988Sjohnlev 	if (irq >= NR_IRQS)
654843e1988Sjohnlev 		return (-1);
655843e1988Sjohnlev 
656843e1988Sjohnlev 	irqp = &irq_info[irq];
657843e1988Sjohnlev 
658843e1988Sjohnlev 	if (irqp->ii_type == IRQT_UNBOUND)
659843e1988Sjohnlev 		return (-1);
660843e1988Sjohnlev 
661843e1988Sjohnlev 	irqp->ii_u2.ipl = pri;
662843e1988Sjohnlev 
663843e1988Sjohnlev 	return (0);
664843e1988Sjohnlev }
665843e1988Sjohnlev 
/*
 * Reset the interrupt priority level of the (bound) 'irq' to zero.
 */
void
ec_clear_irq_priority(int irq)
{
	irq_info_t *irqp = &irq_info[irq];

	ASSERT(irq < NR_IRQS);
	ASSERT(irqp->ii_type != IRQT_UNBOUND);

	irqp->ii_u2.ipl = 0;
}
676843e1988Sjohnlev 
/*
 * Allocate a fresh IRQ of type IRQT_EVTCHN for an existing event
 * channel and return that IRQ.  The evtchn must not be mapped yet;
 * CPU binding is deferred to the PSM (cpu argument of -1).
 */
int
ec_bind_evtchn_to_irq(int evtchn)
{
	mutex_enter(&ec_lock);

	ASSERT(evtchn_to_irq[evtchn] == INVALID_IRQ);

	(void) alloc_irq(IRQT_EVTCHN, 0, evtchn, -1);

	mutex_exit(&ec_lock);
	return (evtchn_to_irq[evtchn]);
}
689843e1988Sjohnlev 
/*
 * Bind 'virq' on 'cpu' to an event channel and return the single IRQ
 * shared by all of that virq's per-cpu event channels.  The first
 * caller allocates the IRQ; subsequent calls for other CPUs attach
 * their new evtchn to the already-allocated IRQ.
 */
int
ec_bind_virq_to_irq(int virq, int cpu)
{
	int err;
	int evtchn;
	mec_info_t *virqp;

	virqp = &virq_info[virq];
	mutex_enter(&ec_lock);

	err = xen_bind_virq(virq, cpu, &evtchn);
	ASSERT(err == 0);

	ASSERT(evtchn_to_irq[evtchn] == INVALID_IRQ);

	if (virqp->mi_irq == INVALID_IRQ) {
		virqp->mi_irq = alloc_irq(IRQT_VIRQ, virq, evtchn, cpu);
	} else {
		alloc_irq_evtchn(virqp->mi_irq, virq, evtchn, cpu);
	}

	mutex_exit(&ec_lock);

	return (virqp->mi_irq);
}
715843e1988Sjohnlev 
716843e1988Sjohnlev int
717843e1988Sjohnlev ec_bind_ipi_to_irq(int ipl, int cpu)
718843e1988Sjohnlev {
719843e1988Sjohnlev 	int evtchn;
720843e1988Sjohnlev 	ulong_t flags;
721843e1988Sjohnlev 	mec_info_t *ipip;
722843e1988Sjohnlev 
723843e1988Sjohnlev 	mutex_enter(&ec_lock);
724843e1988Sjohnlev 
725843e1988Sjohnlev 	ipip = &ipi_info[ipl];
726843e1988Sjohnlev 
727843e1988Sjohnlev 	evtchn = xen_bind_ipi(cpu);
728843e1988Sjohnlev 
729843e1988Sjohnlev 	ASSERT(evtchn_to_irq[evtchn] == INVALID_IRQ);
730843e1988Sjohnlev 
731843e1988Sjohnlev 	if (ipip->mi_irq == INVALID_IRQ) {
732843e1988Sjohnlev 		ipip->mi_irq = alloc_irq(IRQT_IPI, ipl, evtchn, cpu);
733843e1988Sjohnlev 	} else {
734843e1988Sjohnlev 		alloc_irq_evtchn(ipip->mi_irq, ipl, evtchn, cpu);
735843e1988Sjohnlev 	}
736843e1988Sjohnlev 
737843e1988Sjohnlev 	/*
738843e1988Sjohnlev 	 * Unmask the new evtchn so that it can be seen by the target cpu
739843e1988Sjohnlev 	 */
740843e1988Sjohnlev 	flags = intr_clear();
741843e1988Sjohnlev 	ec_unmask_evtchn(evtchn);
742843e1988Sjohnlev 	intr_restore(flags);
743843e1988Sjohnlev 
744843e1988Sjohnlev 	mutex_exit(&ec_lock);
745843e1988Sjohnlev 	return (ipip->mi_irq);
746843e1988Sjohnlev }
747843e1988Sjohnlev 
748843e1988Sjohnlev /*
749843e1988Sjohnlev  * When bringing up a CPU, bind to all the IPIs that CPU0 bound.
750843e1988Sjohnlev  */
751843e1988Sjohnlev void
752843e1988Sjohnlev ec_bind_cpu_ipis(int cpu)
753843e1988Sjohnlev {
754843e1988Sjohnlev 	int i;
755843e1988Sjohnlev 
756843e1988Sjohnlev 	for (i = 0; i < MAXIPL; i++) {
757843e1988Sjohnlev 		mec_info_t *ipip = &ipi_info[i];
758843e1988Sjohnlev 		if (ipip->mi_irq == INVALID_IRQ)
759843e1988Sjohnlev 			continue;
760843e1988Sjohnlev 
761843e1988Sjohnlev 		(void) ec_bind_ipi_to_irq(i, cpu);
762843e1988Sjohnlev 	}
763843e1988Sjohnlev }
764843e1988Sjohnlev 
765843e1988Sjohnlev /*
766843e1988Sjohnlev  * Can this IRQ be rebound to another CPU?
767843e1988Sjohnlev  */
768843e1988Sjohnlev int
769843e1988Sjohnlev ec_irq_rebindable(int irq)
770843e1988Sjohnlev {
771843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
772843e1988Sjohnlev 
773843e1988Sjohnlev 	if (irqp->ii_u.evtchn == 0)
774843e1988Sjohnlev 		return (0);
775843e1988Sjohnlev 
776843e1988Sjohnlev 	return (irqp->ii_type == IRQT_EVTCHN || irqp->ii_type == IRQT_PIRQ);
777843e1988Sjohnlev }
778843e1988Sjohnlev 
779843e1988Sjohnlev /*
780843e1988Sjohnlev  * Should this IRQ be unbound from this CPU (which is being offlined) to
781843e1988Sjohnlev  * another?
782843e1988Sjohnlev  */
783843e1988Sjohnlev int
784843e1988Sjohnlev ec_irq_needs_rebind(int irq, int cpu)
785843e1988Sjohnlev {
786843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
787843e1988Sjohnlev 
788843e1988Sjohnlev 	return (ec_irq_rebindable(irq) &&
789843e1988Sjohnlev 	    CPU_IN_SET(evtchn_cpus[irqp->ii_u.evtchn], cpu));
790843e1988Sjohnlev }
791843e1988Sjohnlev 
792843e1988Sjohnlev void
793843e1988Sjohnlev ec_send_ipi(int ipl, int cpu)
794843e1988Sjohnlev {
795843e1988Sjohnlev 	mec_info_t *ipip = &ipi_info[ipl];
796843e1988Sjohnlev 
797843e1988Sjohnlev 	ASSERT(ipip->mi_irq != INVALID_IRQ);
798843e1988Sjohnlev 
799843e1988Sjohnlev 	ec_notify_via_evtchn(ipip->mi_evtchns[cpu]);
800843e1988Sjohnlev }
801843e1988Sjohnlev 
802843e1988Sjohnlev void
803843e1988Sjohnlev ec_try_ipi(int ipl, int cpu)
804843e1988Sjohnlev {
805843e1988Sjohnlev 	mec_info_t *ipip = &ipi_info[ipl];
806843e1988Sjohnlev 
807843e1988Sjohnlev 	if (ipip->mi_irq == INVALID_IRQ || ipip->mi_irq == 0)
808843e1988Sjohnlev 		return;
809843e1988Sjohnlev 
810843e1988Sjohnlev 	ec_notify_via_evtchn(ipip->mi_evtchns[cpu]);
811843e1988Sjohnlev }
812843e1988Sjohnlev 
813843e1988Sjohnlev void
814843e1988Sjohnlev ec_irq_add_evtchn(int irq, int evtchn)
815843e1988Sjohnlev {
816843e1988Sjohnlev 	mutex_enter(&ec_lock);
817843e1988Sjohnlev 
818843e1988Sjohnlev 	/*
819843e1988Sjohnlev 	 * See description of IRQT_DEV_EVTCHN above.
820843e1988Sjohnlev 	 */
821843e1988Sjohnlev 	ASSERT(irq == ec_dev_irq);
822843e1988Sjohnlev 
823843e1988Sjohnlev 	alloc_irq_evtchn(irq, 0, evtchn, 0);
824843e1988Sjohnlev 	/*
825843e1988Sjohnlev 	 * We enforce that the representative event channel for IRQT_DEV_EVTCHN
826843e1988Sjohnlev 	 * is zero, so PSM operations on it have no effect.
827843e1988Sjohnlev 	 */
828843e1988Sjohnlev 	irq_info[irq].ii_u.evtchn = 0;
829843e1988Sjohnlev 	mutex_exit(&ec_lock);
830843e1988Sjohnlev }
831843e1988Sjohnlev 
832843e1988Sjohnlev void
833843e1988Sjohnlev ec_irq_rm_evtchn(int irq, int evtchn)
834843e1988Sjohnlev {
835843e1988Sjohnlev 	ushort_t ec = evtchn;
836843e1988Sjohnlev 
837843e1988Sjohnlev 	mutex_enter(&ec_lock);
838843e1988Sjohnlev 	ASSERT(irq == ec_dev_irq);
839843e1988Sjohnlev 	unbind_evtchn(&ec);
840843e1988Sjohnlev 	mutex_exit(&ec_lock);
841843e1988Sjohnlev }
842843e1988Sjohnlev 
843843e1988Sjohnlev /*
844843e1988Sjohnlev  * Allocate an /dev/xen/evtchn IRQ.  See the big comment at the top
845843e1988Sjohnlev  * for an explanation.
846843e1988Sjohnlev  */
847843e1988Sjohnlev int
848843e1988Sjohnlev ec_dev_alloc_irq(void)
849843e1988Sjohnlev {
850843e1988Sjohnlev 	int i;
851843e1988Sjohnlev 	irq_info_t *irqp;
852843e1988Sjohnlev 
853843e1988Sjohnlev 	for (i = 0; i < NR_IRQS; i++) {
854843e1988Sjohnlev 		if (irq_info[i].ii_type == IRQT_UNBOUND)
855843e1988Sjohnlev 			break;
856843e1988Sjohnlev 	}
857843e1988Sjohnlev 
858843e1988Sjohnlev 	ASSERT(i != NR_IRQS);
859843e1988Sjohnlev 
860843e1988Sjohnlev 	irqp = &irq_info[i];
861843e1988Sjohnlev 	irqp->ii_type = IRQT_DEV_EVTCHN;
862843e1988Sjohnlev 	irqp->ii_u2.ipl = IPL_EVTCHN;
863843e1988Sjohnlev 	/*
864843e1988Sjohnlev 	 * Force the evtchn to zero for the special evtchn device irq
865843e1988Sjohnlev 	 */
866843e1988Sjohnlev 	irqp->ii_u.evtchn = 0;
867843e1988Sjohnlev 	return (i);
868843e1988Sjohnlev }
869843e1988Sjohnlev 
870843e1988Sjohnlev void
871843e1988Sjohnlev ec_enable_irq(unsigned int irq)
872843e1988Sjohnlev {
873843e1988Sjohnlev 	ulong_t flag;
874843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
875843e1988Sjohnlev 
876843e1988Sjohnlev 	if (irqp->ii_type == IRQT_DEV_EVTCHN)
877843e1988Sjohnlev 		return;
878843e1988Sjohnlev 
879843e1988Sjohnlev 	flag = intr_clear();
880843e1988Sjohnlev 	ec_unmask_evtchn(irq_evtchn(irqp));
881843e1988Sjohnlev 	intr_restore(flag);
882843e1988Sjohnlev }
883843e1988Sjohnlev 
884843e1988Sjohnlev void
885843e1988Sjohnlev ec_disable_irq(unsigned int irq)
886843e1988Sjohnlev {
887843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
888843e1988Sjohnlev 
889843e1988Sjohnlev 	if (irqp->ii_type == IRQT_DEV_EVTCHN)
890843e1988Sjohnlev 		return;
891843e1988Sjohnlev 
892843e1988Sjohnlev 	/*
893843e1988Sjohnlev 	 * Spin till we are the one to mask the evtchn
894843e1988Sjohnlev 	 * Ensures no one else can be servicing this evtchn.
895843e1988Sjohnlev 	 */
896843e1988Sjohnlev 	while (!ec_mask_evtchn(irq_evtchn(irqp)))
897843e1988Sjohnlev 		SMT_PAUSE();
898843e1988Sjohnlev }
899843e1988Sjohnlev 
900843e1988Sjohnlev static int
901843e1988Sjohnlev ec_evtchn_pending(uint_t ev)
902843e1988Sjohnlev {
903843e1988Sjohnlev 	uint_t evi;
904843e1988Sjohnlev 	shared_info_t *si = HYPERVISOR_shared_info;
905843e1988Sjohnlev 
906843e1988Sjohnlev 	evi = ev >> EVTCHN_SHIFT;
907843e1988Sjohnlev 	ev &= (1ul << EVTCHN_SHIFT) - 1;
908843e1988Sjohnlev 	return ((si->evtchn_pending[evi] & (1ul << ev)) != 0);
909843e1988Sjohnlev }
910843e1988Sjohnlev 
911843e1988Sjohnlev int
912843e1988Sjohnlev ec_pending_irq(unsigned int irq)
913843e1988Sjohnlev {
914843e1988Sjohnlev 	int evtchn = irq_evtchn(&irq_info[irq]);
915843e1988Sjohnlev 
916843e1988Sjohnlev 	return (ec_evtchn_pending(evtchn));
917843e1988Sjohnlev }
918843e1988Sjohnlev 
919843e1988Sjohnlev void
920843e1988Sjohnlev ec_clear_irq(int irq)
921843e1988Sjohnlev {
922843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
923843e1988Sjohnlev 	int evtchn;
924843e1988Sjohnlev 
925843e1988Sjohnlev 	if (irqp->ii_type == IRQT_DEV_EVTCHN)
926843e1988Sjohnlev 		return;
927843e1988Sjohnlev 
928843e1988Sjohnlev 	ASSERT(irqp->ii_type != IRQT_UNBOUND);
929843e1988Sjohnlev 
930843e1988Sjohnlev 	evtchn = irq_evtchn(irqp);
931843e1988Sjohnlev 
932843e1988Sjohnlev 	ASSERT(EVTCHN_MASKED(evtchn));
933843e1988Sjohnlev 	ec_clear_evtchn(evtchn);
934843e1988Sjohnlev }
935843e1988Sjohnlev 
936843e1988Sjohnlev void
937843e1988Sjohnlev ec_unmask_irq(int irq)
938843e1988Sjohnlev {
939843e1988Sjohnlev 	ulong_t flags;
940843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
941843e1988Sjohnlev 
942843e1988Sjohnlev 	flags = intr_clear();
943843e1988Sjohnlev 	switch (irqp->ii_type) {
944843e1988Sjohnlev 	case IRQT_PIRQ:
945843e1988Sjohnlev 		end_pirq(irq);
946843e1988Sjohnlev 		break;
947843e1988Sjohnlev 	case IRQT_DEV_EVTCHN:
948843e1988Sjohnlev 		break;
949843e1988Sjohnlev 	default:
950843e1988Sjohnlev 		ec_unmask_evtchn(irq_evtchn(irqp));
951843e1988Sjohnlev 		break;
952843e1988Sjohnlev 	}
953843e1988Sjohnlev 	intr_restore(flags);
954843e1988Sjohnlev }
955843e1988Sjohnlev 
956843e1988Sjohnlev void
957843e1988Sjohnlev ec_try_unmask_irq(int irq)
958843e1988Sjohnlev {
959843e1988Sjohnlev 	ulong_t flags;
960843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
961843e1988Sjohnlev 	int evtchn;
962843e1988Sjohnlev 
963843e1988Sjohnlev 	flags = intr_clear();
964843e1988Sjohnlev 	switch (irqp->ii_type) {
965843e1988Sjohnlev 	case IRQT_PIRQ:
966843e1988Sjohnlev 		end_pirq(irq);
967843e1988Sjohnlev 		break;
968843e1988Sjohnlev 	case IRQT_DEV_EVTCHN:
969843e1988Sjohnlev 		break;
970843e1988Sjohnlev 	default:
971843e1988Sjohnlev 		if ((evtchn = irq_evtchn(irqp)) != 0)
972843e1988Sjohnlev 			ec_unmask_evtchn(evtchn);
973843e1988Sjohnlev 		break;
974843e1988Sjohnlev 	}
975843e1988Sjohnlev 	intr_restore(flags);
976843e1988Sjohnlev }
977843e1988Sjohnlev 
978843e1988Sjohnlev /*
979843e1988Sjohnlev  * Poll until an event channel is ready or 'check_func' returns true.  This can
980843e1988Sjohnlev  * only be used in a situation where interrupts are masked, otherwise we have a
981843e1988Sjohnlev  * classic time-of-check vs. time-of-use race.
982843e1988Sjohnlev  */
983843e1988Sjohnlev void
984843e1988Sjohnlev ec_wait_on_evtchn(int evtchn, int (*check_func)(void *), void *arg)
985843e1988Sjohnlev {
986843e1988Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
987843e1988Sjohnlev 		while (!check_func(arg))
988843e1988Sjohnlev 			(void) HYPERVISOR_yield();
989843e1988Sjohnlev 		return;
990843e1988Sjohnlev 	}
991843e1988Sjohnlev 
992843e1988Sjohnlev 	ASSERT(CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask != 0);
993843e1988Sjohnlev 
994843e1988Sjohnlev 	for (;;) {
995843e1988Sjohnlev 		evtchn_port_t ports[1];
996843e1988Sjohnlev 
997843e1988Sjohnlev 		ports[0] = evtchn;
998843e1988Sjohnlev 
999843e1988Sjohnlev 		ec_clear_evtchn(evtchn);
1000843e1988Sjohnlev 
1001843e1988Sjohnlev 		if (check_func(arg))
1002843e1988Sjohnlev 			return;
1003843e1988Sjohnlev 
1004843e1988Sjohnlev 		(void) HYPERVISOR_poll(ports, 1, 0);
1005843e1988Sjohnlev 	}
1006843e1988Sjohnlev }
1007843e1988Sjohnlev 
1008843e1988Sjohnlev void
1009843e1988Sjohnlev ec_wait_on_ipi(int ipl, int (*check_func)(void *), void *arg)
1010843e1988Sjohnlev {
1011843e1988Sjohnlev 	mec_info_t *ipip = &ipi_info[ipl];
1012843e1988Sjohnlev 
1013843e1988Sjohnlev 	if (ipip->mi_irq == INVALID_IRQ || ipip->mi_irq == 0)
1014843e1988Sjohnlev 		return;
1015843e1988Sjohnlev 
1016843e1988Sjohnlev 	ec_wait_on_evtchn(ipip->mi_evtchns[CPU->cpu_id], check_func, arg);
1017843e1988Sjohnlev }
1018843e1988Sjohnlev 
1019843e1988Sjohnlev void
1020843e1988Sjohnlev ec_suspend(void)
1021843e1988Sjohnlev {
1022843e1988Sjohnlev 	irq_info_t *irqp;
1023843e1988Sjohnlev 	ushort_t *evtchnp;
1024843e1988Sjohnlev 	int i;
1025843e1988Sjohnlev 	int c;
1026843e1988Sjohnlev 
1027843e1988Sjohnlev 	ASSERT(MUTEX_HELD(&ec_lock));
1028843e1988Sjohnlev 
1029843e1988Sjohnlev 	for (i = 0; i < MAXIPL; i++) {
1030843e1988Sjohnlev 		if (ipi_info[i].mi_irq == INVALID_IRQ)
1031843e1988Sjohnlev 			continue;
1032843e1988Sjohnlev 
1033843e1988Sjohnlev 		for (c = 0; c < NCPU; c++) {
1034843e1988Sjohnlev 			if (cpu[c] == NULL)
1035843e1988Sjohnlev 				continue;
1036843e1988Sjohnlev 
1037843e1988Sjohnlev 			if (CPU_IN_SET(cpu_suspend_lost_set, c))
1038843e1988Sjohnlev 				continue;
1039843e1988Sjohnlev 
1040843e1988Sjohnlev 			evtchnp = &ipi_info[i].mi_evtchns[c];
1041843e1988Sjohnlev 			ASSERT(*evtchnp != 0);
1042843e1988Sjohnlev 			unbind_evtchn(evtchnp);
1043843e1988Sjohnlev 		}
1044843e1988Sjohnlev 	}
1045843e1988Sjohnlev 
1046843e1988Sjohnlev 	for (i = 0; i < NR_VIRQS; i++) {
1047843e1988Sjohnlev 		if (virq_info[i].mi_irq == INVALID_IRQ)
1048843e1988Sjohnlev 			continue;
1049843e1988Sjohnlev 
1050843e1988Sjohnlev 		/*
1051843e1988Sjohnlev 		 * If we're sharing a single event channel across all CPUs, we
1052843e1988Sjohnlev 		 * should only unbind once.
1053843e1988Sjohnlev 		 */
1054843e1988Sjohnlev 		if (virq_info[i].mi_shared) {
1055843e1988Sjohnlev 			evtchnp = &virq_info[i].mi_evtchns[0];
1056843e1988Sjohnlev 			unbind_evtchn(evtchnp);
1057843e1988Sjohnlev 			for (c = 1; c < NCPU; c++)
1058843e1988Sjohnlev 				virq_info[i].mi_evtchns[c] = 0;
1059843e1988Sjohnlev 		} else {
1060843e1988Sjohnlev 			for (c = 0; c < NCPU; c++) {
1061843e1988Sjohnlev 				if (cpu[c] == NULL)
1062843e1988Sjohnlev 					continue;
1063843e1988Sjohnlev 
1064843e1988Sjohnlev 				evtchnp = &virq_info[i].mi_evtchns[c];
1065843e1988Sjohnlev 				if (*evtchnp != 0)
1066843e1988Sjohnlev 					unbind_evtchn(evtchnp);
1067843e1988Sjohnlev 			}
1068843e1988Sjohnlev 		}
1069843e1988Sjohnlev 	}
1070843e1988Sjohnlev 
1071843e1988Sjohnlev 	for (i = 0; i < NR_IRQS; i++) {
1072843e1988Sjohnlev 		irqp = &irq_info[i];
1073843e1988Sjohnlev 
1074843e1988Sjohnlev 		switch (irqp->ii_type) {
1075843e1988Sjohnlev 		case IRQT_EVTCHN:
1076843e1988Sjohnlev 		case IRQT_DEV_EVTCHN:
1077843e1988Sjohnlev 			(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
1078843e1988Sjohnlev 			break;
1079843e1988Sjohnlev 		case IRQT_PIRQ:
1080843e1988Sjohnlev 			if (irqp->ii_u.evtchn != 0)
1081843e1988Sjohnlev 				(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
1082843e1988Sjohnlev 			break;
1083843e1988Sjohnlev 		default:
1084843e1988Sjohnlev 			break;
1085843e1988Sjohnlev 		}
1086843e1988Sjohnlev 	}
1087843e1988Sjohnlev }
1088843e1988Sjohnlev 
1089843e1988Sjohnlev /*
1090843e1988Sjohnlev  * The debug irq is special, we only have one evtchn and irq but we allow all
1091843e1988Sjohnlev  * cpus to service it.  It's marked as shared and we propogate the event
1092843e1988Sjohnlev  * channel into all CPUs by hand.
1093843e1988Sjohnlev  */
1094843e1988Sjohnlev static void
1095843e1988Sjohnlev share_virq(mec_info_t *virqp)
1096843e1988Sjohnlev {
1097843e1988Sjohnlev 	int evtchn = virqp->mi_evtchns[0];
1098843e1988Sjohnlev 	cpuset_t tset;
1099843e1988Sjohnlev 	int i;
1100843e1988Sjohnlev 
1101843e1988Sjohnlev 	ASSERT(evtchn != 0);
1102843e1988Sjohnlev 
1103843e1988Sjohnlev 	virqp->mi_shared = 1;
1104843e1988Sjohnlev 
1105843e1988Sjohnlev 	for (i = 1; i < NCPU; i++)
1106843e1988Sjohnlev 		virqp->mi_evtchns[i] = evtchn;
1107843e1988Sjohnlev 	CPUSET_ALL(tset);
1108843e1988Sjohnlev 	bind_evtchn_to_cpuset(evtchn, tset);
1109843e1988Sjohnlev }
1110843e1988Sjohnlev 
1111843e1988Sjohnlev static void
1112843e1988Sjohnlev virq_resume(int virq)
1113843e1988Sjohnlev {
1114843e1988Sjohnlev 	mec_info_t *virqp = &virq_info[virq];
1115843e1988Sjohnlev 	int evtchn;
1116843e1988Sjohnlev 	int i, err;
1117843e1988Sjohnlev 
1118843e1988Sjohnlev 	for (i = 0; i < NCPU; i++) {
1119843e1988Sjohnlev 		cpuset_t tcpus;
1120843e1988Sjohnlev 
1121843e1988Sjohnlev 		if (cpu[i] == NULL || CPU_IN_SET(cpu_suspend_lost_set, i))
1122843e1988Sjohnlev 			continue;
1123843e1988Sjohnlev 
1124843e1988Sjohnlev 		err = xen_bind_virq(virq, i, &evtchn);
1125843e1988Sjohnlev 		ASSERT(err == 0);
1126843e1988Sjohnlev 
1127843e1988Sjohnlev 		virqp->mi_evtchns[i] = evtchn;
1128843e1988Sjohnlev 		evtchn_to_irq[evtchn] = virqp->mi_irq;
1129843e1988Sjohnlev 		CPUSET_ONLY(tcpus, i);
1130843e1988Sjohnlev 		bind_evtchn_to_cpuset(evtchn, tcpus);
1131843e1988Sjohnlev 		ec_unmask_evtchn(evtchn);
1132843e1988Sjohnlev 		/*
1133843e1988Sjohnlev 		 * only timer VIRQ is bound to all cpus
1134843e1988Sjohnlev 		 */
1135843e1988Sjohnlev 		if (virq != VIRQ_TIMER)
1136843e1988Sjohnlev 			break;
1137843e1988Sjohnlev 	}
1138843e1988Sjohnlev 
1139843e1988Sjohnlev 	if (virqp->mi_shared)
1140843e1988Sjohnlev 		share_virq(virqp);
1141843e1988Sjohnlev }
1142843e1988Sjohnlev 
1143843e1988Sjohnlev static void
1144843e1988Sjohnlev ipi_resume(int ipl)
1145843e1988Sjohnlev {
1146843e1988Sjohnlev 	mec_info_t *ipip = &ipi_info[ipl];
1147843e1988Sjohnlev 	int i;
1148843e1988Sjohnlev 
1149843e1988Sjohnlev 	for (i = 0; i < NCPU; i++) {
1150843e1988Sjohnlev 		cpuset_t tcpus;
1151843e1988Sjohnlev 		int evtchn;
1152843e1988Sjohnlev 
1153843e1988Sjohnlev 		if (cpu[i] == NULL || CPU_IN_SET(cpu_suspend_lost_set, i))
1154843e1988Sjohnlev 			continue;
1155843e1988Sjohnlev 
1156843e1988Sjohnlev 		evtchn = xen_bind_ipi(i);
1157843e1988Sjohnlev 		ipip->mi_evtchns[i] = evtchn;
1158843e1988Sjohnlev 		evtchn_to_irq[evtchn] = ipip->mi_irq;
1159843e1988Sjohnlev 		CPUSET_ONLY(tcpus, i);
1160843e1988Sjohnlev 		bind_evtchn_to_cpuset(evtchn, tcpus);
1161843e1988Sjohnlev 		ec_unmask_evtchn(evtchn);
1162843e1988Sjohnlev 	}
1163843e1988Sjohnlev }
1164843e1988Sjohnlev 
1165843e1988Sjohnlev void
1166843e1988Sjohnlev ec_resume(void)
1167843e1988Sjohnlev {
1168843e1988Sjohnlev 	int i;
1169843e1988Sjohnlev 
1170843e1988Sjohnlev 	/* New event-channel space is not 'live' yet. */
1171843e1988Sjohnlev 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
1172843e1988Sjohnlev 		(void) ec_mask_evtchn(i);
1173843e1988Sjohnlev 
1174843e1988Sjohnlev 	for (i = 0; i < MAXIPL; i++) {
1175843e1988Sjohnlev 		if (ipi_info[i].mi_irq == INVALID_IRQ)
1176843e1988Sjohnlev 			continue;
1177843e1988Sjohnlev 		ipi_resume(i);
1178843e1988Sjohnlev 	}
1179843e1988Sjohnlev 
1180843e1988Sjohnlev 	for (i = 0; i < NR_VIRQS; i++) {
1181843e1988Sjohnlev 		if (virq_info[i].mi_irq == INVALID_IRQ)
1182843e1988Sjohnlev 			continue;
1183843e1988Sjohnlev 		virq_resume(i);
1184843e1988Sjohnlev 	}
1185843e1988Sjohnlev }
1186843e1988Sjohnlev 
1187349b53ddSStuart Maybee int
1188843e1988Sjohnlev ec_init(void)
1189843e1988Sjohnlev {
1190843e1988Sjohnlev 	int i;
1191843e1988Sjohnlev 	mutex_init(&ec_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1192843e1988Sjohnlev 
1193843e1988Sjohnlev 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1194843e1988Sjohnlev 		CPUSET_ZERO(evtchn_cpus[i]);
1195843e1988Sjohnlev 		evtchn_to_irq[i] = INVALID_IRQ;
1196843e1988Sjohnlev 		(void) ec_mask_evtchn(i);
1197843e1988Sjohnlev 	}
1198843e1988Sjohnlev 
1199843e1988Sjohnlev 	for (i = 0; i < MAXIPL; i++)
1200843e1988Sjohnlev 		ipi_info[i].mi_irq = INVALID_IRQ;
1201843e1988Sjohnlev 
1202843e1988Sjohnlev 	for (i = 0; i < NR_VIRQS; i++)
1203843e1988Sjohnlev 		virq_info[i].mi_irq = INVALID_IRQ;
1204843e1988Sjohnlev 
1205843e1988Sjohnlev 	/*
1206843e1988Sjohnlev 	 * Phys IRQ space is statically bound (1:1 mapping), grab the IRQs
1207843e1988Sjohnlev 	 * now.
1208843e1988Sjohnlev 	 */
1209843e1988Sjohnlev 	for (i = PIRQ_BASE; i < NR_PIRQS; i++) {
1210843e1988Sjohnlev 		irq_info[PIRQ_TO_IRQ(i)].ii_type = IRQT_PIRQ;
1211843e1988Sjohnlev 	}
1212349b53ddSStuart Maybee 
1213349b53ddSStuart Maybee 	return (0);
1214843e1988Sjohnlev }
1215843e1988Sjohnlev 
1216843e1988Sjohnlev void
1217843e1988Sjohnlev ec_init_debug_irq()
1218843e1988Sjohnlev {
1219843e1988Sjohnlev 	int irq;
1220843e1988Sjohnlev 
1221843e1988Sjohnlev 	irq = ec_bind_virq_to_irq(VIRQ_DEBUG, 0);
1222843e1988Sjohnlev 	(void) add_avintr(NULL, IPL_DEBUG, (avfunc)xen_debug_handler,
1223843e1988Sjohnlev 	    "debug", irq, NULL, NULL, NULL, NULL);
1224843e1988Sjohnlev 
1225843e1988Sjohnlev 	mutex_enter(&ec_lock);
1226843e1988Sjohnlev 	share_virq(&virq_info[irq_info[irq].ii_u.index]);
1227843e1988Sjohnlev 	mutex_exit(&ec_lock);
1228843e1988Sjohnlev 	ec_debug_irq = irq;
1229843e1988Sjohnlev }
1230843e1988Sjohnlev 
1231843e1988Sjohnlev #define	UNBLOCKED_EVENTS(si, ix, cpe, cpu_id) \
1232843e1988Sjohnlev 	((si)->evtchn_pending[ix] & ~(si)->evtchn_mask[ix] & \
1233843e1988Sjohnlev 		(cpe)->evt_affinity[ix])
1234843e1988Sjohnlev 
1235349b53ddSStuart Maybee 
1236843e1988Sjohnlev /*
1237843e1988Sjohnlev  * This is the entry point for processing events from xen
1238843e1988Sjohnlev  *
1239843e1988Sjohnlev  * (See the commentary associated with the shared_info_st structure
1240843e1988Sjohnlev  * in hypervisor-if.h)
1241843e1988Sjohnlev  *
1242843e1988Sjohnlev  * Since the event channel mechanism doesn't really implement the
1243843e1988Sjohnlev  * concept of priority like hardware interrupt controllers, we simulate
1244843e1988Sjohnlev  * that in software here using the cpu priority field and the pending
1245843e1988Sjohnlev  * interrupts field.  Events/interrupts that are not able to be serviced
1246843e1988Sjohnlev  * now because they are at a lower priority than the current cpu priority
1247843e1988Sjohnlev  * cause a level bit to be recorded in the pending interrupts word.  When
1248843e1988Sjohnlev  * the priority is lowered (either by spl or interrupt exit code) the pending
1249843e1988Sjohnlev  * levels are checked and an upcall is scheduled if there are events/interrupts
1250843e1988Sjohnlev  * that have become deliverable.
1251843e1988Sjohnlev  */
1252843e1988Sjohnlev void
1253843e1988Sjohnlev xen_callback_handler(struct regs *rp, trap_trace_rec_t *ttp)
1254843e1988Sjohnlev {
1255843e1988Sjohnlev 	ulong_t pending_sels, pe, selbit;
1256349b53ddSStuart Maybee 	int i, j, port, pri, curpri, irq, sipri;
1257349b53ddSStuart Maybee 	uint16_t pending_ints, sip;
1258843e1988Sjohnlev 	struct cpu *cpu = CPU;
1259843e1988Sjohnlev 	volatile shared_info_t *si = HYPERVISOR_shared_info;
1260843e1988Sjohnlev 	volatile vcpu_info_t *vci = cpu->cpu_m.mcpu_vcpu_info;
1261843e1988Sjohnlev 	volatile struct xen_evt_data *cpe = cpu->cpu_m.mcpu_evt_pend;
1262843e1988Sjohnlev 	volatile uint16_t *cpu_ipp = &cpu->cpu_m.mcpu_intr_pending;
1263349b53ddSStuart Maybee 	extern void dosoftint(struct regs *);
1264843e1988Sjohnlev 
1265843e1988Sjohnlev 	ASSERT(rp->r_trapno == T_AST && rp->r_err == 0);
1266843e1988Sjohnlev 	ASSERT(&si->vcpu_info[cpu->cpu_id] == vci);
1267843e1988Sjohnlev 	ASSERT_STACK_ALIGNED();
1268843e1988Sjohnlev 
1269843e1988Sjohnlev 	vci->evtchn_upcall_pending = 0;
1270843e1988Sjohnlev 
1271843e1988Sjohnlev 	/*
1272843e1988Sjohnlev 	 * To expedite scanning of pending notifications, any 0->1
1273843e1988Sjohnlev 	 * pending transition on an unmasked channel causes a
1274843e1988Sjohnlev 	 * corresponding bit in evtchn_pending_sel to be set.
1275843e1988Sjohnlev 	 * Each bit in the selector covers a 32-bit word in
1276843e1988Sjohnlev 	 * the evtchn_pending[] array.
1277843e1988Sjohnlev 	 */
1278843e1988Sjohnlev 	membar_enter();
1279843e1988Sjohnlev 	do {
1280843e1988Sjohnlev 		pending_sels = vci->evtchn_pending_sel;
1281843e1988Sjohnlev 	} while (atomic_cas_ulong((volatile ulong_t *)&vci->evtchn_pending_sel,
1282843e1988Sjohnlev 	    pending_sels, 0) != pending_sels);
1283843e1988Sjohnlev 
1284843e1988Sjohnlev 	pending_ints = *cpu_ipp;
1285843e1988Sjohnlev 	while ((i = ffs(pending_sels)) != 0) {
1286843e1988Sjohnlev 		i--;
1287843e1988Sjohnlev 		selbit = 1ul << i;
1288843e1988Sjohnlev 		pending_sels &= ~selbit;
1289843e1988Sjohnlev 
1290843e1988Sjohnlev 		membar_enter();
1291843e1988Sjohnlev 		while ((pe = UNBLOCKED_EVENTS(si, i, cpe, cpu->cpu_id)) != 0) {
1292843e1988Sjohnlev 			j = ffs(pe) - 1;
1293843e1988Sjohnlev 			pe &= ~(1ul << j);
1294843e1988Sjohnlev 
1295843e1988Sjohnlev 			port = (i << EVTCHN_SHIFT) + j;
1296843e1988Sjohnlev 
1297843e1988Sjohnlev 			irq = evtchn_to_irq[port];
1298843e1988Sjohnlev 
1299843e1988Sjohnlev 			/*
1300843e1988Sjohnlev 			 * If no irq set, just ignore the event.
1301843e1988Sjohnlev 			 * On e.g. netbsd they call evtchn_device_upcall(port)
1302843e1988Sjohnlev 			 * We require the evtchn driver to install a handler
1303843e1988Sjohnlev 			 * so there will be an irq associated with user mode
1304843e1988Sjohnlev 			 * evtchns.
1305843e1988Sjohnlev 			 */
1306843e1988Sjohnlev 			if (irq == INVALID_IRQ) {
1307843e1988Sjohnlev 				ec_clear_evtchn(port);
1308843e1988Sjohnlev 				continue;
1309843e1988Sjohnlev 			}
1310843e1988Sjohnlev 
1311843e1988Sjohnlev 			/*
1312843e1988Sjohnlev 			 * If there's no handler, it could be a poke, so just
1313843e1988Sjohnlev 			 * accept the event and continue.
1314843e1988Sjohnlev 			 */
1315843e1988Sjohnlev 			if (!irq_info[irq].ii_u2.has_handler) {
1316843e1988Sjohnlev #ifdef TRAPTRACE
1317843e1988Sjohnlev 				ttp->ttr_ipl = 0xff;
1318843e1988Sjohnlev 				if (IRQ_IS_CPUPOKE(irq)) {
1319843e1988Sjohnlev 					ttp->ttr_ipl = XC_CPUPOKE_PIL;
1320843e1988Sjohnlev 					ttp->ttr_marker = TT_INTERRUPT;
1321843e1988Sjohnlev 				}
1322843e1988Sjohnlev 				ttp->ttr_pri = cpu->cpu_pri;
1323843e1988Sjohnlev 				ttp->ttr_spl = cpu->cpu_base_spl;
1324843e1988Sjohnlev 				ttp->ttr_vector = 0xff;
1325843e1988Sjohnlev #endif /* TRAPTRACE */
1326843e1988Sjohnlev 				if (ec_mask_evtchn(port)) {
1327843e1988Sjohnlev 					ec_clear_evtchn(port);
1328843e1988Sjohnlev 					ec_unmask_evtchn(port);
1329843e1988Sjohnlev 					continue;
1330843e1988Sjohnlev 				}
1331843e1988Sjohnlev 			}
1332843e1988Sjohnlev 
1333843e1988Sjohnlev 			pri = irq_info[irq].ii_u2.ipl;
1334843e1988Sjohnlev 
1335843e1988Sjohnlev 			/*
1336843e1988Sjohnlev 			 * If we are the cpu that successfully masks
1337843e1988Sjohnlev 			 * the event, then record it as a pending event
1338843e1988Sjohnlev 			 * for this cpu to service
1339843e1988Sjohnlev 			 */
1340843e1988Sjohnlev 			if (ec_mask_evtchn(port)) {
1341843e1988Sjohnlev 				if (ec_evtchn_pending(port)) {
1342843e1988Sjohnlev 					cpe->pending_sel[pri] |= selbit;
1343843e1988Sjohnlev 					cpe->pending_evts[pri][i] |= (1ul << j);
1344843e1988Sjohnlev 					pending_ints |= 1 << pri;
1345349b53ddSStuart Maybee 					/*
1346349b53ddSStuart Maybee 					 * We have recorded a pending interrupt
1347349b53ddSStuart Maybee 					 * for this cpu.  If it is an edge
1348349b53ddSStuart Maybee 					 * triggered interrupt then we go ahead
1349349b53ddSStuart Maybee 					 * and clear the pending and mask bits
1350349b53ddSStuart Maybee 					 * from the shared info to avoid having
1351349b53ddSStuart Maybee 					 * the hypervisor see the pending event
1352349b53ddSStuart Maybee 					 * again and possibly disabling the
1353349b53ddSStuart Maybee 					 * interrupt.  This should also help
1354349b53ddSStuart Maybee 					 * keep us from missing an interrupt.
1355349b53ddSStuart Maybee 					 */
1356349b53ddSStuart Maybee 					if (ec_is_edge_pirq(irq)) {
1357349b53ddSStuart Maybee 						ec_clear_evtchn(port);
1358349b53ddSStuart Maybee 						ec_unmask_evtchn(port);
1359349b53ddSStuart Maybee 					}
1360843e1988Sjohnlev 				} else {
1361843e1988Sjohnlev 					/*
1362843e1988Sjohnlev 					 * another cpu serviced this event
1363843e1988Sjohnlev 					 * before us, clear the mask.
1364843e1988Sjohnlev 					 */
1365843e1988Sjohnlev 					ec_unmask_evtchn(port);
1366843e1988Sjohnlev 				}
1367843e1988Sjohnlev 			}
1368843e1988Sjohnlev 		}
1369843e1988Sjohnlev 	}
1370843e1988Sjohnlev 	*cpu_ipp = pending_ints;
1371843e1988Sjohnlev 	if (pending_ints == 0)
1372843e1988Sjohnlev 		return;
1373843e1988Sjohnlev 	/*
1374843e1988Sjohnlev 	 * We have gathered all the pending events/interrupts,
1375843e1988Sjohnlev 	 * go service all the ones we can from highest priority to lowest.
1376843e1988Sjohnlev 	 * Note: This loop may not actually complete and service all
1377843e1988Sjohnlev 	 * pending interrupts since one of the interrupt threads may
1378843e1988Sjohnlev 	 * block and the pinned thread runs.  In that case, when we
1379843e1988Sjohnlev 	 * exit the interrupt thread that blocked we will check for
1380843e1988Sjohnlev 	 * any unserviced interrupts and re-post an upcall to process
1381843e1988Sjohnlev 	 * any unserviced pending events.
1382843e1988Sjohnlev 	 */
1383349b53ddSStuart Maybee restart:
1384843e1988Sjohnlev 	curpri = cpu->cpu_pri;
1385349b53ddSStuart Maybee 	pri = bsrw_insn(*cpu_ipp);
1386349b53ddSStuart Maybee 	while (pri > curpri) {
1387843e1988Sjohnlev 		while ((pending_sels = cpe->pending_sel[pri]) != 0) {
1388843e1988Sjohnlev 			i = ffs(pending_sels) - 1;
1389843e1988Sjohnlev 			while ((pe = cpe->pending_evts[pri][i]) != 0) {
1390843e1988Sjohnlev 				j = ffs(pe) - 1;
1391349b53ddSStuart Maybee 				port = (i << EVTCHN_SHIFT) + j;
1392843e1988Sjohnlev 				pe &= ~(1ul << j);
1393843e1988Sjohnlev 				cpe->pending_evts[pri][i] = pe;
1394843e1988Sjohnlev 				if (pe == 0) {
1395843e1988Sjohnlev 					/*
1396843e1988Sjohnlev 					 * Must reload pending selector bits
1397843e1988Sjohnlev 					 * here as they could have changed on
1398843e1988Sjohnlev 					 * a previous trip around the inner loop
1399843e1988Sjohnlev 					 * while we were interrupt enabled
1400843e1988Sjohnlev 					 * in a interrupt service routine.
1401843e1988Sjohnlev 					 */
1402843e1988Sjohnlev 					pending_sels = cpe->pending_sel[pri];
1403843e1988Sjohnlev 					pending_sels &= ~(1ul << i);
1404843e1988Sjohnlev 					cpe->pending_sel[pri] = pending_sels;
1405843e1988Sjohnlev 					if (pending_sels == 0)
1406843e1988Sjohnlev 						*cpu_ipp &= ~(1 << pri);
1407843e1988Sjohnlev 				}
1408843e1988Sjohnlev 				irq = evtchn_to_irq[port];
1409843e1988Sjohnlev 				if (irq == INVALID_IRQ) {
1410843e1988Sjohnlev 					/*
1411843e1988Sjohnlev 					 * No longer a handler for this event
1412843e1988Sjohnlev 					 * channel.  Clear the event and
1413843e1988Sjohnlev 					 * ignore it, unmask the event.
1414843e1988Sjohnlev 					 */
1415843e1988Sjohnlev 					ec_clear_evtchn(port);
1416843e1988Sjohnlev 					ec_unmask_evtchn(port);
1417843e1988Sjohnlev 					continue;
1418843e1988Sjohnlev 				}
1419843e1988Sjohnlev 				if (irq == ec_dev_irq) {
1420349b53ddSStuart Maybee 					ASSERT(cpu->cpu_m.mcpu_ec_mbox == 0);
1421349b53ddSStuart Maybee 					cpu->cpu_m.mcpu_ec_mbox = port;
1422843e1988Sjohnlev 				}
1423843e1988Sjohnlev 				/*
1424843e1988Sjohnlev 				 * Set up the regs struct to
1425843e1988Sjohnlev 				 * look like a normal hardware int
1426843e1988Sjohnlev 				 * and do normal interrupt handling.
1427843e1988Sjohnlev 				 */
1428843e1988Sjohnlev 				rp->r_trapno = irq;
1429843e1988Sjohnlev 				do_interrupt(rp, ttp);
1430843e1988Sjohnlev 				/*
1431843e1988Sjohnlev 				 * Check for cpu priority change
1432843e1988Sjohnlev 				 * Can happen if int thread blocks
1433843e1988Sjohnlev 				 */
1434349b53ddSStuart Maybee 				if (cpu->cpu_pri != curpri)
1435349b53ddSStuart Maybee 					goto restart;
1436843e1988Sjohnlev 			}
1437843e1988Sjohnlev 		}
1438349b53ddSStuart Maybee 		/*
1439349b53ddSStuart Maybee 		 * Dispatch any soft interrupts that are
1440349b53ddSStuart Maybee 		 * higher priority than any hard ones remaining.
1441349b53ddSStuart Maybee 		 */
1442349b53ddSStuart Maybee 		pri = bsrw_insn(*cpu_ipp);
1443349b53ddSStuart Maybee 		sip = (uint16_t)cpu->cpu_softinfo.st_pending;
1444349b53ddSStuart Maybee 		if (sip != 0) {
1445349b53ddSStuart Maybee 			sipri = bsrw_insn(sip);
1446*0bc46f0dSStuart Maybee 			if (sipri > pri && sipri > cpu->cpu_pri) {
1447349b53ddSStuart Maybee 				dosoftint(rp);
1448*0bc46f0dSStuart Maybee 				/*
1449*0bc46f0dSStuart Maybee 				 * Check for cpu priority change
1450*0bc46f0dSStuart Maybee 				 * Can happen if softint thread blocks
1451*0bc46f0dSStuart Maybee 				 */
1452*0bc46f0dSStuart Maybee 				if (cpu->cpu_pri != curpri)
1453*0bc46f0dSStuart Maybee 					goto restart;
1454*0bc46f0dSStuart Maybee 			}
1455843e1988Sjohnlev 		}
1456843e1988Sjohnlev 	}
1457349b53ddSStuart Maybee 	/*
1458349b53ddSStuart Maybee 	 * Deliver any pending soft interrupts.
1459349b53ddSStuart Maybee 	 */
1460349b53ddSStuart Maybee 	if (cpu->cpu_softinfo.st_pending)
1461349b53ddSStuart Maybee 		dosoftint(rp);
1462349b53ddSStuart Maybee }
1463349b53ddSStuart Maybee 
1464843e1988Sjohnlev 
/*
 * Clear the mask bit for event channel `ev' so pending events can be
 * delivered again, and re-raise any event that arrived while the channel
 * was masked.  Must be called with interrupts disabled.
 */
void
ec_unmask_evtchn(unsigned int ev)
{
	uint_t evi, evb;
	volatile shared_info_t *si = HYPERVISOR_shared_info;
	volatile vcpu_info_t *vci = CPU->cpu_m.mcpu_vcpu_info;
	volatile ulong_t *ulp;

	ASSERT(!interrupts_enabled());
	/*
	 * Check if we need to take slow path: if the channel is not bound
	 * to the current CPU, let the hypervisor perform the unmask rather
	 * than touching this CPU's vcpu_info below.
	 */
	if (!CPU_IN_SET(evtchn_cpus[ev], CPU->cpu_id)) {
		xen_evtchn_unmask(ev);
		return;
	}
	/* Word index and bit position of `ev' within the shared bitmaps. */
	evi = ev >> EVTCHN_SHIFT;
	evb = ev & ((1ul << EVTCHN_SHIFT) - 1);
	ulp = (volatile ulong_t *)&si->evtchn_mask[evi];
	atomic_and_ulong(ulp, ~(1ul << evb));
	/*
	 * The following is basically the equivalent of
	 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the
	 * interrupt edge' if the channel is masked.
	 * XXPV - slight race if upcall was about to be set, we may get
	 * an extra upcall.
	 */
	membar_enter();
	if (si->evtchn_pending[evi] & (1ul << evb)) {
		membar_consumer();
		ulp = (volatile ulong_t *)&vci->evtchn_pending_sel;
		/* Set the selector bit for this word if not already set. */
		if (!(*ulp & (1ul << evi))) {
			atomic_or_ulong(ulp, (1ul << evi));
		}
		/* Tell the upcall path there is work to deliver. */
		vci->evtchn_upcall_pending = 1;
	}
}
1502843e1988Sjohnlev 
1503843e1988Sjohnlev /*
1504843e1988Sjohnlev  * Set a bit in an evtchan mask word, return true if we are the cpu that
1505843e1988Sjohnlev  * set the bit.
1506843e1988Sjohnlev  */
1507843e1988Sjohnlev int
1508843e1988Sjohnlev ec_mask_evtchn(unsigned int ev)
1509843e1988Sjohnlev {
1510843e1988Sjohnlev 	uint_t evi, evb;
1511843e1988Sjohnlev 	ulong_t new, old, bit;
1512843e1988Sjohnlev 	volatile shared_info_t *si = HYPERVISOR_shared_info;
1513843e1988Sjohnlev 	volatile ulong_t *maskp;
1514843e1988Sjohnlev 	int masked;
1515843e1988Sjohnlev 
1516843e1988Sjohnlev 	kpreempt_disable();
1517843e1988Sjohnlev 	evi = ev >> EVTCHN_SHIFT;
1518843e1988Sjohnlev 	evb = ev & ((1ul << EVTCHN_SHIFT) - 1);
1519843e1988Sjohnlev 	bit = 1ul << evb;
1520843e1988Sjohnlev 	maskp = (volatile ulong_t *)&si->evtchn_mask[evi];
1521843e1988Sjohnlev 	do {
1522843e1988Sjohnlev 		old = si->evtchn_mask[evi];
1523843e1988Sjohnlev 		new = old | bit;
1524843e1988Sjohnlev 	} while (atomic_cas_ulong(maskp, old, new) != old);
1525843e1988Sjohnlev 	masked = (old & bit) == 0;
1526843e1988Sjohnlev 	if (masked) {
1527843e1988Sjohnlev 		evtchn_owner[ev] = CPU->cpu_id;
1528843e1988Sjohnlev #ifdef DEBUG
1529843e1988Sjohnlev 		evtchn_owner_thread[ev] = curthread;
1530843e1988Sjohnlev #endif
1531843e1988Sjohnlev 	}
1532843e1988Sjohnlev 	kpreempt_enable();
1533843e1988Sjohnlev 	return (masked);
1534843e1988Sjohnlev }
1535843e1988Sjohnlev 
1536843e1988Sjohnlev void
1537843e1988Sjohnlev ec_clear_evtchn(unsigned int ev)
1538843e1988Sjohnlev {
1539843e1988Sjohnlev 	uint_t evi;
1540843e1988Sjohnlev 	shared_info_t *si = HYPERVISOR_shared_info;
1541843e1988Sjohnlev 	volatile ulong_t *pendp;
1542843e1988Sjohnlev 
1543843e1988Sjohnlev 	evi = ev >> EVTCHN_SHIFT;
1544843e1988Sjohnlev 	ev &= (1ul << EVTCHN_SHIFT) - 1;
1545843e1988Sjohnlev 	pendp = (volatile ulong_t *)&si->evtchn_pending[evi];
1546843e1988Sjohnlev 	atomic_and_ulong(pendp, ~(1ul << ev));
1547843e1988Sjohnlev }
1548843e1988Sjohnlev 
1549843e1988Sjohnlev void
1550843e1988Sjohnlev ec_notify_via_evtchn(unsigned int port)
1551843e1988Sjohnlev {
1552843e1988Sjohnlev 	evtchn_send_t send;
1553843e1988Sjohnlev 
1554843e1988Sjohnlev 	ASSERT(port != INVALID_EVTCHN);
1555843e1988Sjohnlev 
1556843e1988Sjohnlev 	send.port = port;
1557843e1988Sjohnlev 	(void) HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
1558843e1988Sjohnlev }
1559843e1988Sjohnlev 
1560843e1988Sjohnlev int
1561843e1988Sjohnlev ec_block_irq(int irq)
1562843e1988Sjohnlev {
1563843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
1564843e1988Sjohnlev 	int evtchn;
1565843e1988Sjohnlev 
1566843e1988Sjohnlev 
1567843e1988Sjohnlev 	evtchn = irq_evtchn(irqp);
1568843e1988Sjohnlev 	(void) ec_mask_evtchn(evtchn);
1569843e1988Sjohnlev 	return (evtchn_owner[evtchn]);
1570843e1988Sjohnlev }
1571843e1988Sjohnlev 
1572843e1988Sjohnlev /*
1573843e1988Sjohnlev  * Make a event that is pending for delivery on the current cpu  "go away"
1574843e1988Sjohnlev  * without servicing the interrupt.
1575843e1988Sjohnlev  */
1576843e1988Sjohnlev void
1577843e1988Sjohnlev ec_unpend_irq(int irq)
1578843e1988Sjohnlev {
1579843e1988Sjohnlev 	irq_info_t *irqp = &irq_info[irq];
1580843e1988Sjohnlev 	int pri = irqp->ii_u2.ipl;
1581843e1988Sjohnlev 	ulong_t flags;
1582843e1988Sjohnlev 	uint_t evtchn, evi, bit;
1583843e1988Sjohnlev 	unsigned long pe, pending_sels;
1584843e1988Sjohnlev 	struct xen_evt_data *cpe;
1585843e1988Sjohnlev 
1586843e1988Sjohnlev 	/*
1587843e1988Sjohnlev 	 * The evtchn must be masked
1588843e1988Sjohnlev 	 */
1589843e1988Sjohnlev 	evtchn = irq_evtchn(irqp);
1590843e1988Sjohnlev 	ASSERT(EVTCHN_MASKED(evtchn));
1591843e1988Sjohnlev 	evi = evtchn >> EVTCHN_SHIFT;
1592843e1988Sjohnlev 	bit = evtchn & (1ul << EVTCHN_SHIFT) - 1;
1593843e1988Sjohnlev 	flags = intr_clear();
1594843e1988Sjohnlev 	cpe = CPU->cpu_m.mcpu_evt_pend;
1595843e1988Sjohnlev 	pe = cpe->pending_evts[pri][evi] & ~(1ul << bit);
1596843e1988Sjohnlev 	cpe->pending_evts[pri][evi] = pe;
1597843e1988Sjohnlev 	if (pe == 0) {
1598843e1988Sjohnlev 		pending_sels = cpe->pending_sel[pri];
1599843e1988Sjohnlev 		pending_sels &= ~(1ul << evi);
1600843e1988Sjohnlev 		cpe->pending_sel[pri] = pending_sels;
1601843e1988Sjohnlev 		if (pending_sels == 0)
1602843e1988Sjohnlev 			CPU->cpu_m.mcpu_intr_pending &= ~(1 << pri);
1603843e1988Sjohnlev 	}
1604843e1988Sjohnlev 	intr_restore(flags);
1605843e1988Sjohnlev }
1606