xref: /titanic_51/usr/src/uts/i86pc/i86hvm/io/xpv/evtchn.c (revision 349b53dd4e695e3d833b5380540385145b2d3ae8)
1eb0cc229Sedp /*
2eb0cc229Sedp  * CDDL HEADER START
3eb0cc229Sedp  *
4eb0cc229Sedp  * The contents of this file are subject to the terms of the
5eb0cc229Sedp  * Common Development and Distribution License (the "License").
6eb0cc229Sedp  * You may not use this file except in compliance with the License.
7eb0cc229Sedp  *
8eb0cc229Sedp  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9eb0cc229Sedp  * or http://www.opensolaris.org/os/licensing.
10eb0cc229Sedp  * See the License for the specific language governing permissions
11eb0cc229Sedp  * and limitations under the License.
12eb0cc229Sedp  *
13eb0cc229Sedp  * When distributing Covered Code, include this CDDL HEADER in each
14eb0cc229Sedp  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15eb0cc229Sedp  * If applicable, add the following below this CDDL HEADER, with the
16eb0cc229Sedp  * fields enclosed by brackets "[]" replaced with your own identifying
17eb0cc229Sedp  * information: Portions Copyright [yyyy] [name of copyright owner]
18eb0cc229Sedp  *
19eb0cc229Sedp  * CDDL HEADER END
20eb0cc229Sedp  */
21eb0cc229Sedp 
22eb0cc229Sedp /*
23*349b53ddSStuart Maybee  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24eb0cc229Sedp  * Use is subject to license terms.
25eb0cc229Sedp  */
26eb0cc229Sedp 
27eb0cc229Sedp #include <sys/types.h>
28eb0cc229Sedp #include <sys/xpv_support.h>
29eb0cc229Sedp #include <sys/hypervisor.h>
30eb0cc229Sedp #include <sys/machsystm.h>
31eb0cc229Sedp #include <sys/mutex.h>
32eb0cc229Sedp #include <sys/cmn_err.h>
33eb0cc229Sedp #include <sys/dditypes.h>
34eb0cc229Sedp #include <sys/atomic.h>
35eb0cc229Sedp #include <sys/sysmacros.h>
36eb0cc229Sedp #include <sys/cpu.h>
37eb0cc229Sedp #include <sys/psw.h>
38eb0cc229Sedp #include <sys/psm.h>
39eb0cc229Sedp #include <sys/sdt.h>
40eb0cc229Sedp 
/* Device node of the xpv nexus driver; owned and set by that driver. */
extern dev_info_t *xpv_dip;
/* Handle for the hardware interrupt used for hypervisor event callbacks. */
static ddi_intr_handle_t *evtchn_ihp = NULL;
/* Per-channel softint handles; NULL means the channel is unbound. */
static ddi_softint_handle_t evtchn_to_handle[NR_EVENT_CHANNELS];
/* Protects evtchn_to_handle[]; initialized as an SPL7 spin lock in ec_init(). */
kmutex_t ec_lock;

/* IRQ vector registered with the hypervisor; -1 until ec_init() succeeds. */
static int evtchn_callback_irq = -1;

/* Aliases into the shared-info page's pending/mask bitmaps (set in ec_init). */
static volatile ulong_t *pending_events;
static volatile ulong_t *masked_events;

/* log2(NBBY * sizeof (ulong)) */
#ifdef __amd64
#define	EVTCHN_SHIFT	6
#else /* __i386 */
#define	EVTCHN_SHIFT	5
#endif
57eb0cc229Sedp 
/*
 * Atomically get and clear a ulong from memory: sample the word, then
 * compare-and-swap it to zero, retrying if another CPU or the hypervisor
 * changed it in between.  The membar orders prior stores before the load.
 */
#define	GET_AND_CLEAR(src, targ) {				\
	membar_enter();						\
	do {							\
		targ = *src;					\
	} while (atomic_cas_ulong(src, targ, 0) != targ);	\
}
65eb0cc229Sedp 
/*
 * Get the indices of the first (low) and last (high) bits set in the
 * bitmap; both are left at -1 if no bit is set.  The word is
 * (1 << EVTCHN_SHIFT) bits wide and every bit must be examined: the old
 * bound of "_i <= sizeof (ulong_t)" only looked at the lowest few bits,
 * silently missing selector bits for higher pending-event words.
 */
#define	GET_BOUNDS(bitmap, low, high)	 {		\
	int _i;						\
	low = high = -1;				\
	for (_i = 0; _i < (1 << EVTCHN_SHIFT); _i++)	\
		if (bitmap & (1UL << _i)) {		\
			if (low == -1)			\
				low = _i;		\
			high = _i;			\
		}					\
}
77eb0cc229Sedp 
78eb0cc229Sedp void
79eb0cc229Sedp ec_bind_evtchn_to_handler(int evtchn, pri_t pri, ec_handler_fcn_t handler,
80eb0cc229Sedp     void *arg1)
81eb0cc229Sedp {
82eb0cc229Sedp 	ddi_softint_handle_t hdl;
83eb0cc229Sedp 
84*349b53ddSStuart Maybee 	if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
85eb0cc229Sedp 		cmn_err(CE_WARN, "Binding invalid event channel: %d", evtchn);
86eb0cc229Sedp 		return;
87eb0cc229Sedp 	}
88eb0cc229Sedp 
89eb0cc229Sedp 	(void) ddi_intr_add_softint(xpv_dip, &hdl, pri, handler, (caddr_t)arg1);
90eb0cc229Sedp 	mutex_enter(&ec_lock);
91eb0cc229Sedp 	ASSERT(evtchn_to_handle[evtchn] == NULL);
92eb0cc229Sedp 	evtchn_to_handle[evtchn] = hdl;
93eb0cc229Sedp 	mutex_exit(&ec_lock);
94eb0cc229Sedp 
95eb0cc229Sedp 	/* Let the hypervisor know we're prepared to handle this event */
96eb0cc229Sedp 	hypervisor_unmask_event(evtchn);
97eb0cc229Sedp }
98eb0cc229Sedp 
99eb0cc229Sedp void
100eb0cc229Sedp ec_unbind_evtchn(int evtchn)
101eb0cc229Sedp {
102eb0cc229Sedp 	evtchn_close_t close;
103eb0cc229Sedp 	ddi_softint_handle_t hdl;
104eb0cc229Sedp 
105*349b53ddSStuart Maybee 	if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
106eb0cc229Sedp 		cmn_err(CE_WARN, "Unbinding invalid event channel: %d", evtchn);
107eb0cc229Sedp 		return;
108eb0cc229Sedp 	}
109eb0cc229Sedp 
110eb0cc229Sedp 	/*
111eb0cc229Sedp 	 * Let the hypervisor know we're no longer prepared to handle this
112eb0cc229Sedp 	 * event
113eb0cc229Sedp 	 */
114eb0cc229Sedp 	hypervisor_mask_event(evtchn);
115eb0cc229Sedp 
116eb0cc229Sedp 	/* Cleanup the event handler metadata */
117eb0cc229Sedp 	mutex_enter(&ec_lock);
118eb0cc229Sedp 	hdl = evtchn_to_handle[evtchn];
119eb0cc229Sedp 	evtchn_to_handle[evtchn] = NULL;
120eb0cc229Sedp 	mutex_exit(&ec_lock);
121eb0cc229Sedp 
122eb0cc229Sedp 	close.port = evtchn;
123eb0cc229Sedp 	(void) HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
124eb0cc229Sedp 	(void) ddi_intr_remove_softint(hdl);
125eb0cc229Sedp }
126eb0cc229Sedp 
127eb0cc229Sedp void
128eb0cc229Sedp ec_notify_via_evtchn(unsigned int port)
129eb0cc229Sedp {
130eb0cc229Sedp 	evtchn_send_t send;
131eb0cc229Sedp 
132eb0cc229Sedp 	if ((int)port == -1)
133eb0cc229Sedp 		return;
134eb0cc229Sedp 	send.port = port;
135eb0cc229Sedp 	(void) HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
136eb0cc229Sedp }
137eb0cc229Sedp 
138eb0cc229Sedp void
139eb0cc229Sedp hypervisor_unmask_event(unsigned int ev)
140eb0cc229Sedp {
141eb0cc229Sedp 	int index = ev >> EVTCHN_SHIFT;
142eb0cc229Sedp 	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
143eb0cc229Sedp 	volatile ulong_t *maskp;
144eb0cc229Sedp 	evtchn_unmask_t unmask;
145eb0cc229Sedp 
146eb0cc229Sedp 	/*
147eb0cc229Sedp 	 * index,bit contain the event number as an index into the
148eb0cc229Sedp 	 * masked-events bitmask. Set it to 0.
149eb0cc229Sedp 	 */
150eb0cc229Sedp 	maskp = &masked_events[index];
151eb0cc229Sedp 	atomic_and_ulong(maskp, ~bit);
152eb0cc229Sedp 
153eb0cc229Sedp 	/* Let the hypervisor know the event has been unmasked */
154eb0cc229Sedp 	unmask.port = ev;
155eb0cc229Sedp 	if (HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask) != 0)
156eb0cc229Sedp 		panic("xen_evtchn_unmask() failed");
157eb0cc229Sedp }
158eb0cc229Sedp 
159eb0cc229Sedp /* Set a bit in an evtchan mask word */
160eb0cc229Sedp void
161eb0cc229Sedp hypervisor_mask_event(uint_t ev)
162eb0cc229Sedp {
163eb0cc229Sedp 	int index = ev >> EVTCHN_SHIFT;
164eb0cc229Sedp 	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
165eb0cc229Sedp 	volatile ulong_t *maskp;
166eb0cc229Sedp 
167eb0cc229Sedp 	maskp = &masked_events[index];
168eb0cc229Sedp 	atomic_or_ulong(maskp, bit);
169eb0cc229Sedp }
170eb0cc229Sedp 
171eb0cc229Sedp void
172eb0cc229Sedp hypervisor_clear_event(uint_t ev)
173eb0cc229Sedp {
174eb0cc229Sedp 	int index = ev >> EVTCHN_SHIFT;
175eb0cc229Sedp 	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
176eb0cc229Sedp 	volatile ulong_t *maskp;
177eb0cc229Sedp 
178eb0cc229Sedp 	maskp = &pending_events[index];
179eb0cc229Sedp 	atomic_and_ulong(maskp, ~bit);
180eb0cc229Sedp }
181eb0cc229Sedp 
182eb0cc229Sedp int
183eb0cc229Sedp xen_alloc_unbound_evtchn(int domid, int *evtchnp)
184eb0cc229Sedp {
185eb0cc229Sedp 	evtchn_alloc_unbound_t alloc;
186eb0cc229Sedp 	int err;
187eb0cc229Sedp 
188eb0cc229Sedp 	alloc.dom = DOMID_SELF;
189eb0cc229Sedp 	alloc.remote_dom = (domid_t)domid;
190eb0cc229Sedp 
191eb0cc229Sedp 	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
192eb0cc229Sedp 	    &alloc)) == 0) {
193eb0cc229Sedp 		*evtchnp = alloc.port;
194eb0cc229Sedp 		/* ensure evtchn is masked till we're ready to use it */
195eb0cc229Sedp 		(void) hypervisor_mask_event(*evtchnp);
196eb0cc229Sedp 	} else {
197eb0cc229Sedp 		err = xen_xlate_errcode(err);
198eb0cc229Sedp 	}
199eb0cc229Sedp 
200eb0cc229Sedp 	return (err);
201eb0cc229Sedp }
202eb0cc229Sedp 
203eb0cc229Sedp int
204eb0cc229Sedp xen_bind_interdomain(int domid, int remote_port, int *port)
205eb0cc229Sedp {
206eb0cc229Sedp 	evtchn_bind_interdomain_t bind;
207eb0cc229Sedp 	int err;
208eb0cc229Sedp 
209eb0cc229Sedp 	bind.remote_dom = (domid_t)domid;
210eb0cc229Sedp 	bind.remote_port = remote_port;
211eb0cc229Sedp 	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
212eb0cc229Sedp 	    &bind)) == 0)
213eb0cc229Sedp 		*port = bind.local_port;
214eb0cc229Sedp 	else
215eb0cc229Sedp 		err = xen_xlate_errcode(err);
216eb0cc229Sedp 	return (err);
217eb0cc229Sedp }
218eb0cc229Sedp 
219eb0cc229Sedp /*ARGSUSED*/
220eb0cc229Sedp uint_t
221eb0cc229Sedp evtchn_callback_fcn(caddr_t arg0, caddr_t arg1)
222eb0cc229Sedp {
223eb0cc229Sedp 	ulong_t pending_word;
224eb0cc229Sedp 	int i, j, port;
225eb0cc229Sedp 	volatile struct vcpu_info *vci;
226eb0cc229Sedp 	uint_t rv = DDI_INTR_UNCLAIMED;
227eb0cc229Sedp 	ddi_softint_handle_t hdl;
228eb0cc229Sedp 	int low, high;
229eb0cc229Sedp 	ulong_t sels;
230eb0cc229Sedp 
231391647d5SJohn Levon 	/*
232391647d5SJohn Levon 	 * Xen hard-codes all notifications to VCPU0, so we bind
233391647d5SJohn Levon 	 * ourselves via xpv.conf.  Note that this also assumes that all
234391647d5SJohn Levon 	 * evtchns are bound to VCPU0, which is true by default.
235391647d5SJohn Levon 	 */
236391647d5SJohn Levon 	ASSERT(CPU->cpu_id == 0);
237391647d5SJohn Levon 
238391647d5SJohn Levon 	vci = &HYPERVISOR_shared_info->vcpu_info[0];
239eb0cc229Sedp 
240eb0cc229Sedp again:
241eb0cc229Sedp 	DTRACE_PROBE2(evtchn__scan__start, int, vci->evtchn_upcall_pending,
242eb0cc229Sedp 	    ulong_t, vci->evtchn_pending_sel);
243eb0cc229Sedp 
244eb0cc229Sedp 	atomic_and_8(&vci->evtchn_upcall_pending, 0);
245eb0cc229Sedp 
246eb0cc229Sedp 	/*
247eb0cc229Sedp 	 * Find the upper and lower bounds in which we need to search for
248eb0cc229Sedp 	 * pending events.
249eb0cc229Sedp 	 */
250eb0cc229Sedp 	GET_AND_CLEAR(&vci->evtchn_pending_sel, sels);
251eb0cc229Sedp 
252eb0cc229Sedp 	/* sels == 1 is by far the most common case.  Make it fast */
253eb0cc229Sedp 	if (sels == 1)
254eb0cc229Sedp 		low = high = 0;
255eb0cc229Sedp 	else if (sels == 0)
256eb0cc229Sedp 		return (rv);
257eb0cc229Sedp 	else
258eb0cc229Sedp 		GET_BOUNDS(sels, low, high);
259eb0cc229Sedp 
260eb0cc229Sedp 	/* Scan the port list, looking for words with bits set */
261eb0cc229Sedp 	for (i = low; i <= high; i++) {
262eb0cc229Sedp 		ulong_t tmp;
263eb0cc229Sedp 
264eb0cc229Sedp 		GET_AND_CLEAR(&pending_events[i], tmp);
265eb0cc229Sedp 		pending_word = tmp & ~(masked_events[i]);
266eb0cc229Sedp 
267eb0cc229Sedp 		/* Scan the bits in the word, looking for pending events */
268eb0cc229Sedp 		while (pending_word != 0) {
269eb0cc229Sedp 			j = lowbit(pending_word) - 1;
270eb0cc229Sedp 			port = (i << EVTCHN_SHIFT) + j;
271eb0cc229Sedp 			pending_word = pending_word & ~(1 << j);
272eb0cc229Sedp 
273eb0cc229Sedp 			/*
274eb0cc229Sedp 			 * If there is a handler registered for this event,
275eb0cc229Sedp 			 * schedule a softint of the appropriate priority
276eb0cc229Sedp 			 * to execute it.
277eb0cc229Sedp 			 */
278eb0cc229Sedp 			if ((hdl = evtchn_to_handle[port]) != NULL) {
279eb0cc229Sedp 				(void) ddi_intr_trigger_softint(hdl, NULL);
280eb0cc229Sedp 				rv = DDI_INTR_CLAIMED;
281eb0cc229Sedp 			}
282eb0cc229Sedp 		}
283eb0cc229Sedp 	}
284eb0cc229Sedp 	DTRACE_PROBE2(evtchn__scan__end, int, vci->evtchn_upcall_pending,
285eb0cc229Sedp 	    ulong_t, vci->evtchn_pending_sel);
286eb0cc229Sedp 
287eb0cc229Sedp 	if ((volatile uint8_t)vci->evtchn_upcall_pending ||
288eb0cc229Sedp 	    ((volatile ulong_t)vci->evtchn_pending_sel))
289eb0cc229Sedp 		goto again;
290eb0cc229Sedp 
291eb0cc229Sedp 	return (rv);
292eb0cc229Sedp }
293eb0cc229Sedp 
294eb0cc229Sedp static int
295eb0cc229Sedp set_hvm_callback(int irq)
296eb0cc229Sedp {
297eb0cc229Sedp 	struct xen_hvm_param xhp;
298eb0cc229Sedp 
299eb0cc229Sedp 	xhp.domid = DOMID_SELF;
300eb0cc229Sedp 	xhp.index = HVM_PARAM_CALLBACK_IRQ;
301eb0cc229Sedp 	xhp.value = irq;
302eb0cc229Sedp 	return (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp));
303eb0cc229Sedp }
304eb0cc229Sedp 
305eb0cc229Sedp void
306eb0cc229Sedp ec_fini()
307eb0cc229Sedp {
308eb0cc229Sedp 	int i;
309eb0cc229Sedp 
310eb0cc229Sedp 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
311eb0cc229Sedp 		ec_unbind_evtchn(i);
312eb0cc229Sedp 
313eb0cc229Sedp 	evtchn_callback_irq = -1;
314eb0cc229Sedp 	if (evtchn_ihp != NULL) {
315eb0cc229Sedp 		(void) ddi_intr_disable(*evtchn_ihp);
316eb0cc229Sedp 		(void) ddi_intr_remove_handler(*evtchn_ihp);
317eb0cc229Sedp 		(void) ddi_intr_free(*evtchn_ihp);
318eb0cc229Sedp 		kmem_free(evtchn_ihp, sizeof (ddi_intr_handle_t));
319eb0cc229Sedp 		evtchn_ihp = NULL;
320eb0cc229Sedp 	}
321eb0cc229Sedp }
322eb0cc229Sedp 
323eb0cc229Sedp int
324*349b53ddSStuart Maybee ec_init(void)
325eb0cc229Sedp {
326eb0cc229Sedp 	int i;
327eb0cc229Sedp 	int rv, actual;
328eb0cc229Sedp 	ddi_intr_handle_t *ihp;
329eb0cc229Sedp 
330eb0cc229Sedp 	/*
331eb0cc229Sedp 	 * Translate the variable-sized pending and masked event bitmasks
332eb0cc229Sedp 	 * into constant-sized arrays of uint32_t's.
333eb0cc229Sedp 	 */
334eb0cc229Sedp 	pending_events = &HYPERVISOR_shared_info->evtchn_pending[0];
335eb0cc229Sedp 	masked_events = &HYPERVISOR_shared_info->evtchn_mask[0];
336eb0cc229Sedp 
337eb0cc229Sedp 	/*
338eb0cc229Sedp 	 * Clear our event handler structures and prevent the hypervisor
339eb0cc229Sedp 	 * from triggering any events.
340eb0cc229Sedp 	 */
341eb0cc229Sedp 	mutex_init(&ec_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
342eb0cc229Sedp 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
343eb0cc229Sedp 		evtchn_to_handle[i] = NULL;
344eb0cc229Sedp 		(void) hypervisor_mask_event(i);
345eb0cc229Sedp 	}
346eb0cc229Sedp 
347eb0cc229Sedp 	/*
348eb0cc229Sedp 	 * Allocate and initialize an interrupt handler to process the
349eb0cc229Sedp 	 * hypervisor's "hey you have events pending!" interrupt.
350eb0cc229Sedp 	 */
351eb0cc229Sedp 	ihp = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
352*349b53ddSStuart Maybee 	rv = ddi_intr_alloc(xpv_dip, ihp, DDI_INTR_TYPE_FIXED, 0, 1, &actual,
353eb0cc229Sedp 	    DDI_INTR_ALLOC_NORMAL);
354eb0cc229Sedp 	if (rv < 0 || actual != 1) {
355eb0cc229Sedp 		cmn_err(CE_WARN, "Could not allocate evtchn interrupt: %d",
356eb0cc229Sedp 		    rv);
357eb0cc229Sedp 		return (-1);
358eb0cc229Sedp 	}
359eb0cc229Sedp 
360eb0cc229Sedp 	rv = ddi_intr_add_handler(*ihp, evtchn_callback_fcn, NULL, NULL);
361eb0cc229Sedp 	if (rv < 0) {
362eb0cc229Sedp 		(void) ddi_intr_free(*ihp);
363eb0cc229Sedp 		cmn_err(CE_WARN, "Could not attach evtchn handler");
364eb0cc229Sedp 		return (-1);
365eb0cc229Sedp 	}
366eb0cc229Sedp 	evtchn_ihp = ihp;
367eb0cc229Sedp 
368eb0cc229Sedp 	if (ddi_intr_enable(*ihp) != DDI_SUCCESS) {
369eb0cc229Sedp 		cmn_err(CE_WARN, "Could not enable evtchn interrupts\n");
370eb0cc229Sedp 		return (-1);
371eb0cc229Sedp 	}
372eb0cc229Sedp 
373eb0cc229Sedp 	/* Tell the hypervisor which interrupt we're waiting on. */
374eb0cc229Sedp 	evtchn_callback_irq = ((ddi_intr_handle_impl_t *)*ihp)->ih_vector;
375eb0cc229Sedp 
376eb0cc229Sedp 	if (set_hvm_callback(evtchn_callback_irq) != 0) {
377eb0cc229Sedp 		cmn_err(CE_WARN, "Couldn't register evtchn callback");
378eb0cc229Sedp 		return (-1);
379eb0cc229Sedp 	}
380eb0cc229Sedp 	return (0);
381eb0cc229Sedp }
382eb0cc229Sedp 
383eb0cc229Sedp void
384eb0cc229Sedp ec_resume(void)
385eb0cc229Sedp {
386eb0cc229Sedp 	int i;
387eb0cc229Sedp 
388eb0cc229Sedp 	/* New event-channel space is not 'live' yet. */
389eb0cc229Sedp 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
390eb0cc229Sedp 		(void) hypervisor_mask_event(i);
391eb0cc229Sedp 	if (set_hvm_callback(evtchn_callback_irq) != 0)
392eb0cc229Sedp 		cmn_err(CE_WARN, "Couldn't register evtchn callback");
393eb0cc229Sedp 
394eb0cc229Sedp }
395