/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/xpv_support.h>
#include <sys/hypervisor.h>
#include <sys/machsystm.h>
#include <sys/mutex.h>
#include <sys/cmn_err.h>
#include <sys/dditypes.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <sys/cpu.h>
#include <sys/psw.h>
#include <sys/psm.h>
#include <sys/sdt.h>

extern dev_info_t *xpv_dip;
static ddi_intr_handle_t *evtchn_ihp = NULL;
static ddi_softint_handle_t evtchn_to_handle[NR_EVENT_CHANNELS];
kmutex_t ec_lock;

static int evtchn_callback_irq = -1;

static volatile ulong_t *pending_events;
static volatile ulong_t *masked_events;

/* log2(NBBY * sizeof (ulong_t)) */
#ifdef __amd64
#define	EVTCHN_SHIFT	6
#else /* __i386 */
#define	EVTCHN_SHIFT	5
#endif
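
/*
 * Illustrative sketch (hypothetical helper, not used by this driver;
 * compiled out by default): how an event channel number decomposes into
 * a word index and bit position using EVTCHN_SHIFT.  On amd64, event 67
 * maps to word 1 (67 >> 6) and bit 3 (67 & 63).
 */
#ifdef EVTCHN_EXAMPLES
static void
evtchn_decompose_example(uint_t ev, int *indexp, ulong_t *bitp)
{
	/* Word that holds this event's bit in the shared bitmasks */
	*indexp = ev >> EVTCHN_SHIFT;
	/* Bit for this event within that word */
	*bitp = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
}
#endif	/* EVTCHN_EXAMPLES */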

/* Atomically get and clear a ulong from memory. */
#define	GET_AND_CLEAR(src, targ) {				\
	membar_enter();						\
	do {							\
		targ = *src;					\
	} while (atomic_cas_ulong(src, targ, 0) != targ);	\
}
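
/*
 * Illustrative sketch (hypothetical, compiled out by default): the CAS
 * loop in GET_AND_CLEAR is semantically an atomic swap of zero, so
 * atomic_swap_ulong() from <sys/atomic.h> expresses the same
 * grab-and-clear in a single call.
 */
#ifdef EVTCHN_EXAMPLES
static ulong_t
get_and_clear_example(volatile ulong_t *src)
{
	membar_enter();
	return (atomic_swap_ulong(src, 0));
}
#endif	/* EVTCHN_EXAMPLES */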

/*
 * Get the first and last bits set in a bitmap.  Note the loop must
 * cover every bit in the word, i.e. (1 << EVTCHN_SHIFT) of them.
 */
#define	GET_BOUNDS(bitmap, low, high)	{		\
	int _i;						\
	low = high = -1;				\
	for (_i = 0; _i < (1 << EVTCHN_SHIFT); _i++)	\
		if ((bitmap) & (1UL << _i)) {		\
			if (low == -1)			\
				low = _i;		\
			high = _i;			\
		}					\
}
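
/*
 * Illustrative sketch (hypothetical, compiled out by default): applying
 * GET_BOUNDS to the bitmap 0x90 (bits 4 and 7 set) yields low == 4 and
 * high == 7; a zero bitmap leaves both at -1.
 */
#ifdef EVTCHN_EXAMPLES
static void
get_bounds_example(void)
{
	int low, high;
	ulong_t bitmap = 0x90;

	GET_BOUNDS(bitmap, low, high);
	ASSERT(low == 4 && high == 7);
}
#endif	/* EVTCHN_EXAMPLES */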

void
ec_bind_evtchn_to_handler(int evtchn, pri_t pri, ec_handler_fcn_t handler,
    void *arg1)
{
	ddi_softint_handle_t hdl;

	if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
		cmn_err(CE_WARN, "Binding invalid event channel: %d", evtchn);
		return;
	}

	(void) ddi_intr_add_softint(xpv_dip, &hdl, pri, handler, (caddr_t)arg1);
	mutex_enter(&ec_lock);
	ASSERT(evtchn_to_handle[evtchn] == NULL);
	evtchn_to_handle[evtchn] = hdl;
	mutex_exit(&ec_lock);

	/* Let the hypervisor know we're prepared to handle this event */
	hypervisor_unmask_event(evtchn);
}
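
/*
 * Illustrative sketch (hypothetical, compiled out by default): how a
 * frontend driver might wire a handler to an already-known event
 * channel and poke the remote end.  The handler signature assumes
 * ec_handler_fcn_t follows the DDI softint convention; the priority
 * and port are made up for illustration.
 */
#ifdef EVTCHN_EXAMPLES
/*ARGSUSED*/
static uint_t
example_handler(caddr_t arg1, caddr_t arg2)
{
	/* Process the event at soft interrupt level */
	return (DDI_INTR_CLAIMED);
}

static void
bind_example(int port)
{
	ec_bind_evtchn_to_handler(port, DDI_INTR_SOFTPRI_DEFAULT,
	    example_handler, NULL);
	/* Kick the peer so it knows we're listening */
	ec_notify_via_evtchn(port);
}
#endif	/* EVTCHN_EXAMPLES */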

void
ec_unbind_evtchn(int evtchn)
{
	evtchn_close_t close;
	ddi_softint_handle_t hdl;

	if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
		cmn_err(CE_WARN, "Unbinding invalid event channel: %d", evtchn);
		return;
	}

	/*
	 * Let the hypervisor know we're no longer prepared to handle this
	 * event
	 */
	hypervisor_mask_event(evtchn);

	/* Clean up the event handler metadata */
	mutex_enter(&ec_lock);
	hdl = evtchn_to_handle[evtchn];
	evtchn_to_handle[evtchn] = NULL;
	mutex_exit(&ec_lock);

	close.port = evtchn;
	(void) HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

	/* Channels that were never bound have no softint to remove */
	if (hdl != NULL)
		(void) ddi_intr_remove_softint(hdl);
}

void
ec_notify_via_evtchn(unsigned int port)
{
	evtchn_send_t send;

	if ((int)port == -1)
		return;
	send.port = port;
	(void) HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}

void
hypervisor_unmask_event(unsigned int ev)
{
	int index = ev >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
	volatile ulong_t *maskp;
	evtchn_unmask_t unmask;

	/*
	 * index and bit locate this event in the masked-events bitmask;
	 * clear the event's mask bit.
	 */
	maskp = &masked_events[index];
	atomic_and_ulong(maskp, ~bit);

	/* Let the hypervisor know the event has been unmasked */
	unmask.port = ev;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask) != 0)
		panic("hypervisor_unmask_event() failed");
}

/* Set a bit in an evtchn mask word */
void
hypervisor_mask_event(uint_t ev)
{
	int index = ev >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
	volatile ulong_t *maskp;

	maskp = &masked_events[index];
	atomic_or_ulong(maskp, bit);
}

void
hypervisor_clear_event(uint_t ev)
{
	int index = ev >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
	volatile ulong_t *pendp;

	/* Clear the event's bit in the pending-events bitmask */
	pendp = &pending_events[index];
	atomic_and_ulong(pendp, ~bit);
}
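
/*
 * Illustrative sketch (hypothetical, compiled out by default): masking
 * an event around a reconfiguration step, discarding anything that
 * arrived while masked, then unmasking.  Whether a caller wants to drop
 * or replay pending events is situational; this only shows how the
 * three calls compose.
 */
#ifdef EVTCHN_EXAMPLES
static void
evtchn_quiesce_example(uint_t ev)
{
	hypervisor_mask_event(ev);
	/* ... retarget or rebind the handler here ... */
	hypervisor_clear_event(ev);
	hypervisor_unmask_event(ev);
}
#endif	/* EVTCHN_EXAMPLES */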

int
xen_alloc_unbound_evtchn(int domid, int *evtchnp)
{
	evtchn_alloc_unbound_t alloc;
	int err;

	alloc.dom = DOMID_SELF;
	alloc.remote_dom = (domid_t)domid;

	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc)) == 0) {
		*evtchnp = alloc.port;
		/* ensure evtchn is masked till we're ready to use it */
		hypervisor_mask_event(*evtchnp);
	} else {
		err = xen_xlate_errcode(err);
	}

	return (err);
}

int
xen_bind_interdomain(int domid, int remote_port, int *port)
{
	evtchn_bind_interdomain_t bind;
	int err;

	bind.remote_dom = (domid_t)domid;
	bind.remote_port = remote_port;
	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind)) == 0)
		*port = bind.local_port;
	else
		err = xen_xlate_errcode(err);
	return (err);
}
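
/*
 * Illustrative sketch (hypothetical, compiled out by default) of the
 * interdomain handshake these two routines support: allocate a local
 * port the peer may bind to, then attach a handler.  The peer domain id
 * and handler are made up; the peer would call its equivalent of
 * xen_bind_interdomain() with our domain id and this port.
 */
#ifdef EVTCHN_EXAMPLES
static int
interdomain_setup_example(int peer_domid, ec_handler_fcn_t handler)
{
	int port, err;

	/* The new port comes back masked until we're ready for events */
	if ((err = xen_alloc_unbound_evtchn(peer_domid, &port)) != 0)
		return (err);

	/* Binding the handler also unmasks the port */
	ec_bind_evtchn_to_handler(port, DDI_INTR_SOFTPRI_DEFAULT,
	    handler, NULL);
	return (0);
}
#endif	/* EVTCHN_EXAMPLES */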

/*ARGSUSED*/
uint_t
evtchn_callback_fcn(caddr_t arg0, caddr_t arg1)
{
	ulong_t pending_word;
	int i, j, port;
	volatile struct vcpu_info *vci;
	uint_t rv = DDI_INTR_UNCLAIMED;
	ddi_softint_handle_t hdl;
	int low, high;
	ulong_t sels;

	vci = &HYPERVISOR_shared_info->vcpu_info[CPU->cpu_id];

again:
	DTRACE_PROBE2(evtchn__scan__start, int, vci->evtchn_upcall_pending,
	    ulong_t, vci->evtchn_pending_sel);

	atomic_and_8(&vci->evtchn_upcall_pending, 0);

	/*
	 * Find the upper and lower bounds in which we need to search for
	 * pending events.
	 */
	GET_AND_CLEAR(&vci->evtchn_pending_sel, sels);

	/* sels == 1 is by far the most common case.  Make it fast */
	if (sels == 1)
		low = high = 0;
	else if (sels == 0)
		return (rv);
	else
		GET_BOUNDS(sels, low, high);

	/* Scan the port list, looking for words with bits set */
	for (i = low; i <= high; i++) {
		ulong_t tmp;

		GET_AND_CLEAR(&pending_events[i], tmp);
		pending_word = tmp & ~(masked_events[i]);

		/* Scan the bits in the word, looking for pending events */
		while (pending_word != 0) {
			j = lowbit(pending_word) - 1;
			port = (i << EVTCHN_SHIFT) + j;
			pending_word = pending_word & ~(1UL << j);

			/*
			 * If there is a handler registered for this event,
			 * schedule a softint of the appropriate priority
			 * to execute it.
			 */
			if ((hdl = evtchn_to_handle[port]) != NULL) {
				(void) ddi_intr_trigger_softint(hdl, NULL);
				rv = DDI_INTR_CLAIMED;
			}
		}
	}
	DTRACE_PROBE2(evtchn__scan__end, int, vci->evtchn_upcall_pending,
	    ulong_t, vci->evtchn_pending_sel);

	if ((volatile uint8_t)vci->evtchn_upcall_pending ||
	    ((volatile ulong_t)vci->evtchn_pending_sel))
		goto again;

	return (rv);
}
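
/*
 * Illustrative sketch (hypothetical, compiled out by default): testing
 * whether a single port is pending and unmasked, mirroring the word/bit
 * arithmetic the scan loop above performs across whole words at a time.
 */
#ifdef EVTCHN_EXAMPLES
static boolean_t
evtchn_is_deliverable_example(uint_t port)
{
	int index = port >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (port & ((1UL << EVTCHN_SHIFT) - 1));

	return ((pending_events[index] & bit) != 0 &&
	    (masked_events[index] & bit) == 0);
}
#endif	/* EVTCHN_EXAMPLES */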

static int
set_hvm_callback(int irq)
{
	struct xen_hvm_param xhp;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	xhp.value = irq;
	return (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp));
}

void
ec_fini()
{
	int i;

	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		ec_unbind_evtchn(i);

	evtchn_callback_irq = -1;
	if (evtchn_ihp != NULL) {
		(void) ddi_intr_disable(*evtchn_ihp);
		(void) ddi_intr_remove_handler(*evtchn_ihp);
		(void) ddi_intr_free(*evtchn_ihp);
		kmem_free(evtchn_ihp, sizeof (ddi_intr_handle_t));
		evtchn_ihp = NULL;
	}
}

int
ec_init(dev_info_t *dip)
{
	int i;
	int rv, actual;
	ddi_intr_handle_t *ihp;

	/*
	 * Record the locations of the pending and masked event bitmasks
	 * (arrays of machine-word-sized ulong_t's) in the shared info page.
	 */
	pending_events = &HYPERVISOR_shared_info->evtchn_pending[0];
	masked_events = &HYPERVISOR_shared_info->evtchn_mask[0];

	/*
	 * Clear our event handler structures and prevent the hypervisor
	 * from triggering any events.
	 */
	mutex_init(&ec_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_handle[i] = NULL;
		hypervisor_mask_event(i);
	}

	/*
	 * Allocate and initialize an interrupt handler to process the
	 * hypervisor's "hey you have events pending!" interrupt.
	 */
	ihp = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
	rv = ddi_intr_alloc(dip, ihp, DDI_INTR_TYPE_FIXED, 0, 1, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (rv < 0 || actual != 1) {
		cmn_err(CE_WARN, "Could not allocate evtchn interrupt: %d",
		    rv);
		kmem_free(ihp, sizeof (ddi_intr_handle_t));
		return (-1);
	}

	rv = ddi_intr_add_handler(*ihp, evtchn_callback_fcn, NULL, NULL);
	if (rv < 0) {
		(void) ddi_intr_free(*ihp);
		cmn_err(CE_WARN, "Could not attach evtchn handler");
		kmem_free(ihp, sizeof (ddi_intr_handle_t));
		return (-1);
	}
	evtchn_ihp = ihp;

	if (ddi_intr_enable(*ihp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Could not enable evtchn interrupts");
		return (-1);
	}

	/* Tell the hypervisor which interrupt we're waiting on. */
	evtchn_callback_irq = ((ddi_intr_handle_impl_t *)*ihp)->ih_vector;

	if (set_hvm_callback(evtchn_callback_irq) != 0) {
		cmn_err(CE_WARN, "Couldn't register evtchn callback");
		return (-1);
	}
	return (0);
}

void
ec_resume(void)
{
	int i;

	/* New event-channel space is not 'live' yet. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		hypervisor_mask_event(i);
	if (set_hvm_callback(evtchn_callback_irq) != 0)
		cmn_err(CE_WARN, "Couldn't register evtchn callback");
}
390