/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/xpv_support.h>
#include <sys/hypervisor.h>
#include <sys/machsystm.h>
#include <sys/mutex.h>
#include <sys/cmn_err.h>
#include <sys/dditypes.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <sys/cpu.h>
#include <sys/psw.h>
#include <sys/psm.h>
#include <sys/sdt.h>

extern dev_info_t *xpv_dip;
static ddi_intr_handle_t *evtchn_ihp = NULL;
static ddi_softint_handle_t evtchn_to_handle[NR_EVENT_CHANNELS];
kmutex_t ec_lock;

static int evtchn_callback_irq = -1;

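/*
 * Live views of the hypervisor's pending and masked event-channel
 * bitmaps; both point into the shared info page and are set up in
 * ec_init().
 */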
static volatile ulong_t *pending_events;
static volatile ulong_t *masked_events;

/* log2(NBBY * sizeof (ulong)) */
#ifdef __amd64
#define	EVTCHN_SHIFT	6
#else /* __i386 */
#define	EVTCHN_SHIFT	5
#endif
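
/*
 * An event channel number decomposes into a word index and a bit
 * position in the bitmaps above: word = ev >> EVTCHN_SHIFT and
 * bit = ev & ((1UL << EVTCHN_SHIFT) - 1).  On amd64, for example,
 * event channel 70 is bit 6 of word 1.
 */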

/*
 * Atomically get and clear a ulong from memory.  The loop retries until
 * the compare-and-swap succeeds, so any bits the hypervisor sets between
 * the read and the clear are picked up on the next pass rather than lost.
 */
#define	GET_AND_CLEAR(src, targ) { \
	membar_enter(); \
	do { \
		targ = *src; \
	} while (atomic_cas_ulong(src, targ, 0) != targ); \
}

/* Get the first and last bits set in a bitmap */
#define	GET_BOUNDS(bitmap, low, high) { \
	int _i; \
	low = high = -1; \
	for (_i = 0; _i < (1 << EVTCHN_SHIFT); _i++) \
		if (bitmap & (1UL << _i)) { \
			if (low == -1) \
				low = _i; \
			high = _i; \
		} \
}

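/*
 * Register "handler" as a soft interrupt handler (at priority "pri",
 * with "arg1" as its first argument) for event channel "evtchn", then
 * unmask the channel with the hypervisor.
 */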
void
ec_bind_evtchn_to_handler(int evtchn, pri_t pri, ec_handler_fcn_t handler,
    void *arg1)
{
	ddi_softint_handle_t hdl;

	if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
		cmn_err(CE_WARN, "Binding invalid event channel: %d", evtchn);
		return;
	}

	(void) ddi_intr_add_softint(xpv_dip, &hdl, pri, handler, (caddr_t)arg1);
	mutex_enter(&ec_lock);
	ASSERT(evtchn_to_handle[evtchn] == NULL);
	evtchn_to_handle[evtchn] = hdl;
	mutex_exit(&ec_lock);

	/* Let the hypervisor know we're prepared to handle this event */
	hypervisor_unmask_event(evtchn);
}

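/*
 * Tear down the binding for "evtchn": mask it, close it with the
 * hypervisor, and remove its soft interrupt handler.
 */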
void
ec_unbind_evtchn(int evtchn)
{
	evtchn_close_t close;
	ddi_softint_handle_t hdl;

	if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
		cmn_err(CE_WARN, "Unbinding invalid event channel: %d", evtchn);
		return;
	}

	/*
	 * Let the hypervisor know we're no longer prepared to handle this
	 * event.
	 */
	hypervisor_mask_event(evtchn);

	/* Clean up the event handler metadata */
	mutex_enter(&ec_lock);
	hdl = evtchn_to_handle[evtchn];
	evtchn_to_handle[evtchn] = NULL;
	mutex_exit(&ec_lock);

	close.port = evtchn;
	(void) HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	(void) ddi_intr_remove_softint(hdl);
}

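/*
 * Send a notification to the remote end of "port".  A port of -1 is
 * treated as "no port" and ignored.
 */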
void
ec_notify_via_evtchn(unsigned int port)
{
	evtchn_send_t send;

	if ((int)port == -1)
		return;
	send.port = port;
	(void) HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}

void
hypervisor_unmask_event(unsigned int ev)
{
	int index = ev >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
	volatile ulong_t *maskp;
	evtchn_unmask_t unmask;

	/*
	 * (index, bit) locate the event in the masked-events bitmap.
	 * Clear the bit.
	 */
	maskp = &masked_events[index];
	atomic_and_ulong(maskp, ~bit);

	/* Let the hypervisor know the event has been unmasked */
	unmask.port = ev;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask) != 0)
		panic("hypervisor_unmask_event() failed");
}

/* Set "ev"'s bit in the masked-events bitmap */
void
hypervisor_mask_event(uint_t ev)
{
	int index = ev >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
	volatile ulong_t *maskp;

	maskp = &masked_events[index];
	atomic_or_ulong(maskp, bit);
}

/* Clear "ev"'s bit in the pending-events bitmap */
void
hypervisor_clear_event(uint_t ev)
{
	int index = ev >> EVTCHN_SHIFT;
	ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
	volatile ulong_t *maskp;

	maskp = &pending_events[index];
	atomic_and_ulong(maskp, ~bit);
}

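/*
 * Ask the hypervisor for a new, unbound event channel that domain
 * "domid" may later bind to; on success the local port number is
 * returned in *evtchnp.
 */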
int
xen_alloc_unbound_evtchn(int domid, int *evtchnp)
{
	evtchn_alloc_unbound_t alloc;
	int err;

	alloc.dom = DOMID_SELF;
	alloc.remote_dom = (domid_t)domid;

	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc)) == 0) {
		*evtchnp = alloc.port;
		/* ensure evtchn is masked till we're ready to use it */
		(void) hypervisor_mask_event(*evtchnp);
	} else {
		err = xen_xlate_errcode(err);
	}

	return (err);
}

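/*
 * Bind a local event channel to "remote_port" in domain "domid"; on
 * success the local port number is returned in *port.
 */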
int
xen_bind_interdomain(int domid, int remote_port, int *port)
{
	evtchn_bind_interdomain_t bind;
	int err;

	bind.remote_dom = (domid_t)domid;
	bind.remote_port = remote_port;
	if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind)) == 0)
		*port = bind.local_port;
	else
		err = xen_xlate_errcode(err);
	return (err);
}

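/*
 * Interrupt handler for the hypervisor's event-channel upcall.  Scan
 * the pending-events bitmap and trigger the registered soft interrupt
 * for every pending, unmasked event channel.
 */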
/*ARGSUSED*/
uint_t
evtchn_callback_fcn(caddr_t arg0, caddr_t arg1)
{
	ulong_t pending_word;
	int i, j, port;
	volatile struct vcpu_info *vci;
	uint_t rv = DDI_INTR_UNCLAIMED;
	ddi_softint_handle_t hdl;
	int low, high;
	ulong_t sels;

	/*
	 * Xen hard-codes all notifications to VCPU0, so we bind
	 * ourselves via xpv.conf.  Note that this also assumes that all
	 * evtchns are bound to VCPU0, which is true by default.
	 */
	ASSERT(CPU->cpu_id == 0);

	vci = &HYPERVISOR_shared_info->vcpu_info[0];

again:
	DTRACE_PROBE2(evtchn__scan__start, int, vci->evtchn_upcall_pending,
	    ulong_t, vci->evtchn_pending_sel);

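	/*
	 * Clear the upcall-pending flag before we sample and clear the
	 * selector word; anything that becomes pending after this point
	 * is caught by the recheck at the bottom.
	 */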
	atomic_and_8(&vci->evtchn_upcall_pending, 0);

	/*
	 * Find the upper and lower bounds in which we need to search for
	 * pending events.
	 */
	GET_AND_CLEAR(&vci->evtchn_pending_sel, sels);

	/* sels == 1 is by far the most common case.  Make it fast */
	if (sels == 1)
		low = high = 0;
	else if (sels == 0)
		return (rv);
	else
		GET_BOUNDS(sels, low, high);

	/* Scan the port list, looking for words with bits set */
	for (i = low; i <= high; i++) {
		ulong_t tmp;

		GET_AND_CLEAR(&pending_events[i], tmp);
		pending_word = tmp & ~(masked_events[i]);

		/* Scan the bits in the word, looking for pending events */
		while (pending_word != 0) {
			j = lowbit(pending_word) - 1;
			port = (i << EVTCHN_SHIFT) + j;
			pending_word = pending_word & ~(1UL << j);

			/*
			 * If there is a handler registered for this event,
			 * schedule a softint of the appropriate priority
			 * to execute it.
			 */
			if ((hdl = evtchn_to_handle[port]) != NULL) {
				(void) ddi_intr_trigger_softint(hdl, NULL);
				rv = DDI_INTR_CLAIMED;
			}
		}
	}
	DTRACE_PROBE2(evtchn__scan__end, int, vci->evtchn_upcall_pending,
	    ulong_t, vci->evtchn_pending_sel);

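	/*
	 * More events may have arrived while we were scanning; if so, go
	 * around again rather than missing them.
	 */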
	if ((volatile uint8_t)vci->evtchn_upcall_pending ||
	    ((volatile ulong_t)vci->evtchn_pending_sel))
		goto again;

	return (rv);
}

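/*
 * Tell the hypervisor (via the HVM_PARAM_CALLBACK_IRQ parameter) which
 * IRQ to raise when event channels have work pending.
 */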
static int
set_hvm_callback(int irq)
{
	struct xen_hvm_param xhp;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	xhp.value = irq;
	return (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp));
}

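/*
 * Unbind every event channel and release the callback interrupt.
 */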
void
ec_fini()
{
	int i;

	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		ec_unbind_evtchn(i);

	evtchn_callback_irq = -1;
	if (evtchn_ihp != NULL) {
		(void) ddi_intr_disable(*evtchn_ihp);
		(void) ddi_intr_remove_handler(*evtchn_ihp);
		(void) ddi_intr_free(*evtchn_ihp);
		kmem_free(evtchn_ihp, sizeof (ddi_intr_handle_t));
		evtchn_ihp = NULL;
	}
}

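/*
 * Set up the event-channel machinery: locate the shared bitmaps, mask
 * every channel, and wire up the hypervisor's callback interrupt.
 */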
int
ec_init(void)
{
	int i;
	int rv, actual;
	ddi_intr_handle_t *ihp;

	/*
	 * Point our pending and masked views at the event-channel
	 * bitmaps in the hypervisor's shared info page.
	 */
	pending_events = &HYPERVISOR_shared_info->evtchn_pending[0];
	masked_events = &HYPERVISOR_shared_info->evtchn_mask[0];

	/*
	 * Clear our event handler structures and prevent the hypervisor
	 * from triggering any events.
	 */
	mutex_init(&ec_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_handle[i] = NULL;
		(void) hypervisor_mask_event(i);
	}

	/*
	 * Allocate and initialize an interrupt handler to process the
	 * hypervisor's "hey, you have events pending!" interrupt.
	 */
	ihp = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
	rv = ddi_intr_alloc(xpv_dip, ihp, DDI_INTR_TYPE_FIXED, 0, 1, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (rv < 0 || actual != 1) {
		cmn_err(CE_WARN, "Could not allocate evtchn interrupt: %d",
		    rv);
		kmem_free(ihp, sizeof (ddi_intr_handle_t));
		return (-1);
	}

	rv = ddi_intr_add_handler(*ihp, evtchn_callback_fcn, NULL, NULL);
	if (rv < 0) {
		(void) ddi_intr_free(*ihp);
		cmn_err(CE_WARN, "Could not attach evtchn handler");
		kmem_free(ihp, sizeof (ddi_intr_handle_t));
		return (-1);
	}
	evtchn_ihp = ihp;

	if (ddi_intr_enable(*ihp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Could not enable evtchn interrupts");
		return (-1);
	}

	/* Tell the hypervisor which interrupt we're waiting on. */
	evtchn_callback_irq = ((ddi_intr_handle_impl_t *)*ihp)->ih_vector;

	if (set_hvm_callback(evtchn_callback_irq) != 0) {
		cmn_err(CE_WARN, "Couldn't register evtchn callback");
		return (-1);
	}
	return (0);
}

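/*
 * Re-establish hypervisor state on resume: mask every channel and
 * re-register the callback IRQ.
 */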
void
ec_resume(void)
{
	int i;

	/* New event-channel space is not 'live' yet. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		(void) hypervisor_mask_event(i);
	if (set_hvm_callback(evtchn_callback_irq) != 0)
		cmn_err(CE_WARN, "Couldn't register evtchn callback");
}