/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 * FreeBSD port Copyright (c) 2014, Roger Pau Monné
 * Fetched from git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * File: drivers/xen/evtchn.c
 * Git commit: 0dc0064add422bc0ef5165ebe9ece3052bbd457d
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/filio.h>
#include <sys/vnode.h>

#include <machine/intr_machdep.h>
#include <machine/xen/synch_bitops.h>

#include <xen/xen-os.h>
#include <xen/evtchn.h>
#include <xen/xen_intr.h>

#include <xen/evtchn/evtchnvar.h>

MALLOC_DEFINE(M_EVTCHN, "evtchn_dev", "Xen event channel user-space device");

struct user_evtchn;

static int evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2);

RB_HEAD(evtchn_tree, user_evtchn);

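/*
 * Per-open-instance state: event channels bound by this file descriptor are
 * kept in a red-black tree keyed by port number, and pending events are
 * handed to user space through a one-page notification ring of
 * evtchn_port_t entries.
 */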
struct per_user_data {
	struct mtx bind_mutex; /* serialize bind/unbind operations */
	struct evtchn_tree evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct sx ring_cons_mutex; /* protect against concurrent readers */
	struct mtx ring_prod_mutex; /* protect against concurrent interrupts */
	struct selinfo ev_rsel;
};

struct user_evtchn {
	RB_ENTRY(user_evtchn) node;
	struct per_user_data *user;
	evtchn_port_t port;
	xen_intr_handle_t handle;
	bool enabled;
};

RB_GENERATE_STATIC(evtchn_tree, user_evtchn, node, evtchn_cmp);

static device_t evtchn_dev;

static d_read_t      evtchn_read;
static d_write_t     evtchn_write;
static d_ioctl_t     evtchn_ioctl;
static d_poll_t      evtchn_poll;
static d_open_t      evtchn_open;

static void evtchn_release(void *arg);

static struct cdevsw evtchn_devsw = {
	.d_version = D_VERSION,
	.d_open = evtchn_open,
	.d_read = evtchn_read,
	.d_write = evtchn_write,
	.d_ioctl = evtchn_ioctl,
	.d_poll = evtchn_poll,
	.d_name = "evtchn",
};

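/*
 * Illustrative user-space usage (a sketch only, not part of this driver;
 * headers and error handling omitted).  A process binds ports through
 * ioctl(2), reads pending ports as an array of evtchn_port_t and writes
 * port numbers back to re-enable (unmask) them:
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = {
 *		.remote_domain = 0,	-- e.g. let dom0 connect to the port
 *	};
 *	ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *	evtchn_port_t port;
 *	read(fd, &port, sizeof(port));	-- blocks until an event is pending
 *	write(fd, &port, sizeof(port));	-- unmask the port again
 */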
/*------------------------- Red-black tree helpers ---------------------------*/
static int
evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2)
{

	return (u1->port - u2->port);
}

static struct user_evtchn *
find_evtchn(struct per_user_data *u, evtchn_port_t port)
{
	struct user_evtchn tmp = {
		.port = port,
	};

	return (RB_FIND(evtchn_tree, &u->evtchns, &tmp));
}

/*--------------------------- Interrupt handlers -----------------------------*/
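/*
 * Interrupt filter: runs in primary interrupt context.  It masks the event
 * channel so no further upcalls are delivered until user space re-enables
 * the port, then schedules the ithread handler below.
 */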
static int
evtchn_filter(void *arg)
{
	struct user_evtchn *evtchn;

	evtchn = arg;

	if (!evtchn->enabled && bootverbose) {
		device_printf(evtchn_dev,
		    "Received upcall for disabled event channel %d\n",
		    evtchn->port);
	}

	evtchn_mask_port(evtchn->port);
	evtchn->enabled = false;

	return (FILTER_SCHEDULE_THREAD);
}

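/*
 * Ithread handler: queue the port number in the per-user notification ring
 * and wake up any sleeping or polling readers.  If the ring is full the
 * overflow flag is set and subsequent reads fail with EFBIG until the ring
 * is reset.
 */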
static void
evtchn_interrupt(void *arg)
{
	struct user_evtchn *evtchn;
	struct per_user_data *u;

	evtchn = arg;
	u = evtchn->user;

	/*
	 * Protect against concurrent events using this handler
	 * on different CPUs.
	 */
	mtx_lock(&u->ring_prod_mutex);
	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wakeup(u);
			selwakeup(&u->ev_rsel);
		}
	} else
		u->ring_overflow = 1;
	mtx_unlock(&u->ring_prod_mutex);
}

/*------------------------- Character device methods -------------------------*/
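/*
 * Each open of /dev/xen/evtchn gets its own per_user_data, attached to the
 * file descriptor with devfs_set_cdevpriv() and torn down in
 * evtchn_release() when the last reference goes away.
 */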
static int
evtchn_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	struct per_user_data *u;
	int error;

	u = malloc(sizeof(*u), M_EVTCHN, M_WAITOK | M_ZERO);
	u->ring = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK | M_ZERO);

	/* Initialize locks */
	mtx_init(&u->bind_mutex, "evtchn_bind_mutex", NULL, MTX_DEF);
	sx_init(&u->ring_cons_mutex, "evtchn_ringc_sx");
	mtx_init(&u->ring_prod_mutex, "evtchn_ringp_mutex", NULL, MTX_DEF);

	/* Initialize red-black tree. */
	RB_INIT(&u->evtchns);

	/* Assign the allocated per_user_data to this open instance. */
	error = devfs_set_cdevpriv(u, evtchn_release);
	if (error != 0) {
		mtx_destroy(&u->bind_mutex);
		mtx_destroy(&u->ring_prod_mutex);
		sx_destroy(&u->ring_cons_mutex);
		free(u->ring, M_EVTCHN);
		free(u, M_EVTCHN);
	}

	return (error);
}

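/*
 * cdevpriv destructor: unbind every event channel still owned by this open
 * instance and release the notification ring and locks.
 */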
static void
evtchn_release(void *arg)
{
	struct per_user_data *u;
	struct user_evtchn *evtchn, *tmp;

	u = arg;

	seldrain(&u->ev_rsel);

	RB_FOREACH_SAFE(evtchn, evtchn_tree, &u->evtchns, tmp) {
		xen_intr_unbind(&evtchn->handle);

		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		free(evtchn, M_EVTCHN);
	}

	mtx_destroy(&u->bind_mutex);
	mtx_destroy(&u->ring_prod_mutex);
	sx_destroy(&u->ring_cons_mutex);
	free(u->ring, M_EVTCHN);
	free(u, M_EVTCHN);
}

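/*
 * read(2): copy pending port numbers out of the notification ring as an
 * array of evtchn_port_t.  Blocks while the ring is empty unless the
 * descriptor is non-blocking; returns EFBIG once the ring has overflowed,
 * until the user issues IOCTL_EVTCHN_RESET.
 */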
static int
evtchn_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return (0);

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	sx_xlock(&u->ring_cons_mutex);
	for (;;) {
		if (u->ring_overflow) {
			error = EFBIG;
			goto unlock_out;
		}

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		if (ioflag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto unlock_out;
		}

		error = sx_sleep(u, &u->ring_cons_mutex, PCATCH, "evtchw", 0);
		if ((error != 0) && (error != EWOULDBLOCK))
			goto unlock_out;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	error = EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */

	if (uiomove(&u->ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
	    ((bytes2 != 0) && uiomove(&u->ring[0], bytes2, uio)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	error = 0;

unlock_out:
	sx_xunlock(&u->ring_cons_mutex);
	return (error);
}

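/*
 * write(2): the buffer is interpreted as an array of evtchn_port_t; every
 * port that is currently bound by this instance and disabled is re-enabled
 * (unmasked).  Unknown ports are silently ignored.
 */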
static int
evtchn_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, i, count;
	evtchn_port_t *kbuf;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	kbuf = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK);

	count = uio->uio_resid;
	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	error = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	error = uiomove(kbuf, count, uio);
	if (error != 0)
		goto out;

	mtx_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			evtchn_unmask_port(evtchn->port);
		}
	}

	mtx_unlock(&u->bind_mutex);
	error = 0;

out:
	free(kbuf, M_EVTCHN);
	return (error);
}

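/*
 * Common tail of the bind ioctls: record the port backing the freshly bound
 * handle, insert it into the per-user tree and hook up the interrupt
 * filter/handler pair.  On failure the event channel is unbound and freed.
 */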
static inline int
evtchn_bind_user_port(struct per_user_data *u, struct user_evtchn *evtchn)
{
	int error;

	evtchn->port = xen_intr_port(evtchn->handle);
	evtchn->user = u;
	evtchn->enabled = true;
	mtx_lock(&u->bind_mutex);
	RB_INSERT(evtchn_tree, &u->evtchns, evtchn);
	mtx_unlock(&u->bind_mutex);
	error = xen_intr_add_handler(device_get_nameunit(evtchn_dev),
	    evtchn_filter, evtchn_interrupt, evtchn,
	    INTR_TYPE_MISC | INTR_MPSAFE, evtchn->handle);
	if (error != 0) {
		xen_intr_unbind(&evtchn->handle);
		mtx_lock(&u->bind_mutex);
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);
		free(evtchn, M_EVTCHN);
	}
	return (error);
}

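/*
 * ioctl(2) interface: bind VIRQs, interdomain ports and newly allocated
 * unbound ports, unbind or notify an existing port, and reset the
 * notification ring.
 */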
static int
evtchn_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg,
    int mode, struct thread *td __unused)
{
	struct per_user_data *u;
	int error;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_virq *)arg;

		error = xen_intr_bind_virq(evtchn_dev, bind->virq, 0,
		    NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_interdomain *)arg;

		error = xen_intr_bind_remote_port(evtchn_dev,
		    bind->remote_domain, bind->remote_port, NULL,
		    NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_unbound_port *)arg;

		error = xen_intr_alloc_and_bind_local_port(evtchn_dev,
		    bind->remote_domain, NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind *unbind;
		struct user_evtchn *evtchn;

		unbind = (struct ioctl_evtchn_unbind *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, unbind->port);
		if (evtchn == NULL) {
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);

		xen_intr_unbind(&evtchn->handle);
		free(evtchn, M_EVTCHN);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify *notify;
		struct user_evtchn *evtchn;

		notify = (struct ioctl_evtchn_notify *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, notify->port);
		if (evtchn == NULL) {
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}

		xen_intr_signal(evtchn->handle);
		mtx_unlock(&u->bind_mutex);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		sx_xlock(&u->ring_cons_mutex);
		mtx_lock(&u->ring_prod_mutex);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		mtx_unlock(&u->ring_prod_mutex);
		sx_xunlock(&u->ring_cons_mutex);
		error = 0;
		break;
	}

	case FIONBIO:
	case FIOASYNC:
		/* Handled in an upper layer */
		error = 0;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

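/*
 * poll(2): the device is always writable; it is readable whenever the
 * notification ring is non-empty.
 */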
static int
evtchn_poll(struct cdev *dev, int events, struct thread *td)
{
	struct per_user_data *u;
	int error, mask;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (POLLERR);

	/* we can always write */
	mask = events & (POLLOUT | POLLWRNORM);

	mtx_lock(&u->ring_prod_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (u->ring_cons != u->ring_prod) {
			mask |= events & (POLLIN | POLLRDNORM);
		} else {
			/* Record that someone is waiting */
			selrecord(td, &u->ev_rsel);
		}
	}
	mtx_unlock(&u->ring_prod_mutex);

	return (mask);
}

/*------------------ Private Device Attachment Functions  --------------------*/
static void
evtchn_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach evtchn device on a non-Xen domain"));

	evtchn_dev = BUS_ADD_CHILD(parent, 0, "evtchn", 0);
	if (evtchn_dev == NULL)
		panic("unable to attach evtchn user-space device");
}

static int
evtchn_probe(device_t dev)
{

	device_set_desc(dev, "Xen event channel user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
evtchn_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &evtchn_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/evtchn");
	return (0);
}

/*-------------------- Private Device Attachment Data  -----------------------*/
static device_method_t evtchn_methods[] = {
	DEVMETHOD(device_identify, evtchn_identify),
	DEVMETHOD(device_probe, evtchn_probe),
	DEVMETHOD(device_attach, evtchn_attach),

	DEVMETHOD_END
};

static driver_t evtchn_driver = {
	"evtchn",
	evtchn_methods,
	0,
};

devclass_t evtchn_devclass;

DRIVER_MODULE(evtchn, xenpv, evtchn_driver, evtchn_devclass, 0, 0);
MODULE_DEPEND(evtchn, xenpv, 1, 1, 1);