/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 * FreeBSD port Copyright (c) 2014, Roger Pau Monné
 * Fetched from git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * File: drivers/xen/evtchn.c
 * Git commit: 0dc0064add422bc0ef5165ebe9ece3052bbd457d
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/filio.h>
#include <sys/vnode.h>

#include <xen/xen-os.h>
#include <xen/evtchn.h>
#include <xen/xen_intr.h>

#include <xen/evtchn/evtchnvar.h>

MALLOC_DEFINE(M_EVTCHN, "evtchn_dev", "Xen event channel user-space device");

struct user_evtchn;

static int evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2);

RB_HEAD(evtchn_tree, user_evtchn);

struct per_user_data {
	struct mtx bind_mutex; /* serialize bind/unbind operations */
	struct evtchn_tree evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
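	/*
	 * ring_prod and ring_cons are free-running counters; they are only
	 * reduced modulo EVTCHN_RING_SIZE (via EVTCHN_RING_MASK) when the
	 * ring itself is indexed.
	 */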
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct sx ring_cons_mutex; /* protect against concurrent readers */
	struct mtx ring_prod_mutex; /* protect against concurrent interrupts */
	struct selinfo ev_rsel;
};

struct user_evtchn {
	RB_ENTRY(user_evtchn) node;
	struct per_user_data *user;
	evtchn_port_t port;
	xen_intr_handle_t handle;
	bool enabled;
};

RB_GENERATE_STATIC(evtchn_tree, user_evtchn, node, evtchn_cmp);

static device_t evtchn_dev;

static d_read_t      evtchn_read;
static d_write_t     evtchn_write;
static d_ioctl_t     evtchn_ioctl;
static d_poll_t      evtchn_poll;
static d_open_t      evtchn_open;

static void evtchn_release(void *arg);

static struct cdevsw evtchn_devsw = {
	.d_version = D_VERSION,
	.d_open = evtchn_open,
	.d_read = evtchn_read,
	.d_write = evtchn_write,
	.d_ioctl = evtchn_ioctl,
	.d_poll = evtchn_poll,
	.d_name = "evtchn",
};
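
/*
 * Illustrative sketch of the userspace protocol implemented below (not part
 * of the driver; error handling omitted and "domid" is a placeholder for
 * the peer domain id):
 *
 *	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = domid };
 *	evtchn_port_t port;
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *
 *	ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *	read(fd, &port, sizeof(port));		// blocks until bind.port fires
 *	write(fd, &port, sizeof(port));		// re-enable (unmask) the port
 */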

/*------------------------- Red-black tree helpers ---------------------------*/
static int
evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2)
{

	return (u1->port - u2->port);
}

static struct user_evtchn *
find_evtchn(struct per_user_data *u, evtchn_port_t port)
{
	struct user_evtchn tmp = {
		.port = port,
	};

	return (RB_FIND(evtchn_tree, &u->evtchns, &tmp));
}

/*--------------------------- Interrupt handlers -----------------------------*/
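/*
 * Filter routine, run in interrupt context: mask the event channel and mark
 * it disabled, then return FILTER_SCHEDULE_THREAD so that evtchn_interrupt()
 * queues the port on the per-user notification ring from the ithread.
 */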
static int
evtchn_filter(void *arg)
{
	struct user_evtchn *evtchn;

	evtchn = arg;

	if (!evtchn->enabled && bootverbose) {
		device_printf(evtchn_dev,
		    "Received upcall for disabled event channel %d\n",
		    evtchn->port);
	}

	evtchn_mask_port(evtchn->port);
	evtchn->enabled = false;

	return (FILTER_SCHEDULE_THREAD);
}

static void
evtchn_interrupt(void *arg)
{
	struct user_evtchn *evtchn;
	struct per_user_data *u;

	evtchn = arg;
	u = evtchn->user;

	/*
	 * Protect against concurrent events using this handler
	 * on different CPUs.
	 */
	mtx_lock(&u->ring_prod_mutex);
	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wakeup(u);
			selwakeup(&u->ev_rsel);
		}
	} else
		u->ring_overflow = 1;
	mtx_unlock(&u->ring_prod_mutex);
}

/*------------------------- Character device methods -------------------------*/
static int
evtchn_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	struct per_user_data *u;
	int error;

	u = malloc(sizeof(*u), M_EVTCHN, M_WAITOK | M_ZERO);
	u->ring = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK | M_ZERO);

	/* Initialize locks */
	mtx_init(&u->bind_mutex, "evtchn_bind_mutex", NULL, MTX_DEF);
	sx_init(&u->ring_cons_mutex, "evtchn_ringc_sx");
	mtx_init(&u->ring_prod_mutex, "evtchn_ringp_mutex", NULL, MTX_DEF);

	/* Initialize red-black tree. */
	RB_INIT(&u->evtchns);

	/* Assign the allocated per_user_data to this open instance. */
	error = devfs_set_cdevpriv(u, evtchn_release);
	if (error != 0) {
		mtx_destroy(&u->bind_mutex);
		mtx_destroy(&u->ring_prod_mutex);
		sx_destroy(&u->ring_cons_mutex);
		free(u->ring, M_EVTCHN);
		free(u, M_EVTCHN);
	}

	return (error);
}

static void
evtchn_release(void *arg)
{
	struct per_user_data *u;
	struct user_evtchn *evtchn, *tmp;

	u = arg;

	seldrain(&u->ev_rsel);

	RB_FOREACH_SAFE(evtchn, evtchn_tree, &u->evtchns, tmp) {
		xen_intr_unbind(&evtchn->handle);

		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		free(evtchn, M_EVTCHN);
	}

	mtx_destroy(&u->bind_mutex);
	mtx_destroy(&u->ring_prod_mutex);
	sx_destroy(&u->ring_cons_mutex);
	free(u->ring, M_EVTCHN);
	free(u, M_EVTCHN);
}

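/*
 * Return pending ports from the notification ring.  Only whole
 * evtchn_port_t entries are copied out; the call blocks until at least one
 * port is pending unless IO_NDELAY is set, and fails with EFBIG after a
 * ring overflow until IOCTL_EVTCHN_RESET is issued.
 */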
static int
evtchn_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return (0);

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	sx_xlock(&u->ring_cons_mutex);
	for (;;) {
		if (u->ring_overflow) {
			error = EFBIG;
			goto unlock_out;
		}

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		if (ioflag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto unlock_out;
		}

		error = sx_sleep(u, &u->ring_cons_mutex, PCATCH, "evtchw", 0);
		if ((error != 0) && (error != EWOULDBLOCK))
			goto unlock_out;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	error = EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */

	if (uiomove(&u->ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
	    ((bytes2 != 0) && uiomove(&u->ring[0], bytes2, uio)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	error = 0;

unlock_out:
	sx_xunlock(&u->ring_cons_mutex);
	return (error);
}

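/*
 * Accept a list of ports from userspace and re-enable each one that is
 * bound to this open instance and currently disabled (i.e. masked by
 * evtchn_filter() after delivering an event).
 */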
static int
evtchn_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, i, count;
	evtchn_port_t *kbuf;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	kbuf = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK);

	count = uio->uio_resid;
	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	error = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	error = uiomove(kbuf, count, uio);
	if (error != 0)
		goto out;

	mtx_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			evtchn_unmask_port(evtchn->port);
		}
	}

	mtx_unlock(&u->bind_mutex);
	error = 0;

out:
	free(kbuf, M_EVTCHN);
	return (error);
}

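/*
 * Common tail of the bind ioctls: record the port backing the xen_intr
 * handle, link the evtchn into the per-user tree and install the
 * filter/ithread handlers.  On failure the port is unbound and the evtchn
 * is freed.
 */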
static inline int
evtchn_bind_user_port(struct per_user_data *u, struct user_evtchn *evtchn)
{
	int error;

	evtchn->port = xen_intr_port(evtchn->handle);
	evtchn->user = u;
	evtchn->enabled = true;
	mtx_lock(&u->bind_mutex);
	RB_INSERT(evtchn_tree, &u->evtchns, evtchn);
	mtx_unlock(&u->bind_mutex);
	error = xen_intr_add_handler(device_get_nameunit(evtchn_dev),
	    evtchn_filter, evtchn_interrupt, evtchn,
	    INTR_TYPE_MISC | INTR_MPSAFE, evtchn->handle);
	if (error != 0) {
		xen_intr_unbind(&evtchn->handle);
		mtx_lock(&u->bind_mutex);
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);
		free(evtchn, M_EVTCHN);
	}
	return (error);
}

static int
evtchn_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg,
    int mode, struct thread *td __unused)
{
	struct per_user_data *u;
	int error;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_virq *)arg;

		error = xen_intr_bind_virq(evtchn_dev, bind->virq, 0,
		    NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_interdomain *)arg;

		error = xen_intr_bind_remote_port(evtchn_dev,
		    bind->remote_domain, bind->remote_port, NULL,
		    NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_unbound_port *)arg;

		error = xen_intr_alloc_and_bind_local_port(evtchn_dev,
		    bind->remote_domain, NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind *unbind;
		struct user_evtchn *evtchn;

		unbind = (struct ioctl_evtchn_unbind *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, unbind->port);
		if (evtchn == NULL) {
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);

		xen_intr_unbind(&evtchn->handle);
		free(evtchn, M_EVTCHN);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify *notify;
		struct user_evtchn *evtchn;

		notify = (struct ioctl_evtchn_notify *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, notify->port);
		if (evtchn == NULL) {
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}

		xen_intr_signal(evtchn->handle);
		mtx_unlock(&u->bind_mutex);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		sx_xlock(&u->ring_cons_mutex);
		mtx_lock(&u->ring_prod_mutex);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		mtx_unlock(&u->ring_prod_mutex);
		sx_xunlock(&u->ring_cons_mutex);
		error = 0;
		break;
	}

	case FIONBIO:
	case FIOASYNC:
		/* Handled in an upper layer */
		error = 0;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

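/*
 * The device is always writable; it is readable whenever the notification
 * ring is non-empty, otherwise the polling thread is recorded so that
 * evtchn_interrupt() can wake it through selwakeup().
 */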
static int
evtchn_poll(struct cdev *dev, int events, struct thread *td)
{
	struct per_user_data *u;
	int error, mask;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (POLLERR);

	/* we can always write */
	mask = events & (POLLOUT | POLLWRNORM);

	mtx_lock(&u->ring_prod_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (u->ring_cons != u->ring_prod) {
			mask |= events & (POLLIN | POLLRDNORM);
		} else {
			/* Record that someone is waiting */
			selrecord(td, &u->ev_rsel);
		}
	}
	mtx_unlock(&u->ring_prod_mutex);

	return (mask);
}

/*------------------ Private Device Attachment Functions  --------------------*/
static void
evtchn_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach evtchn device on non Xen domain"));

	evtchn_dev = BUS_ADD_CHILD(parent, 0, "evtchn", 0);
	if (evtchn_dev == NULL)
		panic("unable to attach evtchn user-space device");
}

static int
evtchn_probe(device_t dev)
{

	device_set_desc(dev, "Xen event channel user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
evtchn_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &evtchn_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/evtchn");
	return (0);
}

/*-------------------- Private Device Attachment Data  -----------------------*/
static device_method_t evtchn_methods[] = {
	DEVMETHOD(device_identify, evtchn_identify),
	DEVMETHOD(device_probe, evtchn_probe),
	DEVMETHOD(device_attach, evtchn_attach),

	DEVMETHOD_END
};

static driver_t evtchn_driver = {
	"evtchn",
	evtchn_methods,
	0,
};

DRIVER_MODULE(evtchn, xenpv, evtchn_driver, 0, 0);
MODULE_DEPEND(evtchn, xenpv, 1, 1, 1);