/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 * FreeBSD port Copyright (c) 2014, Roger Pau Monné
 * Fetched from git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * File: drivers/xen/evtchn.c
 * Git commit: 0dc0064add422bc0ef5165ebe9ece3052bbd457d
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/filio.h>
#include <sys/vnode.h>

#include <xen/xen-os.h>
#include <xen/evtchn.h>
#include <xen/xen_intr.h>

#include <xen/evtchn/evtchnvar.h>

MALLOC_DEFINE(M_EVTCHN, "evtchn_dev", "Xen event channel user-space device");

struct user_evtchn;

static int evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2);

RB_HEAD(evtchn_tree, user_evtchn);
struct per_user_data {
	struct mtx bind_mutex; /* serialize bind/unbind operations */
	struct evtchn_tree evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
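	/*
	 * ring_prod and ring_cons are free-running counters; only their
	 * low-order bits index the ring, so EVTCHN_RING_SIZE must be a
	 * power of two.  For example, with 4 KiB pages and 4-byte ports
	 * the ring holds 1024 entries and EVTCHN_RING_MASK(1025) == 1.
	 */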
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct sx ring_cons_mutex; /* protect against concurrent readers */
	struct mtx ring_prod_mutex; /* protect against concurrent interrupts */
	struct selinfo ev_rsel;
};

struct user_evtchn {
	RB_ENTRY(user_evtchn) node;
	struct per_user_data *user;
	evtchn_port_t port;
	xen_intr_handle_t handle;
	bool enabled;
};

RB_GENERATE_STATIC(evtchn_tree, user_evtchn, node, evtchn_cmp);

static device_t evtchn_dev;

static d_read_t      evtchn_read;
static d_write_t     evtchn_write;
static d_ioctl_t     evtchn_ioctl;
static d_poll_t      evtchn_poll;
static d_open_t      evtchn_open;

static void evtchn_release(void *arg);

static struct cdevsw evtchn_devsw = {
	.d_version = D_VERSION,
	.d_open = evtchn_open,
	.d_read = evtchn_read,
	.d_write = evtchn_write,
	.d_ioctl = evtchn_ioctl,
	.d_poll = evtchn_poll,
	.d_name = "evtchn",
};

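/*
 * Illustrative user-space usage (a sketch only; the field names below
 * match the accesses in evtchn_ioctl(), and error handling is omitted):
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = {
 *		.remote_domain = remote_domid,
 *	};
 *	ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 *	evtchn_port_t port;
 *	read(fd, &port, sizeof(port));		// wait for an event
 *	// ... service the event, then re-enable delivery:
 *	write(fd, &port, sizeof(port));
 *
 *	struct ioctl_evtchn_notify notify = { .port = bind.port };
 *	ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);	// signal the peer
 */
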
/*------------------------- Red-black tree helpers ---------------------------*/
static int
evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2)
{

	return (u1->port - u2->port);
}

static struct user_evtchn *
find_evtchn(struct per_user_data *u, evtchn_port_t port)
{
	struct user_evtchn tmp = {
		.port = port,
	};

	return (RB_FIND(evtchn_tree, &u->evtchns, &tmp));
}

/*--------------------------- Interrupt handlers -----------------------------*/
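/*
 * Event delivery is split in two stages: evtchn_filter() runs in
 * primary interrupt context, masks the port so it cannot refire and
 * requests that the ithread run; evtchn_interrupt() then executes in
 * the ithread and queues the port on the per-user notification ring.
 * The port stays masked until user space re-enables it via write(2).
 */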
static int
evtchn_filter(void *arg)
{
	struct user_evtchn *evtchn;

	evtchn = arg;

	if (!evtchn->enabled && bootverbose) {
		device_printf(evtchn_dev,
		    "Received upcall for disabled event channel %d\n",
		    evtchn->port);
	}

	evtchn_mask_port(evtchn->port);
	evtchn->enabled = false;

	return (FILTER_SCHEDULE_THREAD);
}

static void
evtchn_interrupt(void *arg)
{
	struct user_evtchn *evtchn;
	struct per_user_data *u;

	evtchn = arg;
	u = evtchn->user;

	/*
	 * Protect against concurrent events using this handler
	 * on different CPUs.
	 */
	mtx_lock(&u->ring_prod_mutex);
	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wakeup(u);
			selwakeup(&u->ev_rsel);
		}
	} else
		u->ring_overflow = 1;
	mtx_unlock(&u->ring_prod_mutex);
}

/*------------------------- Character device methods -------------------------*/
static int
evtchn_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	struct per_user_data *u;
	int error;

	u = malloc(sizeof(*u), M_EVTCHN, M_WAITOK | M_ZERO);
	u->ring = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK | M_ZERO);

	/* Initialize locks */
	mtx_init(&u->bind_mutex, "evtchn_bind_mutex", NULL, MTX_DEF);
	sx_init(&u->ring_cons_mutex, "evtchn_ringc_sx");
	mtx_init(&u->ring_prod_mutex, "evtchn_ringp_mutex", NULL, MTX_DEF);

	/* Initialize red-black tree. */
	RB_INIT(&u->evtchns);

	/* Assign the allocated per_user_data to this open instance. */
	error = devfs_set_cdevpriv(u, evtchn_release);
	if (error != 0) {
		mtx_destroy(&u->bind_mutex);
		mtx_destroy(&u->ring_prod_mutex);
		sx_destroy(&u->ring_cons_mutex);
		free(u->ring, M_EVTCHN);
		free(u, M_EVTCHN);
	}

	return (error);
}

static void
evtchn_release(void *arg)
{
	struct per_user_data *u;
	struct user_evtchn *evtchn, *tmp;

	u = arg;

	seldrain(&u->ev_rsel);

	RB_FOREACH_SAFE(evtchn, evtchn_tree, &u->evtchns, tmp) {
		xen_intr_unbind(&evtchn->handle);

		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		free(evtchn, M_EVTCHN);
	}

	mtx_destroy(&u->bind_mutex);
	mtx_destroy(&u->ring_prod_mutex);
	sx_destroy(&u->ring_cons_mutex);
	free(u->ring, M_EVTCHN);
	free(u, M_EVTCHN);
}

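/*
 * read(2) returns an array of whole evtchn_port_t values, one per
 * pending event.  A request shorter than a single port returns 0, and
 * once the ring has overflowed every read fails with EFBIG until the
 * ring is cleared by IOCTL_EVTCHN_RESET.
 */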
static int
evtchn_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return (0);

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	sx_xlock(&u->ring_cons_mutex);
	for (;;) {
		if (u->ring_overflow) {
			error = EFBIG;
			goto unlock_out;
		}

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		if (ioflag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto unlock_out;
		}

		error = sx_sleep(u, &u->ring_cons_mutex, PCATCH, "evtchw", 0);
		if ((error != 0) && (error != EWOULDBLOCK))
			goto unlock_out;
	}

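	/*
	 * The counters are free-running, so the producer and consumer
	 * differ in the EVTCHN_RING_SIZE bit exactly when the unread
	 * region wraps past the end of the ring and must be copied out
	 * as two chunks.
	 */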
	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	error = EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */

	if (uiomove(&u->ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
	    ((bytes2 != 0) && uiomove(&u->ring[0], bytes2, uio)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	error = 0;

unlock_out:
	sx_xunlock(&u->ring_cons_mutex);
	return (error);
}

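/*
 * write(2) accepts an array of evtchn_port_t values and re-enables
 * (unmasks) each listed port that is bound to this file descriptor and
 * currently disabled; unknown or already-enabled ports are silently
 * skipped.
 */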
static int
evtchn_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, i, count;
	evtchn_port_t *kbuf;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	kbuf = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK);

	count = uio->uio_resid;
	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	error = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	error = uiomove(kbuf, count, uio);
	if (error != 0)
		goto out;

	mtx_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			evtchn_unmask_port(evtchn->port);
		}
	}

	mtx_unlock(&u->bind_mutex);
	error = 0;

out:
	free(kbuf, M_EVTCHN);
	return (error);
}

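/*
 * Common tail of the three bind ioctls below.  The caller has already
 * bound the handle; this records the port in the per-user tree and
 * installs the filter/ithread pair.  On failure the binding is undone
 * and evtchn is freed, so callers only need to propagate the error.
 */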
static inline int
evtchn_bind_user_port(struct per_user_data *u, struct user_evtchn *evtchn)
{
	int error;

	evtchn->port = xen_intr_port(evtchn->handle);
	evtchn->user = u;
	evtchn->enabled = true;
	mtx_lock(&u->bind_mutex);
	RB_INSERT(evtchn_tree, &u->evtchns, evtchn);
	mtx_unlock(&u->bind_mutex);
	error = xen_intr_add_handler(device_get_nameunit(evtchn_dev),
	    evtchn_filter, evtchn_interrupt, evtchn,
	    INTR_TYPE_MISC | INTR_MPSAFE, evtchn->handle);
	if (error != 0) {
		xen_intr_unbind(&evtchn->handle);
		mtx_lock(&u->bind_mutex);
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);
		free(evtchn, M_EVTCHN);
	}
	return (error);
}

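/*
 * ioctl(2) interface: bind a VIRQ, an interdomain port or a freshly
 * allocated unbound port; unbind a port; notify (signal) a bound port;
 * or reset the notification ring.  FIONBIO and FIOASYNC are accepted
 * for compatibility and handled above this driver.
 */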
static int
evtchn_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg,
    int mode, struct thread *td __unused)
{
	struct per_user_data *u;
	int error;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_virq *)arg;

		error = xen_intr_bind_virq(evtchn_dev, bind->virq, 0,
		    NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_interdomain *)arg;

		error = xen_intr_bind_remote_port(evtchn_dev,
		    bind->remote_domain, bind->remote_port, NULL,
		    NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_unbound_port *)arg;

		error = xen_intr_alloc_and_bind_local_port(evtchn_dev,
		    bind->remote_domain, NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind *unbind;
		struct user_evtchn *evtchn;

		unbind = (struct ioctl_evtchn_unbind *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, unbind->port);
		if (evtchn == NULL) {
			/* Drop the bind lock before bailing out. */
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);

		xen_intr_unbind(&evtchn->handle);
		free(evtchn, M_EVTCHN);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify *notify;
		struct user_evtchn *evtchn;

		notify = (struct ioctl_evtchn_notify *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, notify->port);
		if (evtchn == NULL) {
			/* Drop the bind lock before bailing out. */
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}

		xen_intr_signal(evtchn->handle);
		mtx_unlock(&u->bind_mutex);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		sx_xlock(&u->ring_cons_mutex);
		mtx_lock(&u->ring_prod_mutex);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		mtx_unlock(&u->ring_prod_mutex);
		sx_xunlock(&u->ring_cons_mutex);
		error = 0;
		break;
	}

	case FIONBIO:
	case FIOASYNC:
		/* Handled in an upper layer */
		error = 0;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

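/*
 * poll(2) semantics: the device is always writable; it becomes
 * readable whenever the notification ring is non-empty.
 */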
static int
evtchn_poll(struct cdev *dev, int events, struct thread *td)
{
	struct per_user_data *u;
	int error, mask;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (POLLERR);

	/* we can always write */
	mask = events & (POLLOUT | POLLWRNORM);

	mtx_lock(&u->ring_prod_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (u->ring_cons != u->ring_prod) {
			mask |= events & (POLLIN | POLLRDNORM);
		} else {
			/* Record that someone is waiting */
			selrecord(td, &u->ev_rsel);
		}
	}
	mtx_unlock(&u->ring_prod_mutex);

	return (mask);
}

/*------------------ Private Device Attachment Functions  --------------------*/
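/*
 * The device is self-identifying: on Xen guests a single "evtchn"
 * child is added below the xenpv bus; probe and attach then create
 * the /dev/xen/evtchn character device node.
 */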
static void
evtchn_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach evtchn device on non-Xen domain"));

	evtchn_dev = BUS_ADD_CHILD(parent, 0, "evtchn", 0);
	if (evtchn_dev == NULL)
		panic("unable to attach evtchn user-space device");
}

static int
evtchn_probe(device_t dev)
{

	device_set_desc(dev, "Xen event channel user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
evtchn_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &evtchn_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/evtchn");
	return (0);
}

/*-------------------- Private Device Attachment Data  -----------------------*/
static device_method_t evtchn_methods[] = {
	DEVMETHOD(device_identify, evtchn_identify),
	DEVMETHOD(device_probe, evtchn_probe),
	DEVMETHOD(device_attach, evtchn_attach),

	DEVMETHOD_END
};

static driver_t evtchn_driver = {
	"evtchn",
	evtchn_methods,
	0,
};

DRIVER_MODULE(evtchn, xenpv, evtchn_driver, 0, 0);
MODULE_DEPEND(evtchn, xenpv, 1, 1, 1);
599