xref: /illumos-gate/usr/src/uts/common/xen/io/evtchn_dev.c (revision 30165b7f6753bc3d48c52319bed7ec7b3ea36b3c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2017 Joyent, Inc.
26  */
27 
28 
29 /*
30  * evtchn.c
31  *
32  * Driver for receiving and demuxing event-channel signals.
33  *
34  * Copyright (c) 2004-2005, K A Fraser
35  * Multi-process extensions Copyright (c) 2004, Steven Smith
36  *
37  * This file may be distributed separately from the Linux kernel, or
38  * incorporated into other software packages, subject to the following license:
39  *
40  * Permission is hereby granted, free of charge, to any person obtaining a copy
41  * of this source file (the "Software"), to deal in the Software without
42  * restriction, including without limitation the rights to use, copy, modify,
43  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
44  * and to permit persons to whom the Software is furnished to do so, subject to
45  * the following conditions:
46  *
47  * The above copyright notice and this permission notice shall be included in
48  * all copies or substantial portions of the Software.
49  *
50  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
51  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
52  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
53  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
54  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
55  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
56  * IN THE SOFTWARE.
57  */
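/*
 * Illustrative usage sketch (not part of the driver build): a user-space
 * consumer of this device typically proceeds as below.  The structure,
 * field, and ioctl names match the handlers further down in this file;
 * the user-level header path, O_RDWR, and buffer sizes are assumptions,
 * not requirements taken from the gate.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <xen/sys/evtchn.h>
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);	// clone open of minor 0
 *
 *	struct ioctl_evtchn_bind_virq bind = { .virq = virq };
 *	// 'virq' is the caller-supplied VIRQ number of interest.
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
 *	// On success the bound local port is passed back via *rvalp,
 *	// i.e. it becomes the ioctl(2) return value.
 *
 *	evtchn_port_t ports[8];
 *	ssize_t n = read(fd, ports, sizeof (ports));
 *	// read(2) blocks (unless O_NONBLOCK is set) until at least one
 *	// bound port has fired, and only ever transfers whole ports.
 *
 *	(void) write(fd, ports, n);
 *	// Writing the ports back unmasks them again (evtchndrv_write()),
 *	// re-enabling delivery once the events have been handled.
 *
 *	struct ioctl_evtchn_unbind unbind = { .port = port };
 *	(void) ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
 *	(void) close(fd);
 */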
58 
59 #include <sys/types.h>
60 #include <sys/hypervisor.h>
61 #include <sys/machsystm.h>
62 #include <sys/mutex.h>
63 #include <sys/evtchn_impl.h>
64 #include <sys/ddi_impldefs.h>
65 #include <sys/avintr.h>
66 #include <sys/cpuvar.h>
67 #include <sys/smp_impldefs.h>
68 #include <sys/archsystm.h>
69 #include <sys/sysmacros.h>
70 #include <sys/fcntl.h>
71 #include <sys/open.h>
72 #include <sys/stat.h>
73 #include <sys/psm.h>
74 #include <sys/cpu.h>
75 #include <sys/cmn_err.h>
76 #include <sys/xen_errno.h>
77 #include <sys/policy.h>
78 #include <xen/sys/evtchn.h>
79 
80 /* Some handy macros */
81 #define	EVTCHNDRV_MINOR2INST(minor)	((int)(minor))
82 #define	EVTCHNDRV_DEFAULT_NCLONES	256
83 #define	EVTCHNDRV_INST2SOFTS(inst)	\
84 	(ddi_get_soft_state(evtchndrv_statep, (inst)))
85 
86 /* Soft state data structure for evtchn driver */
87 struct evtsoftdata {
88 	dev_info_t *dip;
89 	/* Notification ring, accessed via /dev/xen/evtchn. */
90 #define	EVTCHN_RING_SIZE	(PAGESIZE / sizeof (evtchn_port_t))
91 #define	EVTCHN_RING_MASK(_i)	((_i) & (EVTCHN_RING_SIZE - 1))
92 	evtchn_port_t *ring;
93 	unsigned int ring_cons, ring_prod, ring_overflow;
94 
95 	kcondvar_t evtchn_wait; /* Processes wait on this when ring is empty. */
96 	kmutex_t evtchn_lock;
97 	struct pollhead evtchn_pollhead;
98 
99 	pid_t pid;		/* last pid to bind to this event channel. */
100 	processorid_t cpu;	/* cpu thread/evtchn is bound to */
101 };
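/*
 * A note on the ring arithmetic above: ring_prod and ring_cons are
 * free-running counters, and only their low-order bits index the ring
 * (via EVTCHN_RING_MASK()).  For example, with a 4K page and a 4-byte
 * evtchn_port_t, EVTCHN_RING_SIZE is 1024 entries.  The number of
 * undelivered ports is always (ring_prod - ring_cons); the producer in
 * evtchn_device_upcall() keeps that difference at or below
 * EVTCHN_RING_SIZE, setting ring_overflow instead of overwriting
 * unread entries.
 */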
102 
103 static void *evtchndrv_statep;
104 int evtchndrv_nclones = EVTCHNDRV_DEFAULT_NCLONES;
105 static int *evtchndrv_clone_tab;
106 static dev_info_t *evtchndrv_dip;
107 static kmutex_t evtchndrv_clone_tab_mutex;
108 
109 static int evtchndrv_detach(dev_info_t *, ddi_detach_cmd_t);
110 
111 /* Who's bound to each port? */
112 static struct evtsoftdata *port_user[NR_EVENT_CHANNELS];
113 static kmutex_t port_user_lock;
114 
115 void
116 evtchn_device_upcall()
117 {
118 	struct evtsoftdata *ep;
119 	int port;
120 
121 	/*
122 	 * This is quite gross: we had to leave the evtchn that led to this
123 	 * invocation in a per-CPU mailbox, so retrieve it now.  We do this
124 	 * because the interface doesn't offer us a way to pass a dynamic
125 	 * argument up through the generic interrupt service layer.  The
126 	 * mailbox is safe since we run either with interrupts disabled or
127 	 * non-preemptible until we reach here.
128 	 */
129 	port = CPU->cpu_m.mcpu_ec_mbox;
130 	ASSERT(port != 0);
131 	CPU->cpu_m.mcpu_ec_mbox = 0;
132 	ec_clear_evtchn(port);
133 	mutex_enter(&port_user_lock);
134 
135 	if ((ep = port_user[port]) != NULL) {
136 		mutex_enter(&ep->evtchn_lock);
137 		if ((ep->ring_prod - ep->ring_cons) < EVTCHN_RING_SIZE) {
138 			ep->ring[EVTCHN_RING_MASK(ep->ring_prod)] = port;
139 			/*
140 			 * Wake up reader when ring goes non-empty
141 			 */
142 			if (ep->ring_cons == ep->ring_prod++) {
143 				cv_signal(&ep->evtchn_wait);
144 				mutex_exit(&ep->evtchn_lock);
145 				pollwakeup(&ep->evtchn_pollhead,
146 				    POLLIN | POLLRDNORM);
147 				goto done;
148 			}
149 		} else {
150 			ep->ring_overflow = 1;
151 		}
152 		mutex_exit(&ep->evtchn_lock);
153 	}
154 
155 done:
156 	mutex_exit(&port_user_lock);
157 }
158 
159 /* ARGSUSED */
160 static int
161 evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
162 {
163 	int rc = 0;
164 	ssize_t count;
165 	unsigned int c, p, bytes1 = 0, bytes2 = 0;
166 	struct evtsoftdata *ep;
167 	minor_t minor = getminor(dev);
168 
169 	if (secpolicy_xvm_control(cr))
170 		return (EPERM);
171 
172 	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
173 
174 	/* Whole number of ports. */
175 	count = uio->uio_resid;
176 	count &= ~(sizeof (evtchn_port_t) - 1);
177 
178 	if (count == 0)
179 		return (0);
180 
181 	if (count > PAGESIZE)
182 		count = PAGESIZE;
183 
184 	mutex_enter(&ep->evtchn_lock);
185 	for (;;) {
186 		if (ep->ring_overflow) {
187 			rc = EFBIG;
188 			goto done;
189 		}
190 
191 		if ((c = ep->ring_cons) != (p = ep->ring_prod))
192 			break;
193 
194 		if (uio->uio_fmode & O_NONBLOCK) {
195 			rc = EAGAIN;
196 			goto done;
197 		}
198 
199 		if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
200 			rc = EINTR;
201 			goto done;
202 		}
203 	}
204 
205 	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
206 	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
207 		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
208 		    sizeof (evtchn_port_t);
209 		bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
210 	} else {
211 		bytes1 = (p - c) * sizeof (evtchn_port_t);
212 		bytes2 = 0;
213 	}
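	/*
	 * Worked example of the split above, assuming EVTCHN_RING_SIZE is
	 * 1024: with c == 1020 and p == 1030 the unread region wraps, so
	 * (c ^ p) has the EVTCHN_RING_SIZE bit set; bytes1 then covers
	 * ring indices 1020..1023 (4 ports) and bytes2 covers indices
	 * 0..5 (6 ports), 10 ports in total, matching (p - c).
	 */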
214 
215 	/* Truncate chunks according to caller's maximum byte count. */
216 	if (bytes1 > count) {
217 		bytes1 = count;
218 		bytes2 = 0;
219 	} else if ((bytes1 + bytes2) > count) {
220 		bytes2 = count - bytes1;
221 	}
222 
223 	if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
224 	    ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
225 		rc = EFAULT;
226 		goto done;
227 	}
228 
229 	ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);
230 done:
231 	mutex_exit(&ep->evtchn_lock);
232 	return (rc);
233 }
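/*
 * Note that ring_overflow is never cleared once set, so a reader that
 * sees EFBIG here (or POLLERR from evtchndrv_poll()) has lost events and
 * can only recover by closing and reopening the device and rebinding its
 * ports.  A hedged sketch of the consumer-side loop follows;
 * reopen_and_rebind() and handle_port() are hypothetical helpers:
 *
 *	for (;;) {
 *		evtchn_port_t ports[64];
 *		ssize_t n = read(fd, ports, sizeof (ports));
 *
 *		if (n < 0) {
 *			if (errno == EFBIG) {	// overflow: events dropped
 *				reopen_and_rebind(&fd);
 *				continue;
 *			}
 *			break;
 *		}
 *		for (size_t i = 0; i < (size_t)n / sizeof (ports[0]); i++)
 *			handle_port(ports[i]);
 *		(void) write(fd, ports, n);	// re-enable these ports
 *	}
 */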
234 
235 /* ARGSUSED */
236 static int
237 evtchndrv_write(dev_t dev, struct uio *uio, cred_t *cr)
238 {
239 	int rc, i;
240 	ssize_t count;
241 	evtchn_port_t *kbuf;
242 	struct evtsoftdata *ep;
243 	ulong_t flags;
244 	minor_t minor = getminor(dev);
245 	evtchn_port_t sbuf[32];
246 
247 	if (secpolicy_xvm_control(cr))
248 		return (EPERM);
249 
250 	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
251 
252 
253 	/* Whole number of ports. */
254 	count = uio->uio_resid;
255 	count &= ~(sizeof (evtchn_port_t) - 1);
256 
257 	if (count == 0)
258 		return (0);
259 
260 	if (count > PAGESIZE)
261 		count = PAGESIZE;
262 
263 	if (count <= sizeof (sbuf))
264 		kbuf = sbuf;
265 	else
266 		kbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
267 	if ((rc = uiomove(kbuf, count, UIO_WRITE, uio)) != 0)
268 		goto out;
269 
270 	mutex_enter(&port_user_lock);
271 	for (i = 0; i < (count / sizeof (evtchn_port_t)); i++)
272 		if ((kbuf[i] < NR_EVENT_CHANNELS) &&
273 		    (port_user[kbuf[i]] == ep)) {
274 			flags = intr_clear();
275 			ec_unmask_evtchn(kbuf[i]);
276 			intr_restore(flags);
277 		}
278 	mutex_exit(&port_user_lock);
279 
280 out:
281 	if (kbuf != sbuf)
282 		kmem_free(kbuf, PAGESIZE);
283 	return (rc);
284 }
285 
286 static void
287 evtchn_bind_to_user(struct evtsoftdata *u, int port)
288 {
289 	ulong_t flags;
290 
291 	/*
292 	 * Save away the PID of the last process to bind to this event channel.
293 	 * Useful for debugging.
294 	 */
295 	u->pid = ddi_get_pid();
296 
297 	mutex_enter(&port_user_lock);
298 	ASSERT(port_user[port] == NULL);
299 	port_user[port] = u;
300 	ec_irq_add_evtchn(ec_dev_irq, port);
301 	flags = intr_clear();
302 	ec_unmask_evtchn(port);
303 	intr_restore(flags);
304 	mutex_exit(&port_user_lock);
305 }
306 
307 static void
308 evtchndrv_close_evtchn(int port)
309 {
310 	struct evtsoftdata *ep;
311 
312 	ASSERT(MUTEX_HELD(&port_user_lock));
313 	ep = port_user[port];
314 	ASSERT(ep != NULL);
315 	(void) ec_mask_evtchn(port);
316 	/*
317 	 * It is possible the event is in transit to us.
318 	 * If it is already in the ring buffer, then a client may
319 	 * get a spurious event notification on the next read of
320 	 * the evtchn device.  Clients will need to be able to
321 	 * handle getting a spurious event notification.
322 	 */
323 	port_user[port] = NULL;
324 	/*
325 	 * The event is masked and should stay so, clean it up.
326 	 * The event is masked and should stay so; clean it up.
327 	ec_irq_rm_evtchn(ec_dev_irq, port);
328 }
329 
330 /* ARGSUSED */
331 static int
332 evtchndrv_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cr,
333     int *rvalp)
334 {
335 	int err = 0;
336 	struct evtsoftdata *ep;
337 	minor_t minor = getminor(dev);
338 
339 	if (secpolicy_xvm_control(cr))
340 		return (EPERM);
341 
342 	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
343 
344 	*rvalp = 0;
345 
346 	switch (cmd) {
347 	case IOCTL_EVTCHN_BIND_VIRQ: {
348 		struct ioctl_evtchn_bind_virq bind;
349 
350 		if (copyin((void *)data, &bind, sizeof (bind))) {
351 			err = EFAULT;
352 			break;
353 		}
354 
355 		if ((err = xen_bind_virq(bind.virq, 0, rvalp)) != 0)
356 			break;
357 
358 		evtchn_bind_to_user(ep, *rvalp);
359 		break;
360 	}
361 
362 	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
363 		struct ioctl_evtchn_bind_interdomain bind;
364 
365 		if (copyin((void *)data, &bind, sizeof (bind))) {
366 			err = EFAULT;
367 			break;
368 		}
369 
370 		if ((err = xen_bind_interdomain(bind.remote_domain,
371 		    bind.remote_port, rvalp)) != 0)
372 			break;
373 
374 		ec_bind_vcpu(*rvalp, 0);
375 		evtchn_bind_to_user(ep, *rvalp);
376 		break;
377 	}
378 
379 	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
380 		struct ioctl_evtchn_bind_unbound_port bind;
381 
382 		if (copyin((void *)data, &bind, sizeof (bind))) {
383 			err = EFAULT;
384 			break;
385 		}
386 
387 		if ((err = xen_alloc_unbound_evtchn(bind.remote_domain,
388 		    rvalp)) != 0)
389 			break;
390 
391 		evtchn_bind_to_user(ep, *rvalp);
392 		break;
393 	}
394 
395 	case IOCTL_EVTCHN_UNBIND: {
396 		struct ioctl_evtchn_unbind unbind;
397 
398 		if (copyin((void *)data, &unbind, sizeof (unbind))) {
399 			err = EFAULT;
400 			break;
401 		}
402 
403 		if (unbind.port >= NR_EVENT_CHANNELS) {
404 			err = EFAULT;
405 			break;
406 		}
407 
408 		mutex_enter(&port_user_lock);
409 
410 		if (port_user[unbind.port] != ep) {
411 			mutex_exit(&port_user_lock);
412 			err = ENOTCONN;
413 			break;
414 		}
415 
416 		evtchndrv_close_evtchn(unbind.port);
417 		mutex_exit(&port_user_lock);
418 		break;
419 	}
420 
421 	case IOCTL_EVTCHN_NOTIFY: {
422 		struct ioctl_evtchn_notify notify;
423 
424 		if (copyin((void *)data, &notify, sizeof (notify))) {
425 			err = EFAULT;
426 			break;
427 		}
428 
429 		if (notify.port >= NR_EVENT_CHANNELS) {
430 			err = EINVAL;
431 		} else if (port_user[notify.port] != ep) {
432 			err = ENOTCONN;
433 		} else {
434 			ec_notify_via_evtchn(notify.port);
435 		}
436 		break;
437 	}
438 
439 	default:
440 		err = ENOSYS;
441 	}
442 
443 	return (err);
444 }
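/*
 * A hedged sketch of binding to a remote domain with the ioctls above.
 * Field names are those used in the copyin()ed structures; the
 * remote_domid and remote_port values are caller-supplied:
 *
 *	struct ioctl_evtchn_bind_interdomain bind;
 *	bind.remote_domain = remote_domid;
 *	bind.remote_port = remote_port;
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
 *
 *	struct ioctl_evtchn_notify notify;
 *	notify.port = port;
 *	(void) ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);	// signal the peer
 */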
445 
446 static int
447 evtchndrv_poll(dev_t dev, short ev, int anyyet, short *revp, pollhead_t **phpp)
448 {
449 	struct evtsoftdata *ep;
450 	minor_t minor = getminor(dev);
451 	short mask = 0;
452 
453 	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
454 
455 	if (ev & POLLOUT)
456 		mask |= POLLOUT;
457 	if (ep->ring_overflow)
458 		mask |= POLLERR;
459 	if (ev & (POLLIN | POLLRDNORM)) {
460 		mutex_enter(&ep->evtchn_lock);
461 		if (ep->ring_cons != ep->ring_prod) {
462 			mask |= (POLLIN | POLLRDNORM) & ev;
463 		}
464 		mutex_exit(&ep->evtchn_lock);
465 	}
466 	if ((mask == 0 && !anyyet) || (ev & POLLET)) {
467 		*phpp = &ep->evtchn_pollhead;
468 	}
469 	*revp = mask;
470 	return (0);
471 }
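/*
 * Consumer-side poll(2) sketch matching the revents generated above
 * (POLLIN/POLLRDNORM when the ring is non-empty, POLLERR after an
 * overflow); 'fd' is the open /dev/xen/evtchn descriptor:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLERR) {
 *			// ring overflowed; recover (close and reopen)
 *		} else if (pfd.revents & POLLIN) {
 *			// read(2) will return at least one port
 *		}
 *	}
 */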
472 
473 
474 /* ARGSUSED */
475 static int
476 evtchndrv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
477 {
478 	struct evtsoftdata *ep;
479 	minor_t minor = getminor(*devp);
480 
481 	if (otyp == OTYP_BLK)
482 		return (ENXIO);
483 
484 	/*
485 	 * only allow open on minor = 0 - the clone device
486 	 */
487 	if (minor != 0)
488 		return (ENXIO);
489 
490 	/*
491 	 * find a free slot and grab it
492 	 */
493 	mutex_enter(&evtchndrv_clone_tab_mutex);
494 	for (minor = 1; minor < evtchndrv_nclones; minor++) {
495 		if (evtchndrv_clone_tab[minor] == 0) {
496 			evtchndrv_clone_tab[minor] = 1;
497 			break;
498 		}
499 	}
500 	mutex_exit(&evtchndrv_clone_tab_mutex);
501 	if (minor == evtchndrv_nclones)
502 		return (EAGAIN);
503 
504 	/* Allocate softstate structure */
505 	if (ddi_soft_state_zalloc(evtchndrv_statep,
506 	    EVTCHNDRV_MINOR2INST(minor)) != DDI_SUCCESS) {
507 		mutex_enter(&evtchndrv_clone_tab_mutex);
508 		evtchndrv_clone_tab[minor] = 0;
509 		mutex_exit(&evtchndrv_clone_tab_mutex);
510 		return (EAGAIN);
511 	}
512 	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
513 
514 	/* ... and init it */
515 	ep->dip = evtchndrv_dip;
516 
517 	cv_init(&ep->evtchn_wait, NULL, CV_DEFAULT, NULL);
518 	mutex_init(&ep->evtchn_lock, NULL, MUTEX_DEFAULT, NULL);
519 
520 	ep->ring = kmem_alloc(PAGESIZE, KM_SLEEP);
521 
522 	/* clone driver */
523 	*devp = makedevice(getmajor(*devp), minor);
524 
525 	return (0);
526 }
527 
528 /* ARGSUSED */
529 static int
530 evtchndrv_close(dev_t dev, int flag, int otyp, struct cred *credp)
531 {
532 	struct evtsoftdata *ep;
533 	minor_t minor = getminor(dev);
534 	int i;
535 
536 	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
537 	if (ep == NULL)
538 		return (ENXIO);
539 
540 	mutex_enter(&port_user_lock);
541 
542 
543 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
544 		if (port_user[i] != ep)
545 			continue;
546 
547 		evtchndrv_close_evtchn(i);
548 	}
549 
550 	mutex_exit(&port_user_lock);
551 
552 	kmem_free(ep->ring, PAGESIZE);
553 	ddi_soft_state_free(evtchndrv_statep, EVTCHNDRV_MINOR2INST(minor));
554 
555 	/*
556 	 * free clone tab slot
557 	 */
558 	mutex_enter(&evtchndrv_clone_tab_mutex);
559 	evtchndrv_clone_tab[minor] = 0;
560 	mutex_exit(&evtchndrv_clone_tab_mutex);
561 
562 	return (0);
563 }
564 
565 /* ARGSUSED */
566 static int
567 evtchndrv_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
568 {
569 	dev_t	dev = (dev_t)arg;
570 	minor_t	minor = getminor(dev);
571 	int	retval;
572 
573 	switch (cmd) {
574 	case DDI_INFO_DEVT2DEVINFO:
575 		if (minor != 0 || evtchndrv_dip == NULL) {
576 			*result = (void *)NULL;
577 			retval = DDI_FAILURE;
578 		} else {
579 			*result = (void *)evtchndrv_dip;
580 			retval = DDI_SUCCESS;
581 		}
582 		break;
583 	case DDI_INFO_DEVT2INSTANCE:
584 		*result = (void *)0;
585 		retval = DDI_SUCCESS;
586 		break;
587 	default:
588 		retval = DDI_FAILURE;
589 	}
590 	return (retval);
591 }
592 
593 
594 static int
595 evtchndrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
596 {
597 	int	error;
598 	int	unit = ddi_get_instance(dip);
599 
600 
601 	switch (cmd) {
602 	case DDI_ATTACH:
603 		break;
604 	case DDI_RESUME:
605 		return (DDI_SUCCESS);
606 	default:
607 		cmn_err(CE_WARN, "evtchndrv_attach: unknown cmd 0x%x\n", cmd);
608 		return (DDI_FAILURE);
609 	}
610 
611 	/* DDI_ATTACH */
612 
613 	/*
614 	 * only one instance - but we clone using the open routine
615 	 */
616 	if (ddi_get_instance(dip) > 0)
617 		return (DDI_FAILURE);
618 
619 	mutex_init(&evtchndrv_clone_tab_mutex, NULL, MUTEX_DRIVER,
620 	    NULL);
621 
622 	error = ddi_create_minor_node(dip, "evtchn", S_IFCHR, unit,
623 	    DDI_PSEUDO, NULL);
624 	if (error != DDI_SUCCESS)
625 		goto fail;
626 
627 	/*
628 	 * save dip for getinfo
629 	 */
630 	evtchndrv_dip = dip;
631 	ddi_report_dev(dip);
632 
633 	mutex_init(&port_user_lock, NULL, MUTEX_DRIVER, NULL);
634 	(void) memset(port_user, 0, sizeof (port_user));
635 
636 	ec_dev_irq = ec_dev_alloc_irq();
637 	(void) add_avintr(NULL, IPL_EVTCHN, (avfunc)evtchn_device_upcall,
638 	    "evtchn_driver", ec_dev_irq, NULL, NULL, NULL, dip);
639 
640 	return (DDI_SUCCESS);
641 
642 fail:
643 	(void) evtchndrv_detach(dip, DDI_DETACH);
644 	return (error);
645 }
646 
647 /*ARGSUSED*/
648 static int
649 evtchndrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
650 {
651 	/*
652 	 * Don't allow detach for now.
653 	 */
654 	return (DDI_FAILURE);
655 }
656 
657 /* Solaris driver framework */
658 
659 static struct cb_ops evtchndrv_cb_ops = {
660 	evtchndrv_open,		/* cb_open */
661 	evtchndrv_close,	/* cb_close */
662 	nodev,			/* cb_strategy */
663 	nodev,			/* cb_print */
664 	nodev,			/* cb_dump */
665 	evtchndrv_read,		/* cb_read */
666 	evtchndrv_write,	/* cb_write */
667 	evtchndrv_ioctl,	/* cb_ioctl */
668 	nodev,			/* cb_devmap */
669 	nodev,			/* cb_mmap */
670 	nodev,			/* cb_segmap */
671 	evtchndrv_poll,		/* cb_chpoll */
672 	ddi_prop_op,		/* cb_prop_op */
673 	0,			/* cb_stream */
674 	D_NEW | D_MP | D_64BIT	/* cb_flag */
675 };
676 
677 static struct dev_ops evtchndrv_dev_ops = {
678 	DEVO_REV,		/* devo_rev */
679 	0,			/* devo_refcnt */
680 	evtchndrv_info,		/* devo_getinfo */
681 	nulldev,		/* devo_identify */
682 	nulldev,		/* devo_probe */
683 	evtchndrv_attach,	/* devo_attach */
684 	evtchndrv_detach,	/* devo_detach */
685 	nodev,			/* devo_reset */
686 	&evtchndrv_cb_ops,	/* devo_cb_ops */
687 	NULL,			/* devo_bus_ops */
688 	NULL,			/* power */
689 	ddi_quiesce_not_needed,		/* devo_quiesce */
690 };
691 
692 static struct modldrv modldrv = {
693 	&mod_driverops,		/* Type of module.  This one is a driver */
694 	"Evtchn driver",	/* Name of the module. */
695 	&evtchndrv_dev_ops	/* driver ops */
696 };
697 
698 static struct modlinkage modlinkage = {
699 	MODREV_1,
700 	&modldrv,
701 	NULL
702 };
703 
704 int
705 _init(void)
706 {
707 	int err;
708 
709 	err = ddi_soft_state_init(&evtchndrv_statep,
710 	    sizeof (struct evtsoftdata), 1);
711 	if (err)
712 		return (err);
713 
714 	err = mod_install(&modlinkage);
715 	if (err)
716 		ddi_soft_state_fini(&evtchndrv_statep);
717 	else
718 		evtchndrv_clone_tab = kmem_zalloc(
719 		    sizeof (int) * evtchndrv_nclones, KM_SLEEP);
720 	return (err);
721 }
722 
723 int
724 _fini(void)
725 {
726 	int e;
727 
728 	e = mod_remove(&modlinkage);
729 	if (e)
730 		return (e);
731 
732 	ddi_soft_state_fini(&evtchndrv_statep);
733 
734 	return (0);
735 }
736 
737 int
738 _info(struct modinfo *modinfop)
739 {
740 	return (mod_info(&modlinkage, modinfop));
741 }
742