/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2017 Joyent, Inc.
 */


/*
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/types.h>
#include <sys/hypervisor.h>
#include <sys/machsystm.h>
#include <sys/mutex.h>
#include <sys/evtchn_impl.h>
#include <sys/ddi_impldefs.h>
#include <sys/avintr.h>
#include <sys/cpuvar.h>
#include <sys/smp_impldefs.h>
#include <sys/archsystm.h>
#include <sys/sysmacros.h>
#include <sys/fcntl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/psm.h>
#include <sys/cpu.h>
#include <sys/cmn_err.h>
#include <sys/xen_errno.h>
#include <sys/policy.h>
#include <xen/sys/evtchn.h>

/* Some handy macros */
#define	EVTCHNDRV_MINOR2INST(minor)	((int)(minor))
#define	EVTCHNDRV_DEFAULT_NCLONES	256
#define	EVTCHNDRV_INST2SOFTS(inst)	\
	(ddi_get_soft_state(evtchndrv_statep, (inst)))

/* Soft state data structure for evtchn driver */
struct evtsoftdata {
	dev_info_t *dip;
	/* Notification ring, accessed via /dev/xen/evtchn. */
#define	EVTCHN_RING_SIZE	(PAGESIZE / sizeof (evtchn_port_t))
#define	EVTCHN_RING_MASK(_i)	((_i) & (EVTCHN_RING_SIZE - 1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;

	kcondvar_t evtchn_wait; /* Processes wait on this when ring is empty. */
	kmutex_t evtchn_lock;
	pollhead_t evtchn_pollhead;

	pid_t pid;		/* last pid to bind to this event channel. */
	processorid_t cpu;	/* cpu thread/evtchn is bound to */
};

static void *evtchndrv_statep;
int evtchndrv_nclones = EVTCHNDRV_DEFAULT_NCLONES;
static int *evtchndrv_clone_tab;
static dev_info_t *evtchndrv_dip;
static kmutex_t evtchndrv_clone_tab_mutex;

static int evtchndrv_detach(dev_info_t *, ddi_detach_cmd_t);

/* Who's bound to each port? */
static struct evtsoftdata *port_user[NR_EVENT_CHANNELS];
static kmutex_t port_user_lock;

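/*
 * Interrupt handler for the event-channel device IRQ (ec_dev_irq).
 * Retrieves the port that fired from the per-CPU mailbox, clears it and,
 * if the port is bound to an open instance of this driver, enqueues it on
 * that instance's notification ring, waking any reader or poller when the
 * ring goes non-empty.
 */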
uint_t
evtchn_device_upcall(caddr_t arg __unused, caddr_t arg1 __unused)
{
	struct evtsoftdata *ep;
	int port;

	/*
	 * This is quite gross: the evtchn that led to this invocation was
	 * left in a per-cpu mailbox, so retrieve it now.  We do this because
	 * the interface doesn't offer us a way to pass a dynamic argument up
	 * through the generic interrupt service layer.  The mailbox is safe
	 * since we either run with interrupts disabled or non-preemptable
	 * until we reach here.
	 */
	port = CPU->cpu_m.mcpu_ec_mbox;
	ASSERT(port != 0);
	CPU->cpu_m.mcpu_ec_mbox = 0;
	ec_clear_evtchn(port);
	mutex_enter(&port_user_lock);

	if ((ep = port_user[port]) != NULL) {
		mutex_enter(&ep->evtchn_lock);
		if ((ep->ring_prod - ep->ring_cons) < EVTCHN_RING_SIZE) {
			ep->ring[EVTCHN_RING_MASK(ep->ring_prod)] = port;
			/*
			 * Wake up reader when ring goes non-empty
			 */
			if (ep->ring_cons == ep->ring_prod++) {
				cv_signal(&ep->evtchn_wait);
				mutex_exit(&ep->evtchn_lock);
				pollwakeup(&ep->evtchn_pollhead,
				    POLLIN | POLLRDNORM);
				goto done;
			}
		} else {
			ep->ring_overflow = 1;
		}
		mutex_exit(&ep->evtchn_lock);
	}

done:
	mutex_exit(&port_user_lock);
	return (DDI_INTR_CLAIMED);
}

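/*
 * read(9E): copy pending event-channel port numbers from this instance's
 * notification ring to the caller.  Blocks until at least one port is
 * pending unless O_NONBLOCK is set; returns EFBIG once the ring has
 * overflowed.
 */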
/* ARGSUSED */
static int
evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc = 0;
	ssize_t count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	mutex_enter(&ep->evtchn_lock);
	for (;;) {
		if (ep->ring_overflow) {
			rc = EFBIG;
			goto done;
		}

		if ((c = ep->ring_cons) != (p = ep->ring_prod))
			break;

		if (uio->uio_fmode & O_NONBLOCK) {
			rc = EAGAIN;
			goto done;
		}

		if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
			rc = EINTR;
			goto done;
		}
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof (evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof (evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
	    ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
		rc = EFAULT;
		goto done;
	}

	ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);
done:
	mutex_exit(&ep->evtchn_lock);
	return (rc);
}

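/*
 * write(9E): the caller supplies an array of event-channel port numbers;
 * each port that is currently bound to this open instance is unmasked,
 * re-arming it for further notifications.
 */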
/* ARGSUSED */
static int
evtchndrv_write(dev_t dev, struct uio *uio, cred_t *cr)
{
	int  rc, i;
	ssize_t count;
	evtchn_port_t *kbuf;
	struct evtsoftdata *ep;
	ulong_t flags;
	minor_t minor = getminor(dev);
	evtchn_port_t sbuf[32];

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	if (count <= sizeof (sbuf))
		kbuf = sbuf;
	else
		kbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
	if ((rc = uiomove(kbuf, count, UIO_WRITE, uio)) != 0)
		goto out;

	mutex_enter(&port_user_lock);
	for (i = 0; i < (count / sizeof (evtchn_port_t)); i++)
		if ((kbuf[i] < NR_EVENT_CHANNELS) &&
		    (port_user[kbuf[i]] == ep)) {
			flags = intr_clear();
			ec_unmask_evtchn(kbuf[i]);
			intr_restore(flags);
		}
	mutex_exit(&port_user_lock);

out:
	if (kbuf != sbuf)
		kmem_free(kbuf, PAGESIZE);
	return (rc);
}

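/*
 * Record that event-channel port 'port' now belongs to open instance 'u':
 * note the binding in port_user[], attach the port to the driver IRQ and
 * unmask it so notifications can be delivered.
 */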
static void
evtchn_bind_to_user(struct evtsoftdata *u, int port)
{
	ulong_t flags;

	/*
	 * Save away the PID of the last process to bind to this event
	 * channel.  Useful for debugging.
	 */
	u->pid = ddi_get_pid();

	mutex_enter(&port_user_lock);
	ASSERT(port_user[port] == NULL);
	port_user[port] = u;
	ec_irq_add_evtchn(ec_dev_irq, port);
	flags = intr_clear();
	ec_unmask_evtchn(port);
	intr_restore(flags);
	mutex_exit(&port_user_lock);
}

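/*
 * Break the association between 'port' and its open instance: mask the
 * port and detach it from the driver IRQ.  Caller must hold
 * port_user_lock.
 */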
static void
evtchndrv_close_evtchn(int port)
{
	struct evtsoftdata *ep;

	ASSERT(MUTEX_HELD(&port_user_lock));
	ep = port_user[port];
	ASSERT(ep != NULL);
	(void) ec_mask_evtchn(port);
	/*
	 * It is possible the event is in transit to us.
	 * If it is already in the ring buffer, then a client may
	 * get a spurious event notification on the next read of
	 * the evtchn device.  Clients will need to be able to
	 * handle getting a spurious event notification.
	 */
	port_user[port] = NULL;
	/*
	 * The event is masked and should stay so, clean it up.
	 */
	ec_irq_rm_evtchn(ec_dev_irq, port);
}

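/*
 * ioctl(9E): bind a VIRQ, an interdomain channel or a freshly allocated
 * unbound port to this open instance (returning the local port number via
 * *rvalp), unbind a previously bound port, or send a notification on a
 * bound port.
 */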
/* ARGSUSED */
static int
evtchndrv_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cr,
    int *rvalp)
{
	int err = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	*rvalp = 0;

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_bind_virq(bind.virq, 0, rvalp)) != 0)
			break;

		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_bind_interdomain(bind.remote_domain,
		    bind.remote_port, rvalp)) != 0)
			break;

		ec_bind_vcpu(*rvalp, 0);
		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_alloc_unbound_evtchn(bind.remote_domain,
		    rvalp)) != 0)
			break;

		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;

		if (copyin((void *)data, &unbind, sizeof (unbind))) {
			err = EFAULT;
			break;
		}

		if (unbind.port >= NR_EVENT_CHANNELS) {
			err = EFAULT;
			break;
		}

		mutex_enter(&port_user_lock);

		if (port_user[unbind.port] != ep) {
			mutex_exit(&port_user_lock);
			err = ENOTCONN;
			break;
		}

		evtchndrv_close_evtchn(unbind.port);
		mutex_exit(&port_user_lock);
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;

		if (copyin((void *)data, &notify, sizeof (notify))) {
			err = EFAULT;
			break;
		}

		if (notify.port >= NR_EVENT_CHANNELS) {
			err = EINVAL;
		} else if (port_user[notify.port] != ep) {
			err = ENOTCONN;
		} else {
			ec_notify_via_evtchn(notify.port);
		}
		break;
	}

	default:
		err = ENOSYS;
	}

	return (err);
}

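/*
 * chpoll(9E): the device is always writable; it is readable when the
 * notification ring is non-empty, and POLLERR is raised once the ring
 * has overflowed.
 */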
static int
evtchndrv_poll(dev_t dev, short ev, int anyyet, short *revp, pollhead_t **phpp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);
	short mask = 0;

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	if (ev & POLLOUT)
		mask |= POLLOUT;
	if (ep->ring_overflow)
		mask |= POLLERR;
	if (ev & (POLLIN | POLLRDNORM)) {
		mutex_enter(&ep->evtchn_lock);
		if (ep->ring_cons != ep->ring_prod) {
			mask |= (POLLIN | POLLRDNORM) & ev;
		}
		mutex_exit(&ep->evtchn_lock);
	}
	if ((mask == 0 && !anyyet) || (ev & POLLET)) {
		*phpp = &ep->evtchn_pollhead;
	}
	*revp = mask;
	return (0);
}


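/*
 * open(9E): only the clone device (minor 0) may be opened.  A free clone
 * minor is allocated and per-open soft state, including a one-page
 * notification ring, is set up; the caller's dev_t is rewritten to the
 * new minor.
 */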
/* ARGSUSED */
static int
evtchndrv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(*devp);

	if (otyp == OTYP_BLK)
		return (ENXIO);

	/*
	 * only allow open on minor = 0 - the clone device
	 */
	if (minor != 0)
		return (ENXIO);

	/*
	 * find a free slot and grab it
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	for (minor = 1; minor < evtchndrv_nclones; minor++) {
		if (evtchndrv_clone_tab[minor] == 0) {
			evtchndrv_clone_tab[minor] = 1;
			break;
		}
	}
	mutex_exit(&evtchndrv_clone_tab_mutex);
	if (minor == evtchndrv_nclones)
		return (EAGAIN);

	/* Allocate softstate structure */
	if (ddi_soft_state_zalloc(evtchndrv_statep,
	    EVTCHNDRV_MINOR2INST(minor)) != DDI_SUCCESS) {
		mutex_enter(&evtchndrv_clone_tab_mutex);
		evtchndrv_clone_tab[minor] = 0;
		mutex_exit(&evtchndrv_clone_tab_mutex);
		return (EAGAIN);
	}
	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* ... and init it */
	ep->dip = evtchndrv_dip;

	cv_init(&ep->evtchn_wait, NULL, CV_DEFAULT, NULL);
	mutex_init(&ep->evtchn_lock, NULL, MUTEX_DEFAULT, NULL);

	ep->ring = kmem_alloc(PAGESIZE, KM_SLEEP);

	/* clone driver */
	*devp = makedevice(getmajor(*devp), minor);

	return (0);
}

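/*
 * close(9E): unbind every port still owned by this open instance, free
 * the notification ring and soft state, and release the clone minor.
 */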
/* ARGSUSED */
static int
evtchndrv_close(dev_t dev, int flag, int otyp, struct cred *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);
	int i;

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
	if (ep == NULL)
		return (ENXIO);

	mutex_enter(&port_user_lock);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (port_user[i] != ep)
			continue;

		evtchndrv_close_evtchn(i);
	}

	mutex_exit(&port_user_lock);

	kmem_free(ep->ring, PAGESIZE);
	pollhead_clean(&ep->evtchn_pollhead);
	ddi_soft_state_free(evtchndrv_statep, EVTCHNDRV_MINOR2INST(minor));

	/*
	 * free clone tab slot
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	evtchndrv_clone_tab[minor] = 0;
	mutex_exit(&evtchndrv_clone_tab_mutex);

	return (0);
}

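/*
 * getinfo(9E): translate a dev_t into the (single) devinfo node or the
 * instance number.
 */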
/* ARGSUSED */
static int
evtchndrv_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	dev_t	dev = (dev_t)arg;
	minor_t	minor = getminor(dev);
	int	retval;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (minor != 0 || evtchndrv_dip == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else {
			*result = (void *)evtchndrv_dip;
			retval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		retval = DDI_SUCCESS;
		break;
	default:
		retval = DDI_FAILURE;
	}
	return (retval);
}


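/*
 * attach(9E): only a single instance is supported (clones are created at
 * open time).  Creates the "evtchn" minor node, allocates the device IRQ
 * and registers evtchn_device_upcall as its interrupt handler.
 */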
static int
evtchndrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	error;
	int	unit = ddi_get_instance(dip);


	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		cmn_err(CE_WARN, "evtchn_attach: unknown cmd 0x%x\n", cmd);
		return (DDI_FAILURE);
	}

	/* DDI_ATTACH */

	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	mutex_init(&evtchndrv_clone_tab_mutex, NULL, MUTEX_DRIVER,
	    NULL);

	error = ddi_create_minor_node(dip, "evtchn", S_IFCHR, unit,
	    DDI_PSEUDO, 0);
	if (error != DDI_SUCCESS)
		goto fail;

	/*
	 * save dip for getinfo
	 */
	evtchndrv_dip = dip;
	ddi_report_dev(dip);

	mutex_init(&port_user_lock, NULL, MUTEX_DRIVER, NULL);
	(void) memset(port_user, 0, sizeof (port_user));

	ec_dev_irq = ec_dev_alloc_irq();
	(void) add_avintr(NULL, IPL_EVTCHN, (avfunc)evtchn_device_upcall,
	    "evtchn_driver", ec_dev_irq, NULL, NULL, NULL, dip);

	return (DDI_SUCCESS);

fail:
	(void) evtchndrv_detach(dip, DDI_DETACH);
	return (error);
}

/*ARGSUSED*/
static int
evtchndrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	/*
	 * Don't allow detach for now.
	 */
	return (DDI_FAILURE);
}

/* Solaris driver framework */

static struct cb_ops evtchndrv_cb_ops = {
	evtchndrv_open,		/* cb_open */
	evtchndrv_close,	/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	evtchndrv_read,		/* cb_read */
	evtchndrv_write,	/* cb_write */
	evtchndrv_ioctl,	/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	evtchndrv_poll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_stream */
	D_NEW | D_MP | D_64BIT	/* cb_flag */
};

static struct dev_ops evtchndrv_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	evtchndrv_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	evtchndrv_attach,	/* devo_attach */
	evtchndrv_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&evtchndrv_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"Evtchn driver",	/* Name of the module. */
	&evtchndrv_dev_ops	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

int
_init(void)
{
	int err;

	err = ddi_soft_state_init(&evtchndrv_statep,
	    sizeof (struct evtsoftdata), 1);
	if (err)
		return (err);

	err = mod_install(&modlinkage);
	if (err)
		ddi_soft_state_fini(&evtchndrv_statep);
	else
		evtchndrv_clone_tab = kmem_zalloc(
		    sizeof (int) * evtchndrv_nclones, KM_SLEEP);
	return (err);
}

int
_fini(void)
{
	int e;

	e = mod_remove(&modlinkage);
	if (e)
		return (e);

	ddi_soft_state_fini(&evtchndrv_statep);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}