xref: /titanic_50/usr/src/uts/common/xen/io/evtchn_dev.c (revision ff3124eff995e6cd8ebd8c6543648e0670920034)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/types.h>
#include <sys/hypervisor.h>
#include <sys/machsystm.h>
#include <sys/mutex.h>
#include <sys/evtchn_impl.h>
#include <sys/ddi_impldefs.h>
#include <sys/avintr.h>
#include <sys/cpuvar.h>
#include <sys/smp_impldefs.h>
#include <sys/archsystm.h>
#include <sys/sysmacros.h>
#include <sys/fcntl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/psm.h>
#include <sys/cpu.h>
#include <sys/cmn_err.h>
#include <sys/xen_errno.h>
#include <sys/policy.h>
#include <xen/sys/evtchn.h>

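/*
 * Programming model (illustrative sketch only, not part of the driver):
 * a user-level client opens the clone device, binds one or more event
 * channels via ioctl(2), read(2)s pending port numbers, and write(2)s
 * them back once serviced so that they are re-unmasked.  Error handling
 * is omitted and the VIRQ chosen is just an example:
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_virq bind = { VIRQ_DEBUG };
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
 *	evtchn_port_t pending;
 *	(void) read(fd, &pending, sizeof (pending));
 *	(void) write(fd, &pending, sizeof (pending));
 *
 * The BIND ioctls return the newly bound local port, read(2) blocks
 * until at least one bound port has a pending event and returns an
 * array of evtchn_port_t values, write(2) re-unmasks the ports handed
 * back, and poll(2) reports readability whenever events are queued.
 */
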
/* Some handy macros */
#define	EVTCHNDRV_MINOR2INST(minor)	((int)(minor))
#define	EVTCHNDRV_DEFAULT_NCLONES	256
#define	EVTCHNDRV_INST2SOFTS(inst)	\
	(ddi_get_soft_state(evtchndrv_statep, (inst)))

/* Soft state data structure for evtchn driver */
struct evtsoftdata {
	dev_info_t *dip;
	/* Notification ring, accessed via /dev/xen/evtchn. */
#define	EVTCHN_RING_SIZE	(PAGESIZE / sizeof (evtchn_port_t))
#define	EVTCHN_RING_MASK(_i)	((_i) & (EVTCHN_RING_SIZE - 1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;

	/* Processes wait on this queue when ring is empty. */
	kcondvar_t evtchn_wait;
	kmutex_t evtchn_lock;
	struct pollhead evtchn_pollhead;

	/* last pid to bind to this event channel. debug aid. */
	pid_t pid;
};

static void *evtchndrv_statep;
int evtchndrv_nclones = EVTCHNDRV_DEFAULT_NCLONES;
static int *evtchndrv_clone_tab;
static dev_info_t *evtchndrv_dip;
static kmutex_t evtchndrv_clone_tab_mutex;

static int evtchndrv_detach(dev_info_t *, ddi_detach_cmd_t);

/* Who's bound to each port? */
static struct evtsoftdata *port_user[NR_EVENT_CHANNELS];
static kmutex_t port_user_lock;

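/*
 * Interrupt handler for the device pseudo-IRQ (registered against
 * ec_dev_irq in attach).  The pending port is fetched from the global
 * mailbox and appended to the notification ring of whichever open
 * instance owns that port, waking any blocked reader; if the ring is
 * full, the event is dropped and the instance's overflow flag is set.
 */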
void
evtchn_device_upcall()
{
	struct evtsoftdata *ep;
	int port;

	/*
	 * This is quite gross: the evtchn that led to this invocation was
	 * left for us in a global mailbox, so retrieve it now.  We do this
	 * because the interface doesn't offer us a way to pass a dynamic
	 * argument up through the generic interrupt service layer.  The
	 * mailbox is safe since we either run with interrupts disabled or
	 * are non-preemptible until we reach here.
	 */
	port = ec_dev_mbox;
	ASSERT(port != 0);
	ec_dev_mbox = 0;
	ec_clear_evtchn(port);
	mutex_enter(&port_user_lock);

	if ((ep = port_user[port]) != NULL) {
		mutex_enter(&ep->evtchn_lock);
		if ((ep->ring_prod - ep->ring_cons) < EVTCHN_RING_SIZE) {
			ep->ring[EVTCHN_RING_MASK(ep->ring_prod)] = port;
			/*
			 * Wake up reader when ring goes non-empty
			 */
			if (ep->ring_cons == ep->ring_prod++) {
				cv_signal(&ep->evtchn_wait);
				mutex_exit(&ep->evtchn_lock);
				pollwakeup(&ep->evtchn_pollhead,
				    POLLIN | POLLRDNORM);
				goto done;
			}
		} else {
			ep->ring_overflow = 1;
		}
		mutex_exit(&ep->evtchn_lock);
	}

done:
	mutex_exit(&port_user_lock);
}

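/*
 * read(2) entry point: copy out pending event-channel ports as an array
 * of evtchn_port_t, in at most two chunks around the ring wrap point.
 * Blocks until at least one port is queued unless O_NONBLOCK is set
 * (EAGAIN); a previous ring overflow is reported as EFBIG.
 */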
/* ARGSUSED */
static int
evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc = 0;
	ssize_t count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	mutex_enter(&ep->evtchn_lock);
	for (;;) {
		if (ep->ring_overflow) {
			rc = EFBIG;
			goto done;
		}

		if ((c = ep->ring_cons) != (p = ep->ring_prod))
			break;

		if (uio->uio_fmode & O_NONBLOCK) {
			rc = EAGAIN;
			goto done;
		}

		if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
			rc = EINTR;
			goto done;
		}
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof (evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof (evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
	    ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
		rc = EFAULT;
		goto done;
	}

	ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);
done:
	mutex_exit(&ep->evtchn_lock);
	return (rc);
}

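/*
 * write(2) entry point: the caller hands back an array of ports it has
 * finished servicing, and each port that is bound to this open instance
 * is re-unmasked so further events can be delivered.
 */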
/* ARGSUSED */
static int
evtchndrv_write(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc, i;
	ssize_t count;
	evtchn_port_t *kbuf;
	struct evtsoftdata *ep;
	ulong_t flags;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	kbuf = kmem_alloc(PAGESIZE, KM_SLEEP);

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0) {
		rc = 0;
		goto out;
	}

	if (count > PAGESIZE)
		count = PAGESIZE;

	if ((rc = uiomove(kbuf, count, UIO_WRITE, uio)) != 0)
		goto out;

	mutex_enter(&port_user_lock);
	for (i = 0; i < (count / sizeof (evtchn_port_t)); i++)
		if ((kbuf[i] < NR_EVENT_CHANNELS) &&
		    (port_user[kbuf[i]] == ep)) {
			flags = intr_clear();
			ec_unmask_evtchn(kbuf[i]);
			intr_restore(flags);
		}
	mutex_exit(&port_user_lock);

out:
	kmem_free(kbuf, PAGESIZE);
	return (rc);
}

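/*
 * Record 'u' as the owner of 'port', attach the port to the device
 * pseudo-IRQ and unmask it so that events are delivered to the upcall
 * handler above.
 */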
static void
evtchn_bind_to_user(struct evtsoftdata *u, int port)
{
	ulong_t flags;

	/*
	 * save away the PID of the last process to bind to this event channel.
	 * Useful for debugging.
	 */
	u->pid = ddi_get_pid();

	mutex_enter(&port_user_lock);
	ASSERT(port_user[port] == NULL);
	port_user[port] = u;
	ec_irq_add_evtchn(ec_dev_irq, port);
	flags = intr_clear();
	ec_unmask_evtchn(port);
	intr_restore(flags);
	mutex_exit(&port_user_lock);
}

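/*
 * Undo a binding made by evtchn_bind_to_user(): mask the port, drop the
 * ownership record and detach the port from the device pseudo-IRQ.
 * The caller must hold port_user_lock.
 */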
static void
evtchndrv_close_evtchn(int port)
{
	struct evtsoftdata *ep;

	ASSERT(MUTEX_HELD(&port_user_lock));
	ep = port_user[port];
	ASSERT(ep != NULL);
	(void) ec_mask_evtchn(port);
	/*
	 * It is possible the event is in transit to us.
	 * If it is already in the ring buffer, then a client may
	 * get a spurious event notification on the next read of
	 * the evtchn device.  Clients will need to be able to
	 * handle getting a spurious event notification.
	 */
	port_user[port] = NULL;
	/*
	 * The event is masked and should stay that way; clean it up.
	 */
	ec_irq_rm_evtchn(ec_dev_irq, port);
}

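/*
 * ioctl(2) entry point.  The BIND ioctls allocate or bind an event
 * channel through the hypervisor, return the new local port via *rvalp
 * and record the calling instance as its owner; UNBIND and NOTIFY only
 * operate on ports owned by the caller.
 */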
/* ARGSUSED */
static int
evtchndrv_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cr,
    int *rvalp)
{
	int err = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	*rvalp = 0;

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_bind_virq(bind.virq, 0, rvalp)) != 0)
			break;

		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_bind_interdomain(bind.remote_domain,
		    bind.remote_port, rvalp)) != 0)
			break;

		ec_bind_vcpu(*rvalp, 0);
		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_alloc_unbound_evtchn(bind.remote_domain,
		    rvalp)) != 0)
			break;

		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;

		if (copyin((void *)data, &unbind, sizeof (unbind))) {
			err = EFAULT;
			break;
		}

		if (unbind.port >= NR_EVENT_CHANNELS) {
			err = EFAULT;
			break;
		}

		mutex_enter(&port_user_lock);

		if (port_user[unbind.port] != ep) {
			mutex_exit(&port_user_lock);
			err = ENOTCONN;
			break;
		}

		evtchndrv_close_evtchn(unbind.port);
		mutex_exit(&port_user_lock);
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;

		if (copyin((void *)data, &notify, sizeof (notify))) {
			err = EFAULT;
			break;
		}

		if (notify.port >= NR_EVENT_CHANNELS) {
			err = EINVAL;
		} else if (port_user[notify.port] != ep) {
			err = ENOTCONN;
		} else {
			ec_notify_via_evtchn(notify.port);
		}
		break;
	}

	default:
		err = ENOSYS;
	}

	return (err);
}

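/*
 * chpoll(9E) entry point: the device is always writable, reports POLLERR
 * after a ring overflow, and is readable whenever the notification ring
 * is non-empty.
 */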
static int
evtchndrv_poll(dev_t dev, short ev, int anyyet, short *revp, pollhead_t **phpp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);
	short mask = 0;

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
	*phpp = (struct pollhead *)NULL;

	if (ev & POLLOUT)
		mask |= POLLOUT;
	if (ep->ring_overflow)
		mask |= POLLERR;
	if (ev & (POLLIN | POLLRDNORM)) {
		mutex_enter(&ep->evtchn_lock);
		if (ep->ring_cons != ep->ring_prod)
			mask |= (POLLIN | POLLRDNORM) & ev;
		else
			if (mask == 0 && !anyyet)
				*phpp = &ep->evtchn_pollhead;
		mutex_exit(&ep->evtchn_lock);
	}
	*revp = mask;
	return (0);
}

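/*
 * open(9E) entry point.  Only the clone minor (0) may be opened; a free
 * clone slot is claimed, per-open soft state and a page-sized
 * notification ring are allocated, and the caller's dev_t is rewritten
 * to refer to the new minor.
 */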
/* ARGSUSED */
static int
evtchndrv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(*devp);

	if (otyp == OTYP_BLK)
		return (ENXIO);

	/*
	 * only allow open on minor = 0 - the clone device
	 */
	if (minor != 0)
		return (ENXIO);

	/*
	 * find a free slot and grab it
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	for (minor = 1; minor < evtchndrv_nclones; minor++) {
		if (evtchndrv_clone_tab[minor] == 0) {
			evtchndrv_clone_tab[minor] = 1;
			break;
		}
	}
	mutex_exit(&evtchndrv_clone_tab_mutex);
	if (minor == evtchndrv_nclones)
		return (EAGAIN);

	/* Allocate softstate structure */
	if (ddi_soft_state_zalloc(evtchndrv_statep,
	    EVTCHNDRV_MINOR2INST(minor)) != DDI_SUCCESS) {
		mutex_enter(&evtchndrv_clone_tab_mutex);
		evtchndrv_clone_tab[minor] = 0;
		mutex_exit(&evtchndrv_clone_tab_mutex);
		return (EAGAIN);
	}
	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* ... and init it */
	ep->dip = evtchndrv_dip;

	cv_init(&ep->evtchn_wait, NULL, CV_DEFAULT, NULL);
	mutex_init(&ep->evtchn_lock, NULL, MUTEX_DEFAULT, NULL);

	ep->ring = kmem_alloc(PAGESIZE, KM_SLEEP);

	/* clone driver */
	*devp = makedevice(getmajor(*devp), minor);

	return (0);
}

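/*
 * close(9E) entry point: unbind every port still owned by this instance,
 * then free the notification ring and soft state and release the clone
 * slot.
 */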
/* ARGSUSED */
static int
evtchndrv_close(dev_t dev, int flag, int otyp, struct cred *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);
	int i;

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
	if (ep == NULL)
		return (ENXIO);

	mutex_enter(&port_user_lock);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (port_user[i] != ep)
			continue;

		evtchndrv_close_evtchn(i);
	}

	mutex_exit(&port_user_lock);

	kmem_free(ep->ring, PAGESIZE);
	ddi_soft_state_free(evtchndrv_statep, EVTCHNDRV_MINOR2INST(minor));

	/*
	 * free clone tab slot
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	evtchndrv_clone_tab[minor] = 0;
	mutex_exit(&evtchndrv_clone_tab_mutex);

	return (0);
}

/* ARGSUSED */
static int
evtchndrv_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	dev_t	dev = (dev_t)arg;
	minor_t	minor = getminor(dev);
	int	retval;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (minor != 0 || evtchndrv_dip == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else {
			*result = (void *)evtchndrv_dip;
			retval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		retval = DDI_SUCCESS;
		break;
	default:
		retval = DDI_FAILURE;
	}
	return (retval);
}

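/*
 * attach(9E): a single instance backs the clone scheme above.  Create
 * the "evtchn" minor node, allocate the device pseudo-IRQ and register
 * evtchn_device_upcall() as its handler at IPL_EVTCHN.
 */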
static int
evtchndrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	error;
	int	unit = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		cmn_err(CE_WARN, "evtchn_attach: unknown cmd 0x%x\n", cmd);
		return (DDI_FAILURE);
	}

	/* DDI_ATTACH */

	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	mutex_init(&evtchndrv_clone_tab_mutex, NULL, MUTEX_DRIVER,
	    NULL);

	error = ddi_create_minor_node(dip, "evtchn", S_IFCHR, unit,
	    DDI_PSEUDO, NULL);
	if (error != DDI_SUCCESS)
		goto fail;

	/*
	 * save dip for getinfo
	 */
	evtchndrv_dip = dip;
	ddi_report_dev(dip);

	mutex_init(&port_user_lock, NULL, MUTEX_DRIVER, NULL);
	(void) memset(port_user, 0, sizeof (port_user));

	ec_dev_irq = ec_dev_alloc_irq();
	(void) add_avintr(NULL, IPL_EVTCHN, (avfunc)evtchn_device_upcall,
	    "evtchn_driver", ec_dev_irq, NULL, NULL, NULL, dip);

	return (DDI_SUCCESS);

fail:
	(void) evtchndrv_detach(dip, DDI_DETACH);
	return (error);
}

/*ARGSUSED*/
static int
evtchndrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	/*
	 * Don't allow detach for now.
	 */
	return (DDI_FAILURE);
}

/* Solaris driver framework */

static struct cb_ops evtchndrv_cb_ops = {
	evtchndrv_open,		/* cb_open */
	evtchndrv_close,	/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	evtchndrv_read,		/* cb_read */
	evtchndrv_write,	/* cb_write */
	evtchndrv_ioctl,	/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	evtchndrv_poll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_stream */
	D_NEW | D_MP | D_64BIT	/* cb_flag */
};

static struct dev_ops evtchndrv_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	evtchndrv_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	evtchndrv_attach,	/* devo_attach */
	evtchndrv_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&evtchndrv_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL			/* power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"Evtchn driver v1.0",	/* Name of the module. */
	&evtchndrv_dev_ops	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

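/*
 * Loadable module plumbing: _init() initializes the soft state framework
 * and, once mod_install() succeeds, allocates the clone minor-number
 * table sized by evtchndrv_nclones.
 */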
int
_init(void)
{
	int err;

	err = ddi_soft_state_init(&evtchndrv_statep,
	    sizeof (struct evtsoftdata), 1);
	if (err)
		return (err);

	err = mod_install(&modlinkage);
	if (err)
		ddi_soft_state_fini(&evtchndrv_statep);
	else
		evtchndrv_clone_tab = kmem_zalloc(
		    sizeof (int) * evtchndrv_nclones, KM_SLEEP);
	return (err);
}

int
_fini(void)
{
	int e;

	e = mod_remove(&modlinkage);
	if (e)
		return (e);

	ddi_soft_state_fini(&evtchndrv_statep);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
}
739