/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/types.h>
#include <sys/hypervisor.h>
#include <sys/machsystm.h>
#include <sys/mutex.h>
#include <sys/evtchn_impl.h>
#include <sys/ddi_impldefs.h>
#include <sys/avintr.h>
#include <sys/cpuvar.h>
#include <sys/smp_impldefs.h>
#include <sys/archsystm.h>
#include <sys/sysmacros.h>
#include <sys/fcntl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/psm.h>
#include <sys/cpu.h>
#include <sys/cmn_err.h>
#include <sys/xen_errno.h>
#include <sys/policy.h>
#include <xen/sys/evtchn.h>

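/*
 * Interface overview:
 *
 * Each open of the clone device (/dev/xen/evtchn, minor 0) allocates a
 * private instance with its own one-page notification ring.  Clients bind
 * event-channel ports to their instance via ioctl(); ports that fire are
 * queued on the ring by evtchn_device_upcall() and handed out by read(2)
 * as an array of evtchn_port_t values.  Writing port numbers back with
 * write(2) re-unmasks (acknowledges) them, and successful bind ioctls
 * return the new port number.
 *
 * A rough, illustrative user-space sketch (not part of this driver; error
 * handling omitted, VIRQ_DEBUG is just an example and the user-land header
 * providing these ioctl definitions may differ):
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_virq bind;
 *	evtchn_port_t pending;
 *	int port;
 *
 *	bind.virq = VIRQ_DEBUG;
 *	port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
 *	(void) read(fd, &pending, sizeof (pending));
 *	(void) write(fd, &pending, sizeof (pending));
 */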
/* Some handy macros */
#define	EVTCHNDRV_MINOR2INST(minor)	((int)(minor))
#define	EVTCHNDRV_DEFAULT_NCLONES	256
#define	EVTCHNDRV_INST2SOFTS(inst)	\
	(ddi_get_soft_state(evtchndrv_statep, (inst)))

/* Soft state data structure for evtchn driver */
struct evtsoftdata {
	dev_info_t *dip;
	/* Notification ring, accessed via /dev/xen/evtchn. */
#define	EVTCHN_RING_SIZE	(PAGESIZE / sizeof (evtchn_port_t))
#define	EVTCHN_RING_MASK(_i)	((_i) & (EVTCHN_RING_SIZE - 1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;

	kcondvar_t evtchn_wait; /* Processes wait on this when ring is empty. */
	kmutex_t evtchn_lock;
	struct pollhead evtchn_pollhead;

	pid_t pid;		/* last pid to bind to this event channel. */
	processorid_t cpu;	/* cpu thread/evtchn is bound to */
};

static void *evtchndrv_statep;
int evtchndrv_nclones = EVTCHNDRV_DEFAULT_NCLONES;
static int *evtchndrv_clone_tab;
static dev_info_t *evtchndrv_dip;
static kmutex_t evtchndrv_clone_tab_mutex;

static int evtchndrv_detach(dev_info_t *, ddi_detach_cmd_t);

/* Who's bound to each port? */
static struct evtsoftdata *port_user[NR_EVENT_CHANNELS];
static kmutex_t port_user_lock;

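/*
 * Interrupt handler for event channels bound through this driver.  Looks
 * up which instance (if any) owns the pending port and queues the port on
 * that instance's notification ring, waking any waiting reader.
 */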
void
evtchn_device_upcall()
{
	struct evtsoftdata *ep;
	int port;

	/*
	 * This is quite gross: the evtchn that led to this invocation was
	 * left in a per-CPU mailbox, so retrieve it now.  We do this because
	 * the interface doesn't offer us a way to pass a dynamic argument up
	 * through the generic interrupt service layer.  The mailbox is safe
	 * since we run either with interrupts disabled or non-preemptible
	 * until we reach here.
	 */
	port = CPU->cpu_m.mcpu_ec_mbox;
	ASSERT(port != 0);
	CPU->cpu_m.mcpu_ec_mbox = 0;
	ec_clear_evtchn(port);
	mutex_enter(&port_user_lock);

	if ((ep = port_user[port]) != NULL) {
		mutex_enter(&ep->evtchn_lock);
		if ((ep->ring_prod - ep->ring_cons) < EVTCHN_RING_SIZE) {
			ep->ring[EVTCHN_RING_MASK(ep->ring_prod)] = port;
			/*
			 * Wake up reader when ring goes non-empty
			 */
			if (ep->ring_cons == ep->ring_prod++) {
				cv_signal(&ep->evtchn_wait);
				mutex_exit(&ep->evtchn_lock);
				pollwakeup(&ep->evtchn_pollhead,
				    POLLIN | POLLRDNORM);
				goto done;
			}
		} else {
			ep->ring_overflow = 1;
		}
		mutex_exit(&ep->evtchn_lock);
	}

done:
	mutex_exit(&port_user_lock);
}

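/*
 * read(9E): copy out pending event-channel ports from the notification
 * ring, a whole number of evtchn_port_t entries (at most one page) per
 * call.  Blocks until at least one port is pending unless O_NONBLOCK is
 * set.  Returns EFBIG once the ring has overflowed.
 */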
/* ARGSUSED */
static int
evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc = 0;
	ssize_t count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	mutex_enter(&ep->evtchn_lock);
	for (;;) {
		if (ep->ring_overflow) {
			rc = EFBIG;
			goto done;
		}

		if ((c = ep->ring_cons) != (p = ep->ring_prod))
			break;

		if (uio->uio_fmode & O_NONBLOCK) {
			rc = EAGAIN;
			goto done;
		}

		if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
			rc = EINTR;
			goto done;
		}
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof (evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof (evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
	    ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
		rc = EFAULT;
		goto done;
	}

	ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);
done:
	mutex_exit(&ep->evtchn_lock);
	return (rc);
}

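/*
 * write(9E): the caller passes an array of evtchn_port_t values; each port
 * that is currently bound to this instance is unmasked again, re-enabling
 * delivery after the notification was consumed via read(9E).
 */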
/* ARGSUSED */
static int
evtchndrv_write(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc, i;
	ssize_t count;
	evtchn_port_t *kbuf;
	struct evtsoftdata *ep;
	ulong_t flags;
	minor_t minor = getminor(dev);
	evtchn_port_t sbuf[32];

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));


	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	if (count <= sizeof (sbuf))
		kbuf = sbuf;
	else
		kbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
	if ((rc = uiomove(kbuf, count, UIO_WRITE, uio)) != 0)
		goto out;

	mutex_enter(&port_user_lock);
	for (i = 0; i < (count / sizeof (evtchn_port_t)); i++)
		if ((kbuf[i] < NR_EVENT_CHANNELS) &&
		    (port_user[kbuf[i]] == ep)) {
			flags = intr_clear();
			ec_unmask_evtchn(kbuf[i]);
			intr_restore(flags);
		}
	mutex_exit(&port_user_lock);

out:
	if (kbuf != sbuf)
		kmem_free(kbuf, PAGESIZE);
	return (rc);
}

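/*
 * Record that event channel 'port' now belongs to instance 'u', route the
 * port to the driver's interrupt and unmask it so notifications can flow.
 */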
static void
evtchn_bind_to_user(struct evtsoftdata *u, int port)
{
	ulong_t flags;

	/*
	 * Save away the PID of the last process to bind to this event
	 * channel; useful for debugging.
	 */
	u->pid = ddi_get_pid();

	mutex_enter(&port_user_lock);
	ASSERT(port_user[port] == NULL);
	port_user[port] = u;
	ec_irq_add_evtchn(ec_dev_irq, port);
	flags = intr_clear();
	ec_unmask_evtchn(port);
	intr_restore(flags);
	mutex_exit(&port_user_lock);
}

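/*
 * Break the binding between 'port' and its owning instance: mask the
 * event channel, clear the port_user[] slot and detach the port from the
 * driver's interrupt.  The caller must hold port_user_lock.
 */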
static void
evtchndrv_close_evtchn(int port)
{
	struct evtsoftdata *ep;

	ASSERT(MUTEX_HELD(&port_user_lock));
	ep = port_user[port];
	ASSERT(ep != NULL);
	(void) ec_mask_evtchn(port);
	/*
	 * It is possible the event is in transit to us.
	 * If it is already in the ring buffer, then a client may
	 * get a spurious event notification on the next read of
	 * the evtchn device.  Clients will need to be able to
	 * handle getting a spurious event notification.
	 */
	port_user[port] = NULL;
	/*
	 * The event is masked and should stay so; clean it up.
	 */
	ec_irq_rm_evtchn(ec_dev_irq, port);
}

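/*
 * ioctl(9E): management interface for the device.  Supports binding a
 * VIRQ, an interdomain channel or a newly allocated unbound port to the
 * calling instance (IOCTL_EVTCHN_BIND_*), unbinding a port
 * (IOCTL_EVTCHN_UNBIND) and sending a notification on a bound port
 * (IOCTL_EVTCHN_NOTIFY).  Successful bind calls return the new port
 * number via *rvalp.
 */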
/* ARGSUSED */
static int
evtchndrv_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cr,
    int *rvalp)
{
	int err = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	*rvalp = 0;

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_bind_virq(bind.virq, 0, rvalp)) != 0)
			break;

		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_bind_interdomain(bind.remote_domain,
		    bind.remote_port, rvalp)) != 0)
			break;

		ec_bind_vcpu(*rvalp, 0);
		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;

		if (copyin((void *)data, &bind, sizeof (bind))) {
			err = EFAULT;
			break;
		}

		if ((err = xen_alloc_unbound_evtchn(bind.remote_domain,
		    rvalp)) != 0)
			break;

		evtchn_bind_to_user(ep, *rvalp);
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;

		if (copyin((void *)data, &unbind, sizeof (unbind))) {
			err = EFAULT;
			break;
		}

		if (unbind.port >= NR_EVENT_CHANNELS) {
			err = EFAULT;
			break;
		}

		mutex_enter(&port_user_lock);

		if (port_user[unbind.port] != ep) {
			mutex_exit(&port_user_lock);
			err = ENOTCONN;
			break;
		}

		evtchndrv_close_evtchn(unbind.port);
		mutex_exit(&port_user_lock);
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;

		if (copyin((void *)data, &notify, sizeof (notify))) {
			err = EFAULT;
			break;
		}

		if (notify.port >= NR_EVENT_CHANNELS) {
			err = EINVAL;
		} else if (port_user[notify.port] != ep) {
			err = ENOTCONN;
		} else {
			ec_notify_via_evtchn(notify.port);
		}
		break;
	}

	default:
		err = ENOSYS;
	}

	return (err);
}

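/*
 * chpoll(9E): the device is always writable; it is readable whenever the
 * notification ring is non-empty, and POLLERR is raised once the ring has
 * overflowed.
 */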
static int
evtchndrv_poll(dev_t dev, short ev, int anyyet, short *revp, pollhead_t **phpp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);
	short mask = 0;

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
	*phpp = (struct pollhead *)NULL;

	if (ev & POLLOUT)
		mask |= POLLOUT;
	if (ep->ring_overflow)
		mask |= POLLERR;
	if (ev & (POLLIN | POLLRDNORM)) {
		mutex_enter(&ep->evtchn_lock);
		if (ep->ring_cons != ep->ring_prod)
			mask |= (POLLIN | POLLRDNORM) & ev;
		else
			if (mask == 0 && !anyyet)
				*phpp = &ep->evtchn_pollhead;
		mutex_exit(&ep->evtchn_lock);
	}
	*revp = mask;
	return (0);
}


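/*
 * open(9E): only the clone node (minor 0) may be opened.  Each open picks
 * a free clone slot, allocates per-instance soft state plus a one-page
 * notification ring, and returns a new minor to the caller.
 */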
/* ARGSUSED */
static int
evtchndrv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(*devp);

	if (otyp == OTYP_BLK)
		return (ENXIO);

	/*
	 * only allow open on minor = 0 - the clone device
	 */
	if (minor != 0)
		return (ENXIO);

	/*
	 * find a free slot and grab it
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	for (minor = 1; minor < evtchndrv_nclones; minor++) {
		if (evtchndrv_clone_tab[minor] == 0) {
			evtchndrv_clone_tab[minor] = 1;
			break;
		}
	}
	mutex_exit(&evtchndrv_clone_tab_mutex);
	if (minor == evtchndrv_nclones)
		return (EAGAIN);

	/* Allocate softstate structure */
	if (ddi_soft_state_zalloc(evtchndrv_statep,
	    EVTCHNDRV_MINOR2INST(minor)) != DDI_SUCCESS) {
		mutex_enter(&evtchndrv_clone_tab_mutex);
		evtchndrv_clone_tab[minor] = 0;
		mutex_exit(&evtchndrv_clone_tab_mutex);
		return (EAGAIN);
	}
	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* ... and init it */
	ep->dip = evtchndrv_dip;

	cv_init(&ep->evtchn_wait, NULL, CV_DEFAULT, NULL);
	mutex_init(&ep->evtchn_lock, NULL, MUTEX_DEFAULT, NULL);

	ep->ring = kmem_alloc(PAGESIZE, KM_SLEEP);

	/* clone driver */
	*devp = makedevice(getmajor(*devp), minor);

	return (0);
}

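/*
 * close(9E): unbind every event channel still owned by this instance,
 * release the notification ring and soft state, and free the clone slot.
 */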
/* ARGSUSED */
static int
evtchndrv_close(dev_t dev, int flag, int otyp, struct cred *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);
	int i;

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));
	if (ep == NULL)
		return (ENXIO);

	mutex_enter(&port_user_lock);


	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (port_user[i] != ep)
			continue;

		evtchndrv_close_evtchn(i);
	}

	mutex_exit(&port_user_lock);

	kmem_free(ep->ring, PAGESIZE);
	ddi_soft_state_free(evtchndrv_statep, EVTCHNDRV_MINOR2INST(minor));

	/*
	 * free clone tab slot
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	evtchndrv_clone_tab[minor] = 0;
	mutex_exit(&evtchndrv_clone_tab_mutex);

	return (0);
}

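/*
 * getinfo(9E): there is a single device node; clone minors all map to
 * instance 0, and only minor 0 maps back to the dip.
 */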
/* ARGSUSED */
static int
evtchndrv_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	dev_t	dev = (dev_t)arg;
	minor_t	minor = getminor(dev);
	int	retval;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (minor != 0 || evtchndrv_dip == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else {
			*result = (void *)evtchndrv_dip;
			retval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		retval = DDI_SUCCESS;
		break;
	default:
		retval = DDI_FAILURE;
	}
	return (retval);
}


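/*
 * attach(9E): single-instance pseudo driver.  Create the "evtchn" minor
 * node, set up the port_user[] table and its lock, allocate the driver's
 * pseudo IRQ and hook evtchn_device_upcall() to it.
 */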
static int
evtchndrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	error;
	int	unit = ddi_get_instance(dip);


	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		cmn_err(CE_WARN, "evtchn_attach: unknown cmd 0x%x\n", cmd);
		return (DDI_FAILURE);
	}

	/* DDI_ATTACH */

	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	mutex_init(&evtchndrv_clone_tab_mutex, NULL, MUTEX_DRIVER,
	    NULL);

	error = ddi_create_minor_node(dip, "evtchn", S_IFCHR, unit,
	    DDI_PSEUDO, NULL);
	if (error != DDI_SUCCESS)
		goto fail;

	/*
	 * save dip for getinfo
	 */
	evtchndrv_dip = dip;
	ddi_report_dev(dip);

	mutex_init(&port_user_lock, NULL, MUTEX_DRIVER, NULL);
	(void) memset(port_user, 0, sizeof (port_user));

	ec_dev_irq = ec_dev_alloc_irq();
	(void) add_avintr(NULL, IPL_EVTCHN, (avfunc)evtchn_device_upcall,
	    "evtchn_driver", ec_dev_irq, NULL, NULL, NULL, dip);

	return (DDI_SUCCESS);

fail:
	(void) evtchndrv_detach(dip, DDI_DETACH);
	return (error);
}

/*ARGSUSED*/
static int
evtchndrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	/*
	 * Don't allow detach for now.
	 */
	return (DDI_FAILURE);
}

/* Solaris driver framework */

static struct cb_ops evtchndrv_cb_ops = {
	evtchndrv_open,		/* cb_open */
	evtchndrv_close,	/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	evtchndrv_read,		/* cb_read */
	evtchndrv_write,	/* cb_write */
	evtchndrv_ioctl,	/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	evtchndrv_poll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_stream */
	D_NEW | D_MP | D_64BIT	/* cb_flag */
};

static struct dev_ops evtchndrv_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	evtchndrv_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	evtchndrv_attach,	/* devo_attach */
	evtchndrv_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&evtchndrv_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"Evtchn driver",	/* Name of the module. */
	&evtchndrv_dev_ops	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

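/*
 * Loadable module entry points: _init() initializes the soft state anchor
 * and, on successful install, allocates the clone table; _fini() removes
 * the module and tears down the soft state.
 */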
int
_init(void)
{
	int err;

	err = ddi_soft_state_init(&evtchndrv_statep,
	    sizeof (struct evtsoftdata), 1);
	if (err)
		return (err);

	err = mod_install(&modlinkage);
	if (err)
		ddi_soft_state_fini(&evtchndrv_statep);
	else
		evtchndrv_clone_tab = kmem_zalloc(
		    sizeof (int) * evtchndrv_nclones, KM_SLEEP);
	return (err);
}

int
_fini(void)
{
	int e;

	e = mod_remove(&modlinkage);
	if (e)
		return (e);

	ddi_soft_state_fini(&evtchndrv_statep);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}