/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>

struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;
	unsigned int nr_evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;
};

struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	unsigned port;
	bool enabled;
};

static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
{
	evtchn_port_t *ring;
	size_t s = size * sizeof(*ring);

	ring = kmalloc(s, GFP_KERNEL);
	if (!ring)
		ring = vmalloc(s);

	return ring;
}

static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}

static unsigned int evtchn_ring_offset(struct per_user_data *u,
				       unsigned int idx)
{
	return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
					unsigned int idx)
{
	return u->ring + evtchn_ring_offset(u, idx);
}
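/*
 * A worked example of the index math above: the ring size is always a
 * power of two (64, 128, ...), so "idx & (ring_size - 1)" is a cheap
 * modulo.  ring_cons and ring_prod are free-running counters that are
 * never masked when stored; with ring_size == 64, a producer index of
 * 67 selects slot 67 & 63 == 3, i.e. three slots past the start after
 * one wrap.  Because only the offsets are masked, "ring_prod -
 * ring_cons" still counts the queued entries correctly across
 * wrap-around.
 */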
static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	u->nr_evtchns++;

	while (*new) {
		struct user_evtchn *this;

		this = container_of(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}

static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	u->nr_evtchns--;
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}

static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = container_of(node, struct user_evtchn, node);

		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;

	WARN(!evtchn->enabled,
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	disable_irq_nosync(irq);
	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}
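/*
 * Note on the wakeup test in evtchn_interrupt(): the post-increment in
 * "u->ring_cons == u->ring_prod++" compares the indexes from before
 * the new entry is accounted for, so the test is true exactly when the
 * ring was empty and this event has just made it non-empty.  Readers
 * are woken only on that empty -> non-empty transition; while entries
 * are already queued, further interrupts enqueue silently and
 * evtchn_read() below drains the backlog in batches.
 */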
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			enable_irq(irq_from_evtchn(port));
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}
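/*
 * A sketch of the userspace side of the read()/write() protocol above
 * (illustrative only: error handling is trimmed and handle_event() is
 * a hypothetical application callback):
 *
 *	evtchn_port_t ports[16];
 *	ssize_t n = read(fd, ports, sizeof(ports));
 *
 *	for (size_t i = 0; i < n / sizeof(ports[0]); i++)
 *		handle_event(ports[i]);
 *
 *	write(fd, ports, n);
 *
 * read() blocks while the ring is empty (unless O_NONBLOCK) and each
 * returned port arrives with its interrupt disabled; writing the port
 * numbers back is what re-enables (unmasks) them for the next event.
 */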
static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events. i.e., one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	new_ring = evtchn_alloc_ring(new_size);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and an index that
	 * now maps into the second half under the doubled mask,
	 * simply copy the old contents twice.
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	evtchn_free_ring(old_ring);

	return 0;
}
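/*
 * Why copying the old contents twice is sufficient, as a worked
 * example (the numbers are illustrative): take ring_size == 4 with
 * ring_cons == 2 and ring_prod == 6, i.e. a full ring whose data sits
 * in old slots 2, 3, 0, 1 in consumption order.  After the resize the
 * unchanged counters are masked with 7 instead of 3, so the consumer
 * reads new slots 2, 3, 4, 5.  Slots 2-3 hold old slots 2-3 from the
 * first copy and slots 4-5 hold old slots 0-1 from the second, so the
 * same ports are seen in the same order.
 */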
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	struct user_evtchn *evtchn;
	struct evtchn_close close;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
				       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port);
	return rc;

err:
	/* bind failed, should close the port now */
	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
	del_evtchn(u, evtchn);
	return rc;
}

static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}

static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= xen_evtchn_nr_channels())
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}

static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;

	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	filp->private_data = u;

	return nonseekable_open(inode, filp);
}

static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	evtchn_free_ring(u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner   = THIS_MODULE,
	.read    = evtchn_read,
	.write   = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll    = evtchn_poll,
	.fasync  = evtchn_fasync,
	.open    = evtchn_open,
	.release = evtchn_release,
	.llseek  = no_llseek,
};
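/*
 * A sketch of a userspace client driving the ioctls above
 * (illustrative only: the domain and port numbers are made up and
 * error handling is omitted):
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *
 *	struct ioctl_evtchn_bind_interdomain bind = {
 *		.remote_domain = 3,
 *		.remote_port   = 7,
 *	};
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
 *
 *	struct ioctl_evtchn_notify notify = { .port = port };
 *	ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);
 *
 *	struct ioctl_evtchn_unbind unbind = { .port = port };
 *	ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
 *
 * On success the bind ioctl returns the newly bound local port, which
 * is the value later reported by read() when events arrive.
 */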
static struct miscdevice evtchn_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/evtchn",
	.fops         = &evtchn_fops,
};

static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");