/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

static int use_ptemod;
#define populate_freeable_maps use_ptemod
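/*
 * Illustrative (untested) sketch of the userspace side of this driver,
 * using the ioctl interface declared in <xen/gntdev.h>. Error handling
 * is omitted; "remote_domid" and "gref" stand for a granting domain and
 * a grant reference obtained out of band (e.g. via xenstore):
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count = 1,
 *		.refs[0] = { .domid = remote_domid, .ref = gref },
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	// op.index is a pseudo file offset; the grant is actually
 *	// mapped (and later unmapped) via mmap()/munmap() of that range.
 *	void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, (off_t)op.index);
 */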
struct gntdev_priv {
	/* maps with visible offsets in the file descriptor */
	struct list_head maps;
	/* maps that are not visible; will be freed on munmap.
	 * Only populated if populate_freeable_maps == 1 */
	struct list_head freeable_maps;
	/* lock protects maps and freeable_maps */
	struct mutex lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
};

struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
};

struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	refcount_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref *kmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	struct page **pages;
	unsigned long pages_vm_start;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
			 map->index, map->count,
			 map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct grant_map *map)
{
	if (map == NULL)
		return;

	if (map->pages)
		gnttab_free_pages(map->count, map->pages);
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map->kmap_ops);
	kfree(map->kunmap_ops);
	kfree(map);
}

static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
	add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants ||
	    NULL == add->map_ops ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops ||
	    NULL == add->kunmap_ops ||
	    NULL == add->pages)
		goto err;

	if (gnttab_alloc_pages(count, add->pages))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
		add->kunmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	refcount_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}
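/*
 * gntdev_add_map() keeps priv->maps sorted by index and gives the new
 * map the first index range that fits. Worked example (hypothetical
 * state): with existing maps covering indices [0..1] and [10..11], a
 * new two-page map is inserted at index 2; if no gap is large enough,
 * it is appended after the last map.
 */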
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
		int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
	if (!map)
		return;

	if (!refcount_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (populate_freeable_maps && priv) {
		mutex_lock(&priv->lock);
		list_del(&map->next);
		mutex_unlock(&priv->lock);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, pgtable_t token,
		unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	/*
	 * Set the PTE as special to force get_user_pages_fast() to fall
	 * back to the slow path.  If this is not supported as part of
	 * the grant map, it will be done afterwards.
	 */
	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (1 << _GNTMAP_guest_avail0);

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
				     unsigned long addr, void *data)
{
	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
	return 0;
}
#endif

static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
				map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Set up the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address,
				map->flags | GNTMAP_host_map,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
				map->flags | GNTMAP_host_map, -1);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status) {
			err = -EINVAL;
			continue;
		}

		map->unmap_ops[i].handle = map->map_ops[i].handle;
		if (use_ptemod)
			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
	}
	return err;
}

static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	unmap_data.unmap_ops = map->unmap_ops + offset;
	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	err = gnttab_unmap_refs_sync(&unmap_data);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}
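/*
 * Worked example for the "hole" handling below (hypothetical state):
 * with offset 0, pages 5 and unmap handles [-1, H, H, -1, H], the loop
 * skips page 0, unmaps the run covering pages 1-2, skips page 3 and
 * finally unmaps page 4, i.e. two __unmap_grant_pages() calls.
 */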
static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		/* It is possible that an mmu notifier could be running
		 * concurrently, so take priv->lock to ensure that the vma
		 * won't vanish during the unmap_grant_pages call, since we
		 * will spin here until that completes. Such a concurrent
		 * call will not do any unmapping, since that has been done
		 * prior to closing the vma, but it may still iterate the
		 * unmap_ops list.
		 */
		mutex_lock(&priv->lock);
		map->vma = NULL;
		mutex_unlock(&priv->lock);
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct grant_map *map = vma->vm_private_data;

	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
	.find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */
static void unmap_if_in_range(struct grant_map *map,
			      unsigned long start, unsigned long end)
{
	unsigned long mstart, mend;
	int err;

	if (!map->vma)
		return;
	if (map->vma->vm_start >= end)
		return;
	if (map->vma->vm_end <= start)
		return;
	mstart = max(start, map->vma->vm_start);
	mend = min(end, map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			start, end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);
}

static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;

	mutex_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		unmap_if_in_range(map, start, end);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		unmap_if_in_range(map, start, end);
	}
	mutex_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	mutex_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	mutex_unlock(&priv->lock);
}

static const struct mmu_notifier_ops gntdev_mmu_ops = {
	.release = mn_release,
	.invalidate_page = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	INIT_LIST_HEAD(&priv->freeable_maps);
	mutex_init(&priv->lock);

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}
static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	mutex_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	WARN_ON(!list_empty(&priv->freeable_maps));
	mutex_unlock(&priv->lock);

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(NULL, map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		if (populate_freeable_maps)
			list_add_tail(&map->next, &priv->freeable_maps);
		err = 0;
	}
	mutex_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

out_unlock:
	up_read(&current->mm->mmap_sem);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return rv;
}
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	mutex_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

unlock_out:
	mutex_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}
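/*
 * Illustrative (untested) userspace sketch for the notify ioctl above,
 * using the structures from <xen/gntdev.h>. "evtchn_port" is a
 * hypothetical, already bound event channel, and "index" is the offset
 * previously returned by IOCTL_GNTDEV_MAP_GRANT_REF:
 *
 *	struct ioctl_gntdev_unmap_notify notify = {
 *		// clear the first byte of the mapping and signal the
 *		// event channel when the mapping goes away
 *		.index = index,
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT,
 *		.event_channel_port = evtchn_port,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */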
#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
	struct page *pages[GNTDEV_COPY_BATCH];
	s16 __user *status[GNTDEV_COPY_BATCH];
	unsigned int nr_ops;
	unsigned int nr_pages;
};

static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
			   bool writeable, unsigned long *gfn)
{
	unsigned long addr = (unsigned long)virt;
	struct page *page;
	unsigned long xen_pfn;
	int ret;

	ret = get_user_pages_fast(addr, 1, writeable, &page);
	if (ret < 0)
		return ret;

	batch->pages[batch->nr_pages++] = page;

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
	*gfn = pfn_to_gfn(xen_pfn);

	return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	for (i = 0; i < batch->nr_pages; i++)
		put_page(batch->pages[i]);
	batch->nr_pages = 0;
}

static int gntdev_copy(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	gnttab_batch_copy(batch->ops, batch->nr_ops);
	gntdev_put_pages(batch);

	/*
	 * For each completed op, update the status if the op failed
	 * and all previous ops for the segment were successful.
	 */
	for (i = 0; i < batch->nr_ops; i++) {
		s16 status = batch->ops[i].status;
		s16 old_status;

		if (status == GNTST_okay)
			continue;

		if (__get_user(old_status, batch->status[i]))
			return -EFAULT;

		if (old_status != GNTST_okay)
			continue;

		if (__put_user(status, batch->status[i]))
			return -EFAULT;
	}

	batch->nr_ops = 0;
	return 0;
}
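/*
 * A segment is split into one gnttab_copy op per Xen page it touches;
 * all ops of a segment share the same user status slot. Worked example
 * (hypothetical numbers, 4 KiB XEN_PAGE_SIZE): copying 4096 bytes from
 * a foreign grant at offset 0 into a local buffer that starts 100 bytes
 * into a page yields two ops of 3996 and 100 bytes, since the local
 * side may not cross a page boundary within one op.
 */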
static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
				 struct gntdev_grant_copy_segment *seg,
				 s16 __user *status)
{
	uint16_t copied = 0;

	/*
	 * Disallow local -> local copies since there is only space in
	 * batch->pages for one page per-op and this would be a very
	 * expensive memcpy().
	 */
	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
		return -EINVAL;

	/* Can't cross page if source/dest is a grant ref. */
	if (seg->flags & GNTCOPY_source_gref) {
		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}
	if (seg->flags & GNTCOPY_dest_gref) {
		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}

	if (put_user(GNTST_okay, status))
		return -EFAULT;

	while (copied < seg->len) {
		struct gnttab_copy *op;
		void __user *virt;
		size_t len, off;
		unsigned long gfn;
		int ret;

		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
			ret = gntdev_copy(batch);
			if (ret < 0)
				return ret;
		}

		len = seg->len - copied;

		op = &batch->ops[batch->nr_ops];
		op->flags = 0;

		if (seg->flags & GNTCOPY_source_gref) {
			op->source.u.ref = seg->source.foreign.ref;
			op->source.domid = seg->source.foreign.domid;
			op->source.offset = seg->source.foreign.offset + copied;
			op->flags |= GNTCOPY_source_gref;
		} else {
			virt = seg->source.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, false, &gfn);
			if (ret < 0)
				return ret;

			op->source.u.gmfn = gfn;
			op->source.domid = DOMID_SELF;
			op->source.offset = off;
		}

		if (seg->flags & GNTCOPY_dest_gref) {
			op->dest.u.ref = seg->dest.foreign.ref;
			op->dest.domid = seg->dest.foreign.domid;
			op->dest.offset = seg->dest.foreign.offset + copied;
			op->flags |= GNTCOPY_dest_gref;
		} else {
			virt = seg->dest.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, true, &gfn);
			if (ret < 0)
				return ret;

			op->dest.u.gmfn = gfn;
			op->dest.domid = DOMID_SELF;
			op->dest.offset = off;
		}

		op->len = len;
		copied += len;

		batch->status[batch->nr_ops] = status;
		batch->nr_ops++;
	}

	return 0;
}

static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	batch.nr_ops = 0;
	batch.nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			goto out;
		}

		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
		if (ret < 0)
			goto out;

		cond_resched();
	}
	if (batch.nr_ops)
		ret = gntdev_copy(&batch);
	return ret;

out:
	gntdev_put_pages(&batch);
	return ret;
}
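/*
 * Illustrative (untested) userspace sketch of a grant copy, with
 * "remote_domid", "gref" and "buf" as hypothetical placeholders:
 *
 *	struct gntdev_grant_copy_segment seg = {
 *		.source.foreign = { .ref = gref, .offset = 0,
 *				    .domid = remote_domid },
 *		.dest.virt = buf,
 *		.len = 4096,
 *		.flags = GNTCOPY_source_gref,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = {
 *		.count = 1,
 *		.segments = &seg,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 *	// on return, seg.status holds the per-segment GNTST_* result
 */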
static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	case IOCTL_GNTDEV_GRANT_COPY:
		return gntdev_ioctl_grant_copy(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = vma_pages(vma);
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		pr_warn("Huh? Other mm?\n");
		goto unlock_out;
	}

	refcount_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	mutex_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	} else {
#ifdef CONFIG_X86
		/*
		 * If the PTEs were not made special by the grant map
		 * hypercall, do so here.
		 *
		 * This is racy since the mapping is already visible
		 * to userspace but userspace should be well-behaved
		 * enough to not touch it until the mmap() call returns.
		 */
		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
			apply_to_page_range(vma->vm_mm, vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    set_grant_ptes_as_special, NULL);
		}
#endif
		map->pages_vm_start = vma->vm_start;
	}

	return 0;

unlock_out:
	mutex_unlock(&priv->lock);
	return err;

out_unlock_put:
	mutex_unlock(&priv->lock);
out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(priv, map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/gntdev",
	.fops  = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */