/*-
 * Copyright (c) 2016 Akshay Jaggi <jaggi@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * gntdev.c
 *
 * Interface to /dev/xen/gntdev.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/bitset.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/error.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/gntdev.h>

MALLOC_DEFINE(M_GNTDEV, "gntdev", "Xen grant-table user-space device");

#define	MAX_OFFSET_COUNT ((0xffffffffffffffffull >> PAGE_SHIFT) + 1)

static d_open_t gntdev_open;
static d_ioctl_t gntdev_ioctl;
static d_mmap_single_t gntdev_mmap_single;

static struct cdevsw gntdev_devsw = {
	.d_version = D_VERSION,
	.d_open = gntdev_open,
	.d_ioctl = gntdev_ioctl,
	.d_mmap_single = gntdev_mmap_single,
	.d_name = "gntdev",
};

static device_t gntdev_dev = NULL;

struct gntdev_gref;
struct gntdev_gmap;
STAILQ_HEAD(gref_list_head, gntdev_gref);
STAILQ_HEAD(gmap_list_head, gntdev_gmap);
RB_HEAD(gref_tree_head, gntdev_gref);
RB_HEAD(gmap_tree_head, gntdev_gmap);

struct file_offset_struct {
	RB_ENTRY(file_offset_struct)	next;
	uint64_t			file_offset;
	uint64_t			count;
};

static int
offset_cmp(struct file_offset_struct *f1, struct file_offset_struct *f2)
{
	return (f1->file_offset - f2->file_offset);
}

RB_HEAD(file_offset_head, file_offset_struct);
RB_GENERATE_STATIC(file_offset_head, file_offset_struct, next, offset_cmp);

struct per_user_data {
	struct mtx		user_data_lock;
	struct gref_tree_head	gref_tree;
	struct gmap_tree_head	gmap_tree;
	struct file_offset_head	file_offset;
};

/*
 * Get an offset into the file, which the userspace program will use to
 * mmap the corresponding pages.
 */
static int
get_file_offset(struct per_user_data *priv_user, uint32_t count,
    uint64_t *file_offset)
{
	struct file_offset_struct *offset, *offset_tmp;

	if (count == 0)
		return (EINVAL);
	mtx_lock(&priv_user->user_data_lock);
	RB_FOREACH_SAFE(offset, file_offset_head, &priv_user->file_offset,
	    offset_tmp) {
		if (offset->count >= count) {
			offset->count -= count;
			*file_offset = offset->file_offset + offset->count *
			    PAGE_SIZE;
			if (offset->count == 0) {
				RB_REMOVE(file_offset_head,
				    &priv_user->file_offset, offset);
				free(offset, M_GNTDEV);
			}
			mtx_unlock(&priv_user->user_data_lock);
			return (0);
		}
	}
	mtx_unlock(&priv_user->user_data_lock);

	return (ENOSPC);
}

static void
put_file_offset(struct per_user_data *priv_user, uint32_t count,
    uint64_t file_offset)
{
	struct file_offset_struct *offset, *offset_nxt, *offset_prv;

	offset = malloc(sizeof(*offset), M_GNTDEV, M_WAITOK | M_ZERO);
	offset->file_offset = file_offset;
	offset->count = count;

	mtx_lock(&priv_user->user_data_lock);
	RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
	offset_nxt = RB_NEXT(file_offset_head, &priv_user->file_offset, offset);
	offset_prv = RB_PREV(file_offset_head, &priv_user->file_offset, offset);
	if (offset_nxt != NULL &&
	    offset_nxt->file_offset == offset->file_offset + offset->count *
	    PAGE_SIZE) {
		offset->count += offset_nxt->count;
		RB_REMOVE(file_offset_head, &priv_user->file_offset,
		    offset_nxt);
		free(offset_nxt, M_GNTDEV);
	}
	if (offset_prv != NULL &&
	    offset->file_offset == offset_prv->file_offset + offset_prv->count *
	    PAGE_SIZE) {
		offset_prv->count += offset->count;
		RB_REMOVE(file_offset_head, &priv_user->file_offset, offset);
		free(offset, M_GNTDEV);
	}
	mtx_unlock(&priv_user->user_data_lock);
}

static int gntdev_gmap_pg_ctor(void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void gntdev_gmap_pg_dtor(void *handle);
static int gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static struct cdev_pager_ops gntdev_gmap_pg_ops = {
	.cdev_pg_fault = gntdev_gmap_pg_fault,
	.cdev_pg_ctor = gntdev_gmap_pg_ctor,
	.cdev_pg_dtor = gntdev_gmap_pg_dtor,
};

struct cleanup_data_struct {
	struct mtx to_kill_grefs_mtx;
	struct mtx to_kill_gmaps_mtx;
	struct gref_list_head to_kill_grefs;
	struct gmap_list_head to_kill_gmaps;
};

static struct cleanup_data_struct cleanup_data = {
	.to_kill_grefs = STAILQ_HEAD_INITIALIZER(cleanup_data.to_kill_grefs),
	.to_kill_gmaps = STAILQ_HEAD_INITIALIZER(cleanup_data.to_kill_gmaps),
};
MTX_SYSINIT(to_kill_grefs_mtx, &cleanup_data.to_kill_grefs_mtx,
    "gntdev to_kill_grefs mutex", MTX_DEF);
MTX_SYSINIT(to_kill_gmaps_mtx, &cleanup_data.to_kill_gmaps_mtx,
    "gntdev to_kill_gmaps mutex", MTX_DEF);

static void cleanup_function(void *arg, __unused int pending);
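/*
 * Deferred cleanup: grants and grant mappings that cannot be destroyed
 * immediately (for example, because a foreign domain may still hold a
 * mapping) are queued on the to_kill lists above and reaped asynchronously
 * by this task, which runs cleanup_function() on taskqueue_thread.
 */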
static struct task cleanup_task = TASK_INITIALIZER(0, cleanup_function,
    &cleanup_data);

struct notify_data {
	uint64_t		index;
	uint32_t		action;
	uint32_t		event_channel_port;
	xen_intr_handle_t	notify_evtchn_handle;
};

static void notify(struct notify_data *notify, vm_page_t page);

/*-------------------- Grant Allocation Methods -----------------------------*/

struct gntdev_gref {
	union gref_next_union {
		STAILQ_ENTRY(gntdev_gref)	list;
		RB_ENTRY(gntdev_gref)		tree;
	} gref_next;
	uint64_t			file_index;
	grant_ref_t			gref_id;
	vm_page_t			page;
	struct notify_data		*notify;
};

static int
gref_cmp(struct gntdev_gref *g1, struct gntdev_gref *g2)
{
	return (g1->file_index - g2->file_index);
}

RB_GENERATE_STATIC(gref_tree_head, gntdev_gref, gref_next.tree, gref_cmp);

/*
 * Traverse the device-wide list of to-be-deleted allocated grants, and
 * destroy those to which all accesses, both local mmaps and foreign maps,
 * have ended.
 */
static void
gref_list_dtor(struct cleanup_data_struct *cleanup_data)
{
	struct gref_list_head tmp_grefs;
	struct gntdev_gref *gref, *gref_tmp, *gref_previous;

	STAILQ_INIT(&tmp_grefs);
	mtx_lock(&cleanup_data->to_kill_grefs_mtx);
	STAILQ_SWAP(&cleanup_data->to_kill_grefs, &tmp_grefs, gntdev_gref);
	mtx_unlock(&cleanup_data->to_kill_grefs_mtx);

	gref_previous = NULL;
	STAILQ_FOREACH_SAFE(gref, &tmp_grefs, gref_next.list, gref_tmp) {
		if (gref->page && gref->page->object == NULL) {
			if (gref->notify) {
				notify(gref->notify, gref->page);
			}
			if (gref->gref_id != GRANT_REF_INVALID) {
				if (gnttab_query_foreign_access(gref->gref_id))
					continue;
				if (gnttab_end_foreign_access_ref(gref->gref_id)
				    == 0)
					continue;
				gnttab_free_grant_reference(gref->gref_id);
			}
			vm_page_unwire_noq(gref->page);
			vm_page_free(gref->page);
			gref->page = NULL;
		}
		if (gref->page == NULL) {
			if (gref_previous == NULL)
				STAILQ_REMOVE_HEAD(&tmp_grefs, gref_next.list);
			else
				STAILQ_REMOVE_AFTER(&tmp_grefs, gref_previous,
				    gref_next.list);
			if (gref->notify)
				free(gref->notify, M_GNTDEV);
			free(gref, M_GNTDEV);
		}
		else
			gref_previous = gref;
	}

	if (!STAILQ_EMPTY(&tmp_grefs)) {
		mtx_lock(&cleanup_data->to_kill_grefs_mtx);
		STAILQ_CONCAT(&cleanup_data->to_kill_grefs, &tmp_grefs);
		mtx_unlock(&cleanup_data->to_kill_grefs_mtx);
	}
}

/*
 * Find 'count' contiguous allocated grants for a given userspace program,
 * starting at file-offset (index).
 */
static struct gntdev_gref*
gntdev_find_grefs(struct per_user_data *priv_user,
    uint64_t index, uint32_t count)
{
	struct gntdev_gref find_gref, *gref, *gref_start = NULL;

	find_gref.file_index = index;

	mtx_lock(&priv_user->user_data_lock);
	gref_start = RB_FIND(gref_tree_head, &priv_user->gref_tree, &find_gref);
	for (gref = gref_start; gref != NULL && count > 0; gref =
	    RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
		if (index != gref->file_index)
			break;
		index += PAGE_SIZE;
		count--;
	}
	mtx_unlock(&priv_user->user_data_lock);

	if (count)
		return (NULL);
	return (gref_start);
}

/*
 * IOCTL_GNTDEV_ALLOC_GREF
 * Allocate required number of wired pages for the request, grant foreign
 * access to the physical frames for these pages, and add details about
 * this allocation to the per user private data, so that these pages can
 * be mmapped by the userspace program.
 */
static int
gntdev_alloc_gref(struct ioctl_gntdev_alloc_gref *arg)
{
	uint32_t i;
	int error, readonly;
	uint64_t file_offset;
	struct gntdev_gref *grefs;
	struct per_user_data *priv_user;

	readonly = !(arg->flags & GNTDEV_ALLOC_FLAG_WRITABLE);

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	/* Cleanup grefs and free pages. */
	taskqueue_enqueue(taskqueue_thread, &cleanup_task);

	/* Get file offset for this request. */
	error = get_file_offset(priv_user, arg->count, &file_offset);
	if (error != 0)
		return (error);

	/* Allocate grefs. */
	grefs = malloc(sizeof(*grefs) * arg->count, M_GNTDEV, M_WAITOK);

	for (i = 0; i < arg->count; i++) {
		grefs[i].file_index = file_offset + i * PAGE_SIZE;
		grefs[i].gref_id = GRANT_REF_INVALID;
		grefs[i].notify = NULL;
		grefs[i].page = vm_page_alloc_noobj(VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		if (grefs[i].page == NULL) {
			log(LOG_ERR, "Page allocation failed.");
			error = ENOMEM;
			break;
		}
		grefs[i].page->valid = VM_PAGE_BITS_ALL;

		error = gnttab_grant_foreign_access(arg->domid,
		    (VM_PAGE_TO_PHYS(grefs[i].page) >> PAGE_SHIFT),
		    readonly, &grefs[i].gref_id);
		if (error != 0) {
			log(LOG_ERR, "Grant Table Hypercall failed.");
			break;
		}
	}

	/* Copy the output values. */
	arg->index = file_offset;
	for (i = 0; error == 0 && i < arg->count; i++) {
		if (suword32(&arg->gref_ids[i], grefs[i].gref_id) != 0)
			error = EFAULT;
	}

	if (error != 0) {
		/*
		 * If target domain maps the gref (by guessing the gref-id),
		 * then we can't clean it up yet and we have to leave the
		 * page in place so as to not leak our memory to that domain.
		 * Add it to a global list to be cleaned up later.
		 */
		mtx_lock(&cleanup_data.to_kill_grefs_mtx);
		for (i = 0; i < arg->count; i++)
			STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs,
			    &grefs[i], gref_next.list);
		mtx_unlock(&cleanup_data.to_kill_grefs_mtx);

		taskqueue_enqueue(taskqueue_thread, &cleanup_task);

		return (error);
	}

	/* Modify the per user private data. */
	mtx_lock(&priv_user->user_data_lock);
	for (i = 0; i < arg->count; i++)
		RB_INSERT(gref_tree_head, &priv_user->gref_tree, &grefs[i]);
	mtx_unlock(&priv_user->user_data_lock);

	return (error);
}

/*
 * IOCTL_GNTDEV_DEALLOC_GREF
 * Remove grant allocation information from the per user private data, so
 * that it can't be mmapped anymore by the userspace program, and add it
 * to the to-be-deleted grants global device-list.
 */
static int
gntdev_dealloc_gref(struct ioctl_gntdev_dealloc_gref *arg)
{
	int error;
	uint32_t count;
	struct gntdev_gref *gref, *gref_tmp;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gref = gntdev_find_grefs(priv_user, arg->index, arg->count);
	if (gref == NULL) {
		log(LOG_ERR, "Can't find requested grant-refs.");
		return (EINVAL);
	}

	/* Remove the grefs from user private data. */
	count = arg->count;
	mtx_lock(&priv_user->user_data_lock);
	mtx_lock(&cleanup_data.to_kill_grefs_mtx);
	for (; gref != NULL && count > 0; gref = gref_tmp) {
		gref_tmp = RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref);
		RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
		    gref_next.list);
		count--;
	}
	mtx_unlock(&cleanup_data.to_kill_grefs_mtx);
	mtx_unlock(&priv_user->user_data_lock);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);
	put_file_offset(priv_user, arg->count, arg->index);

	return (0);
}

/*-------------------- Grant Mapping Methods --------------------------------*/

struct gntdev_gmap_map {
	vm_object_t	mem;
	struct resource	*pseudo_phys_res;
	int		pseudo_phys_res_id;
	vm_paddr_t	phys_base_addr;
};

struct gntdev_gmap {
	union gmap_next_union {
		STAILQ_ENTRY(gntdev_gmap)	list;
		RB_ENTRY(gntdev_gmap)		tree;
	} gmap_next;
	uint64_t			file_index;
	uint32_t			count;
	struct gnttab_map_grant_ref	*grant_map_ops;
	struct gntdev_gmap_map		*map;
	struct notify_data		*notify;
};

static int
gmap_cmp(struct gntdev_gmap *g1, struct gntdev_gmap *g2)
{
	return (g1->file_index - g2->file_index);
}

RB_GENERATE_STATIC(gmap_tree_head, gntdev_gmap, gmap_next.tree, gmap_cmp);

/*
 * Traverse over the device-list of to-be-deleted grant mappings, and if
 * the region is no longer mmapped by anyone, free the memory used to
 * store information about the mapping.
 */
static void
gmap_list_dtor(struct cleanup_data_struct *cleanup_data)
{
	struct gmap_list_head tmp_gmaps;
	struct gntdev_gmap *gmap, *gmap_tmp, *gmap_previous;

	STAILQ_INIT(&tmp_gmaps);
	mtx_lock(&cleanup_data->to_kill_gmaps_mtx);
	STAILQ_SWAP(&cleanup_data->to_kill_gmaps, &tmp_gmaps, gntdev_gmap);
	mtx_unlock(&cleanup_data->to_kill_gmaps_mtx);

	gmap_previous = NULL;
	STAILQ_FOREACH_SAFE(gmap, &tmp_gmaps, gmap_next.list, gmap_tmp) {
		if (gmap->map == NULL) {
			if (gmap_previous == NULL)
				STAILQ_REMOVE_HEAD(&tmp_gmaps, gmap_next.list);
			else
				STAILQ_REMOVE_AFTER(&tmp_gmaps, gmap_previous,
				    gmap_next.list);

			if (gmap->notify)
				free(gmap->notify, M_GNTDEV);
			free(gmap->grant_map_ops, M_GNTDEV);
			free(gmap, M_GNTDEV);
		}
		else
			gmap_previous = gmap;
	}

	if (!STAILQ_EMPTY(&tmp_gmaps)) {
		mtx_lock(&cleanup_data->to_kill_gmaps_mtx);
		STAILQ_CONCAT(&cleanup_data->to_kill_gmaps, &tmp_gmaps);
		mtx_unlock(&cleanup_data->to_kill_gmaps_mtx);
	}
}

/*
 * Find mapped grants for a given userspace program, by file-offset (index)
 * and count, as supplied during the map-ioctl.
 */
static struct gntdev_gmap*
gntdev_find_gmap(struct per_user_data *priv_user,
    uint64_t index, uint32_t count)
{
	struct gntdev_gmap find_gmap, *gmap;

	find_gmap.file_index = index;

	mtx_lock(&priv_user->user_data_lock);
	gmap = RB_FIND(gmap_tree_head, &priv_user->gmap_tree, &find_gmap);
	mtx_unlock(&priv_user->user_data_lock);

	if (gmap != NULL && gmap->count == count)
		return (gmap);
	return (NULL);
}

/*
 * Remove the pages from the mgtdevice pager, call the unmap hypercall,
 * free the xenmem resource. This function is called during the
 * destruction of the mgtdevice pager, which happens when all mmaps to
 * it have been removed, and the unmap-ioctl has been performed.
 */
static int
notify_unmap_cleanup(struct gntdev_gmap *gmap)
{
	uint32_t i;
	int error, count;
	vm_page_t m;
	struct gnttab_unmap_grant_ref *unmap_ops;

	unmap_ops = malloc(sizeof(struct gnttab_unmap_grant_ref) * gmap->count,
	    M_GNTDEV, M_WAITOK);

	/* Enumerate freeable maps. */
	count = 0;
	for (i = 0; i < gmap->count; i++) {
		if (gmap->grant_map_ops[i].handle != -1) {
			unmap_ops[count].handle = gmap->grant_map_ops[i].handle;
			unmap_ops[count].host_addr =
			    gmap->grant_map_ops[i].host_addr;
			unmap_ops[count].dev_bus_addr = 0;
			count++;
		}
	}

	/* Perform notification. */
	if (count > 0 && gmap->notify) {
		vm_page_t page;
		uint64_t page_offset;

		page_offset = gmap->notify->index - gmap->file_index;
		page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + page_offset);
		notify(gmap->notify, page);
	}

	/* Free the pages. */
	VM_OBJECT_WLOCK(gmap->map->mem);
retry:
	for (i = 0; i < gmap->count; i++) {
		m = vm_page_lookup(gmap->map->mem, i);
		if (m == NULL)
			continue;
		if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
			goto retry;
		cdev_mgtdev_pager_free_page(gmap->map->mem, m);
	}
	VM_OBJECT_WUNLOCK(gmap->map->mem);

	/* Perform unmap hypercall. */
	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    unmap_ops, count);

	for (i = 0; i < gmap->count; i++) {
		gmap->grant_map_ops[i].handle = -1;
		gmap->grant_map_ops[i].host_addr = 0;
	}

	if (gmap->map) {
		error = xenmem_free(gntdev_dev, gmap->map->pseudo_phys_res_id,
		    gmap->map->pseudo_phys_res);
		KASSERT(error == 0,
		    ("Unable to release memory resource: %d", error));

		free(gmap->map, M_GNTDEV);
		gmap->map = NULL;
	}

	free(unmap_ops, M_GNTDEV);

	return (error);
}

/*
 * IOCTL_GNTDEV_MAP_GRANT_REF
 * Populate structures for mapping the grant reference in the per user
 * private data. Actual resource allocation and map hypercall is performed
 * during the mmap.
 */
static int
gntdev_map_grant_ref(struct ioctl_gntdev_map_grant_ref *arg)
{
	uint32_t i;
	int error;
	struct gntdev_gmap *gmap;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gmap = malloc(sizeof(*gmap), M_GNTDEV, M_WAITOK | M_ZERO);
	gmap->count = arg->count;
	gmap->grant_map_ops =
	    malloc(sizeof(struct gnttab_map_grant_ref) * arg->count,
	    M_GNTDEV, M_WAITOK | M_ZERO);

	for (i = 0; i < arg->count; i++) {
		struct ioctl_gntdev_grant_ref ref;

		error = copyin(&arg->refs[i], &ref, sizeof(ref));
		if (error != 0) {
			free(gmap->grant_map_ops, M_GNTDEV);
			free(gmap, M_GNTDEV);
			return (error);
		}
		gmap->grant_map_ops[i].dom = ref.domid;
		gmap->grant_map_ops[i].ref = ref.ref;
		gmap->grant_map_ops[i].handle = -1;
		gmap->grant_map_ops[i].flags = GNTMAP_host_map;
	}

	error = get_file_offset(priv_user, arg->count, &gmap->file_index);
	if (error != 0) {
		free(gmap->grant_map_ops, M_GNTDEV);
		free(gmap, M_GNTDEV);
		return (error);
	}

	mtx_lock(&priv_user->user_data_lock);
	RB_INSERT(gmap_tree_head, &priv_user->gmap_tree, gmap);
	mtx_unlock(&priv_user->user_data_lock);

	arg->index = gmap->file_index;

	return (error);
}

/*
 * IOCTL_GNTDEV_UNMAP_GRANT_REF
 * Remove the map information from the per user private data and add it
 * to the global device-list of mappings to be deleted. A reference to
 * the mgtdevice pager is also decreased, the reason for which is
 * explained in mmap_gmap().
 */
static int
gntdev_unmap_grant_ref(struct ioctl_gntdev_unmap_grant_ref *arg)
{
	int error;
	struct gntdev_gmap *gmap;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gmap = gntdev_find_gmap(priv_user, arg->index, arg->count);
	if (gmap == NULL) {
		log(LOG_ERR, "Can't find requested grant-map.");
		return (EINVAL);
	}

	mtx_lock(&priv_user->user_data_lock);
	mtx_lock(&cleanup_data.to_kill_gmaps_mtx);
	RB_REMOVE(gmap_tree_head, &priv_user->gmap_tree, gmap);
	STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap, gmap_next.list);
	mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);
	mtx_unlock(&priv_user->user_data_lock);

	if (gmap->map)
		vm_object_deallocate(gmap->map->mem);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);
	put_file_offset(priv_user, arg->count, arg->index);

	return (0);
}

/*
 * IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR
 * Get the file-offset and count for a given mapping, from the virtual
 * address where the mapping is mmapped.
 * Note that this only works for grants mapped by this domain, not for
 * grants allocated by it; count does not make much sense for allocated
 * grants. Also, because this function is present in the Linux gntdev
 * device, but not in the Linux gntalloc one, most userspace code only
 * uses it for mapped grants.
 */
static int
gntdev_get_offset_for_vaddr(struct ioctl_gntdev_get_offset_for_vaddr *arg,
    struct thread *td)
{
	int error;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t mem;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct gntdev_gmap *gmap;
	int rc;

	map = &td->td_proc->p_vmspace->vm_map;
	error = vm_map_lookup(&map, arg->vaddr, VM_PROT_NONE, &entry,
	    &mem, &pindex, &prot, &wired);
	if (error != KERN_SUCCESS)
		return (EINVAL);

	if ((mem->type != OBJT_MGTDEVICE) ||
	    (mem->un_pager.devp.ops != &gntdev_gmap_pg_ops)) {
		rc = EINVAL;
		goto out;
	}

	gmap = mem->handle;
	if (gmap == NULL ||
	    (entry->end - entry->start) != (gmap->count * PAGE_SIZE)) {
		rc = EINVAL;
		goto out;
	}

	arg->count = gmap->count;
	arg->offset = gmap->file_index;
	rc = 0;

out:
	vm_map_lookup_done(map, entry);
	return (rc);
}

/*-------------------- Grant Mapping Pager ----------------------------------*/

static int
gntdev_gmap_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	return (0);
}

static void
gntdev_gmap_pg_dtor(void *handle)
{

	notify_unmap_cleanup((struct gntdev_gmap *)handle);
}

static int
gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct gntdev_gmap *gmap = object->handle;
	vm_pindex_t pidx, ridx;
	vm_page_t page;
	vm_ooffset_t relative_offset;

	if (gmap->map == NULL)
		return (VM_PAGER_FAIL);

	relative_offset = offset - gmap->file_index;

	pidx = OFF_TO_IDX(offset);
	ridx = OFF_TO_IDX(relative_offset);
	if (ridx >= gmap->count ||
	    gmap->grant_map_ops[ridx].status != GNTST_okay)
		return (VM_PAGER_FAIL);

	page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + relative_offset);
	if (page == NULL)
		return (VM_PAGER_FAIL);

	KASSERT((page->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", page));
	KASSERT(vm_page_wired(page), ("page %p is not wired", page));
	KASSERT(!vm_page_busied(page), ("page %p is busy", page));

	vm_page_busy_acquire(page, 0);
	vm_page_valid(page);
	if (*mres != NULL)
		vm_page_replace(page, object, pidx, *mres);
	else
		vm_page_insert(page, object, pidx);
	*mres = page;
	return (VM_PAGER_OK);
}

/*------------------ Grant Table Methods ------------------------------------*/

static void
notify(struct notify_data *notify, vm_page_t page)
{
	if (notify->action & UNMAP_NOTIFY_CLEAR_BYTE) {
		uint8_t *mem;
		uint64_t offset;

		offset = notify->index & PAGE_MASK;
		mem = (uint8_t *)pmap_quick_enter_page(page);
		mem[offset] = 0;
		pmap_quick_remove_page((vm_offset_t)mem);
	}
	if (notify->action & UNMAP_NOTIFY_SEND_EVENT) {
		xen_intr_signal(notify->notify_evtchn_handle);
		xen_intr_unbind(&notify->notify_evtchn_handle);
	}
	notify->action = 0;
}

/*
 * Helper to copy new arguments from the notify ioctl into
 * the existing notify data.
 */
static int
copy_notify_helper(struct notify_data *destination,
    struct ioctl_gntdev_unmap_notify *source)
{
	xen_intr_handle_t handlep = NULL;

	/*
	 * "Get" before "Put"ting previous reference, as we might be
	 * holding the last reference to the event channel port.
	 */
	if (source->action & UNMAP_NOTIFY_SEND_EVENT)
		if (xen_intr_get_evtchn_from_port(source->event_channel_port,
		    &handlep) != 0)
			return (EINVAL);

	if (destination->action & UNMAP_NOTIFY_SEND_EVENT)
		xen_intr_unbind(&destination->notify_evtchn_handle);

	destination->action = source->action;
	destination->event_channel_port = source->event_channel_port;
	destination->index = source->index;
	destination->notify_evtchn_handle = handlep;

	return (0);
}

/*
 * IOCTL_GNTDEV_SET_UNMAP_NOTIFY
 * Set unmap notification inside the appropriate grant. It sends a
 * notification when the grant is completely munmapped by this domain
 * and ready for destruction.
 */
static int
gntdev_set_unmap_notify(struct ioctl_gntdev_unmap_notify *arg)
{
	int error;
	uint64_t index;
	struct per_user_data *priv_user;
	struct gntdev_gref *gref = NULL;
	struct gntdev_gmap *gmap;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	if (arg->action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return (EINVAL);

	index = arg->index & ~PAGE_MASK;
	gref = gntdev_find_grefs(priv_user, index, 1);
	if (gref) {
		if (gref->notify == NULL)
			gref->notify = malloc(sizeof(*arg), M_GNTDEV,
			    M_WAITOK | M_ZERO);
		return (copy_notify_helper(gref->notify, arg));
	}

	error = EINVAL;
	mtx_lock(&priv_user->user_data_lock);
	RB_FOREACH(gmap, gmap_tree_head, &priv_user->gmap_tree) {
		if (arg->index >= gmap->file_index &&
		    arg->index < gmap->file_index + gmap->count * PAGE_SIZE) {
			if (gmap->notify == NULL)
				gmap->notify = malloc(sizeof(*arg), M_GNTDEV,
				    M_WAITOK | M_ZERO);
			error = copy_notify_helper(gmap->notify, arg);
			break;
		}
	}
	mtx_unlock(&priv_user->user_data_lock);

	return (error);
}

/*------------------ Gntdev Char Device Methods -----------------------------*/

static void
cleanup_function(void *arg, __unused int pending)
{

	gref_list_dtor((struct cleanup_data_struct *) arg);
	gmap_list_dtor((struct cleanup_data_struct *) arg);
}

static void
per_user_data_dtor(void *arg)
{
	struct gntdev_gref *gref, *gref_tmp;
	struct gntdev_gmap *gmap, *gmap_tmp;
	struct file_offset_struct *offset, *offset_tmp;
	struct per_user_data *priv_user;

	priv_user = (struct per_user_data *) arg;

	mtx_lock(&priv_user->user_data_lock);

	mtx_lock(&cleanup_data.to_kill_grefs_mtx);
	RB_FOREACH_SAFE(gref, gref_tree_head, &priv_user->gref_tree, gref_tmp) {
		RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
		    gref_next.list);
	}
	mtx_unlock(&cleanup_data.to_kill_grefs_mtx);

	mtx_lock(&cleanup_data.to_kill_gmaps_mtx);
	RB_FOREACH_SAFE(gmap, gmap_tree_head, &priv_user->gmap_tree, gmap_tmp) {
		RB_REMOVE(gmap_tree_head, &priv_user->gmap_tree, gmap);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap,
		    gmap_next.list);
		if (gmap->map)
			vm_object_deallocate(gmap->map->mem);
	}
	mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);

	RB_FOREACH_SAFE(offset, file_offset_head, &priv_user->file_offset,
	    offset_tmp) {
		RB_REMOVE(file_offset_head, &priv_user->file_offset, offset);
		free(offset, M_GNTDEV);
	}

	mtx_unlock(&priv_user->user_data_lock);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);

	mtx_destroy(&priv_user->user_data_lock);
	free(priv_user, M_GNTDEV);
}

static int
gntdev_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	int error;
	struct per_user_data *priv_user;
	struct file_offset_struct *offset;

	priv_user = malloc(sizeof(*priv_user), M_GNTDEV, M_WAITOK | M_ZERO);
	RB_INIT(&priv_user->gref_tree);
	RB_INIT(&priv_user->gmap_tree);
	RB_INIT(&priv_user->file_offset);
	offset = malloc(sizeof(*offset), M_GNTDEV, M_WAITOK | M_ZERO);
	offset->file_offset = 0;
	offset->count = MAX_OFFSET_COUNT;
	RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
	mtx_init(&priv_user->user_data_lock,
	    "per user data mutex", NULL, MTX_DEF);

	error = devfs_set_cdevpriv(priv_user, per_user_data_dtor);
	if (error != 0)
		per_user_data_dtor(priv_user);

	return (error);
}

static int
gntdev_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int error;

	switch (cmd) {
	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		error = gntdev_set_unmap_notify(
		    (struct ioctl_gntdev_unmap_notify*) data);
		break;
	case IOCTL_GNTDEV_ALLOC_GREF:
		error = gntdev_alloc_gref(
		    (struct ioctl_gntdev_alloc_gref*) data);
		break;
	case IOCTL_GNTDEV_DEALLOC_GREF:
		error = gntdev_dealloc_gref(
		    (struct ioctl_gntdev_dealloc_gref*) data);
		break;
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		error = gntdev_map_grant_ref(
		    (struct ioctl_gntdev_map_grant_ref*) data);
		break;
	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		error = gntdev_unmap_grant_ref(
		    (struct ioctl_gntdev_unmap_grant_ref*) data);
		break;
	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		error = gntdev_get_offset_for_vaddr(
		    (struct ioctl_gntdev_get_offset_for_vaddr*) data, td);
		break;
	default:
		error = ENOSYS;
		break;
	}

	return (error);
}

/*
 * MMAP an allocated grant into user memory.
 * Note that the grants must not already be mmapped; otherwise
 * this function will fail.
 */
static int
mmap_gref(struct per_user_data *priv_user, struct gntdev_gref *gref_start,
    uint32_t count, vm_size_t size, struct vm_object **object)
{
	vm_object_t mem_obj;
	struct gntdev_gref *gref;

	mem_obj = vm_pager_allocate(OBJT_PHYS, NULL, size, VM_PROT_ALL, 0,
	    curthread->td_ucred);
	if (mem_obj == NULL)
		return (ENOMEM);

	mtx_lock(&priv_user->user_data_lock);
	VM_OBJECT_WLOCK(mem_obj);
	for (gref = gref_start; gref != NULL && count > 0; gref =
	    RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
		if (gref->page->object)
			break;

		vm_page_insert(gref->page, mem_obj,
		    OFF_TO_IDX(gref->file_index));

		count--;
	}
	VM_OBJECT_WUNLOCK(mem_obj);
	mtx_unlock(&priv_user->user_data_lock);

	if (count) {
		vm_object_deallocate(mem_obj);
		return (EINVAL);
	}

	*object = mem_obj;

	return (0);

}

/*
 * MMAP a mapped grant into user memory.
 */
static int
mmap_gmap(struct per_user_data *priv_user, struct gntdev_gmap *gmap_start,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot)
{
	uint32_t i;
	int error;

	/*
	 * The grant map hypercall might already be done.
	 * If that is the case, increase a reference to the
	 * vm object and return the already allocated object.
	 */
	if (gmap_start->map) {
		vm_object_reference(gmap_start->map->mem);
		*object = gmap_start->map->mem;
		return (0);
	}

	gmap_start->map = malloc(sizeof(*(gmap_start->map)), M_GNTDEV,
	    M_WAITOK | M_ZERO);

	/* Allocate the xen pseudo physical memory resource. */
	gmap_start->map->pseudo_phys_res_id = 0;
	gmap_start->map->pseudo_phys_res = xenmem_alloc(gntdev_dev,
	    &gmap_start->map->pseudo_phys_res_id, size);
	if (gmap_start->map->pseudo_phys_res == NULL) {
		free(gmap_start->map, M_GNTDEV);
		gmap_start->map = NULL;
		return (ENOMEM);
	}
	gmap_start->map->phys_base_addr =
	    rman_get_start(gmap_start->map->pseudo_phys_res);

	/* Allocate the mgtdevice pager. */
	gmap_start->map->mem = cdev_pager_allocate(gmap_start, OBJT_MGTDEVICE,
	    &gntdev_gmap_pg_ops, size, nprot, *offset, NULL);
	if (gmap_start->map->mem == NULL) {
		xenmem_free(gntdev_dev, gmap_start->map->pseudo_phys_res_id,
		    gmap_start->map->pseudo_phys_res);
		free(gmap_start->map, M_GNTDEV);
		gmap_start->map = NULL;
		return (ENOMEM);
	}

	for (i = 0; i < gmap_start->count; i++) {
		gmap_start->grant_map_ops[i].host_addr =
		    gmap_start->map->phys_base_addr + i * PAGE_SIZE;

		if ((nprot & PROT_WRITE) == 0)
			gmap_start->grant_map_ops[i].flags |= GNTMAP_readonly;
	}
	/* Make the MAP hypercall. */
	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
	    gmap_start->grant_map_ops, gmap_start->count);
	if (error != 0) {
		/*
		 * Deallocate pager.
		 * Pager deallocation will automatically take care of
		 * xenmem deallocation, etc.
		 */
		vm_object_deallocate(gmap_start->map->mem);

		return (EINVAL);
	}

	/* Retry EAGAIN maps. */
	for (i = 0; i < gmap_start->count; i++) {
		int delay = 1;
		while (delay < 256 &&
		    gmap_start->grant_map_ops[i].status == GNTST_eagain) {
			HYPERVISOR_grant_table_op( GNTTABOP_map_grant_ref,
			    &gmap_start->grant_map_ops[i], 1);
			pause(("gntmap"), delay * SBT_1MS);
			delay++;
		}
		if (gmap_start->grant_map_ops[i].status == GNTST_eagain)
			gmap_start->grant_map_ops[i].status = GNTST_bad_page;

		if (gmap_start->grant_map_ops[i].status != GNTST_okay) {
			/*
			 * Deallocate pager.
			 * Pager deallocation will automatically take care of
			 * xenmem deallocation, notification, unmap hypercall,
			 * etc.
			 */
			vm_object_deallocate(gmap_start->map->mem);

			return (EINVAL);
		}
	}

	/*
	 * Add a reference to the vm object. We do not want the vm object
	 * to be deleted when all the mmaps are unmapped, because it may be
	 * re-mmapped. Instead, we want the object to be deleted only after
	 * the munmaps and the unmap-ioctl have both been processed.
	 */
	vm_object_reference(gmap_start->map->mem);

	*object = gmap_start->map->mem;

	return (0);
}

static int
gntdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	int error;
	uint32_t count;
	struct gntdev_gref *gref_start;
	struct gntdev_gmap *gmap_start;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	count = OFF_TO_IDX(size);

	gref_start = gntdev_find_grefs(priv_user, *offset, count);
	if (gref_start) {
		error = mmap_gref(priv_user, gref_start, count, size, object);
		return (error);
	}

	gmap_start = gntdev_find_gmap(priv_user, *offset, count);
	if (gmap_start) {
		error = mmap_gmap(priv_user, gmap_start, offset, size, object,
		    nprot);
		return (error);
	}

	return (EINVAL);
}

/*------------------ Private Device Attachment Functions --------------------*/
static void
gntdev_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach gntdev device on non Xen domain"));

	if (BUS_ADD_CHILD(parent, 0, "gntdev", 0) == NULL)
		panic("unable to attach gntdev user-space device");
}

static int
gntdev_probe(device_t dev)
{

	gntdev_dev = dev;
	device_set_desc(dev, "Xen grant-table user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
gntdev_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &gntdev_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/gntdev");
	return (0);
}

/*-------------------- Private Device Attachment Data -----------------------*/
static device_method_t gntdev_methods[] = {
	DEVMETHOD(device_identify, gntdev_identify),
	DEVMETHOD(device_probe, gntdev_probe),
	DEVMETHOD(device_attach, gntdev_attach),
	DEVMETHOD_END
};

static driver_t gntdev_driver = {
	"gntdev",
	gntdev_methods,
	0,
};

DRIVER_MODULE(gntdev, xenpv, gntdev_driver, 0, 0);
MODULE_DEPEND(gntdev, xenpv, 1, 1, 1);