/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

struct map_ring_valloc {
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait ] = "InitWait",
		[ XenbusStateInitialised ] = "Initialised",
		[ XenbusStateConnected ] = "Connected",
		[ XenbusStateClosing ] = "Closing",
		[ XenbusStateClosed ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: optional filter called before an event is queued; the event
 *               is discarded if it returns false (may be NULL)
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: optional filter called before an event is queued; the event
 *               is discarded if it returns false (may be NULL)
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path constructed from @pathfmt and its arguments,
 * using the given xenbus_watch structure for storage, and the given
 * @callback function as the callback.  Return 0 on success, or -errno on
 * error.  On success, the watched path will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, will_handle, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
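
/*
 * Illustrative sketch (not part of this file's logic): a driver typically
 * registers a watch from its probe or connect path.  The callback and the
 * "priv->watch" storage below are hypothetical names a driver would supply:
 *
 *	static void example_node_changed(struct xenbus_watch *watch,
 *					 const char *path, const char *token)
 *	{
 *		... react to the watched node changing ...
 *	}
 *
 *	err = xenbus_watch_pathfmt(dev, &priv->watch, NULL,
 *				   example_node_changed,
 *				   "%s/hotplug-status", dev->nodename);
 *
 * A NULL @will_handle means every event is queued; on failure the error has
 * already been reported via xenbus_dev_fatal().
 */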

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
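
/*
 * Illustrative sketch: after finishing its setup a frontend typically
 * advertises readiness with
 *
 *	xenbus_switch_state(dev, XenbusStateConnected);
 *
 * and, once it sees the backend enter XenbusStateClosing during shutdown,
 * completes the handshake with
 *
 *	xenbus_frontend_closed(dev);
 *
 * which moves the device to XenbusStateClosed and completes &dev->down.
 */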

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
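
/*
 * Illustrative sketch: drivers report recoverable problems with
 * xenbus_dev_error() and unrecoverable ones with xenbus_dev_fatal().
 * "ring_ref" below is a hypothetical local variable:
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
 *	if (err < 0) {
 *		xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dev->otherend);
 *		return err;
 *	}
 *
 * The formatted message ends up in the kernel log and under the device's
 * error/<nodename> node in the store.
 */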

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	unsigned int i;
	grant_ref_t gref_head;

	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err) {
		xenbus_dev_fatal(dev, err, "granting access to ring page");
		return err;
	}

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
		else
			gfn = virt_to_gfn(vaddr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);

/*
 * xenbus_setup_ring
 * @dev: xenbus device
 * @gfp: allocation flags used for the ring pages
 * @vaddr: pointer to starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Allocate physically contiguous pages for a shared ring buffer and grant it
 * to the peer of the given device.  The ring buffer is initially filled with
 * zeroes.  The virtual address of the ring is stored at @vaddr and the
 * grant references are stored in the @grefs array.  In case of error @vaddr
 * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF.
 */
int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
	unsigned int i;
	int ret;

	*vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
	if (!*vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	ret = xenbus_grant_ring(dev, *vaddr, nr_pages, grefs);
	if (ret)
		goto err;

	return 0;

 err:
	if (*vaddr)
		free_pages_exact(*vaddr, ring_size);
	for (i = 0; i < nr_pages; i++)
		grefs[i] = INVALID_GRANT_REF;
	*vaddr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_setup_ring);

/*
 * xenbus_teardown_ring
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages
 * @grefs: grant reference array
 *
 * Remove grants for the shared ring buffer and free the associated memory.
 * On return the grant reference array is filled with INVALID_GRANT_REF.
 */
void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
			  grant_ref_t *grefs)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (grefs[i] != INVALID_GRANT_REF) {
			gnttab_end_foreign_access(grefs[i], 0);
			grefs[i] = INVALID_GRANT_REF;
		}
	}

	if (*vaddr)
		free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
	*vaddr = NULL;
}
EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
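
/*
 * Illustrative sketch: a frontend usually allocates and grants its shared
 * ring with xenbus_setup_ring() and advertises the grant reference in the
 * store.  "ring" and "gref" are hypothetical locals for a single-page ring:
 *
 *	void *ring;
 *	grant_ref_t gref;
 *
 *	err = xenbus_setup_ring(dev, GFP_KERNEL, &ring, 1, &gref);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 *
 * On disconnect or error the same driver undoes this with
 *
 *	xenbus_teardown_ring(&ring, 1, &gref);
 */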

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);

/**
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and -errno on
 * error.  If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
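
/*
 * Illustrative sketch: a backend maps the grant reference(s) published by
 * the frontend into its own address space and later releases the mapping
 * with xenbus_unmap_ring_vfree() (defined below).  "ring_ref" and "ring"
 * are hypothetical:
 *
 *	void *ring;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *	if (err)
 *		return err;
 *	...
 *	xenbus_unmap_ring_vfree(dev, ring);
 */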

/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
			     unsigned int nr_handles, unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	info->node = NULL;

	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct map_ring_valloc *info = data;

	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
	return 0;
}

static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	bool leaked = false;
	int err = -ENOMEM;

	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
	if (!area)
		return -ENOMEM;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
		goto failed;
	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	info->node = NULL;

	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}
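
/*
 * Illustrative sketch: xenbus_read_driver_state() is typically used to poll
 * the peer's state, e.g. a frontend waiting for its backend to finish
 * closing before tearing down shared resources:
 *
 *	while (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		msleep(100);
 *
 * (msleep() here merely illustrates backing off between polls.)
 */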