/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[XenbusStateUnknown]       = "Unknown",
		[XenbusStateInitialising]  = "Initialising",
		[XenbusStateInitWait]      = "InitWait",
		[XenbusStateInitialised]   = "Initialised",
		[XenbusStateConnected]     = "Connected",
		[XenbusStateClosing]       = "Closing",
		[XenbusStateClosed]        = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured]  = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
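
/*
 * Illustrative only, not part of this file: a device driver will typically
 * use xenbus_strstate() when logging state changes of its peer, e.g. from
 * its ->otherend_changed callback.  "frontend_changed" is a hypothetical
 * name; any callback receiving an enum xenbus_state fits.
 *
 *	static void frontend_changed(struct xenbus_device *dev,
 *				     enum xenbus_state frontend_state)
 *	{
 *		dev_dbg(&dev->dev, "frontend switched to %s\n",
 *			xenbus_strstate(frontend_state));
 *	}
 */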

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as
 * the callback.  Return 0 on success, or -errno on error.  On success, the
 * formatted path will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, @watch->node will be NULL, so the caller has nothing
 * to free, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
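
/*
 * Illustrative only, not part of this file: a frontend typically watches
 * its backend's state node.  "info" and "backend_watch_cb" are hypothetical
 * driver-private names; the xenbus_watch structure must live at least as
 * long as the registration.
 *
 *	static void backend_watch_cb(struct xenbus_watch *watch,
 *				     const char *path, const char *token)
 *	{
 *		// react to a change under <otherend>/state
 *	}
 *
 *	err = xenbus_watch_pathfmt(dev, &info->watch, backend_watch_cb,
 *				   "%s/state", dev->otherend);
 *	if (err)
 *		return err;	// error already reported via the store
 */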

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it used to attempt), because dev->state would not
	   get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state - save the new state of a driver
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (!path_buffer ||
	    xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
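
/*
 * Illustrative only, not part of this file: drivers advertise their
 * connection state machine through xenbus_switch_state().  A typical
 * connect path (with a hypothetical "setup_rings" helper) looks like:
 *
 *	err = setup_rings(dev);
 *	if (err) {
 *		xenbus_dev_error(dev, err, "setting up rings");
 *		return err;
 *	}
 *	xenbus_switch_state(dev, XenbusStateConnected);
 */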

/**
 * xenbus_dev_fatal - put an error message into the store, and shut down
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring - grant access to a ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		err = gnttab_grant_foreign_access(dev->otherend_id,
						  virt_to_gfn(vaddr), 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);


/**
 * xenbus_alloc_evtchn - allocate an event channel
 * @dev: xenbus device
 * @port: local port to be filled in
 *
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *@port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);


/**
 * xenbus_free_evtchn - free an event channel
 * @dev: xenbus device
 * @port: port of the event channel to close
 *
 * Free an existing event channel.  Return 0 on success, or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
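
/*
 * Illustrative only, not part of this file: a shared ring is typically set
 * up by granting its pages and allocating an event channel, then
 * advertising both in the store.  "info->ring" is a hypothetical
 * page-aligned ring allocated by the driver.
 *
 *	grant_ref_t gref;
 *	int evtchn;
 *	int err;
 *
 *	err = xenbus_grant_ring(dev, info->ring, 1, &gref);
 *	if (err)
 *		return err;
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 */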

/**
 * xenbus_map_ring_valloc - allocate & map pages of VA space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, or a GNTST_*
 * status (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL
 * on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     phys_addr_t *addrs,
			     unsigned int flags,
			     bool *leaked)
{
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		memset(&map[i], 0, sizeof(map[i]));
		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
				  dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (map[i].status != GNTST_okay) {
			err = map[i].status;
			xenbus_dev_fatal(dev, map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			memset(&unmap[j], 0, sizeof(unmap[j]));
			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}
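
/*
 * Illustrative only, not part of this file: a backend typically maps the
 * ring its frontend granted.  "ring_ref" would have been read from the
 * store beforehand; "be" is a hypothetical backend-private structure.
 *
 *	void *addr;
 *	int err;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &addr);
 *	if (err)
 *		return err;
 *	be->ring = addr;	// e.g. BACK_RING_INIT() on this address
 */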

struct map_ring_valloc_hvm {
	unsigned int idx;

	/* Why do we need two arrays?  See the comment above
	 * __xenbus_map_ring. */
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc_hvm *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      grant_ref_t *gnt_ref,
				      unsigned int nr_grefs,
				      void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;
	bool leaked = false;
	struct map_ring_valloc_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     &info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info.phys_addrs, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	kfree(node);
	return err;
}


/**
 * xenbus_map_ring - map pages of memory into a caller-provided area
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: grant handle array to be filled in
 * @vaddrs: addresses to map the grant pages to
 * @leaked: set true if the cleanup of a failed mapping also failed
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the pages to the specified address.
 * Returns 0 on success, or GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error.  If an error is returned, the device will
 * switch to XenbusStateClosing and the first error message will be saved in
 * XenStore.  Furthermore, if the mapping fails, the caller should check
 * @leaked: if it is true, xenbus_map_ring failed to clean up, and the caller
 * should not free the address space of @vaddrs.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
		    unsigned int nr_grefs, grant_handle_t *handles,
		    unsigned long *vaddrs, bool *leaked)
{
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int i;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = (unsigned long)vaddrs[i];

	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
				 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
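
/*
 * Illustrative only, not part of this file: when using xenbus_map_ring()
 * directly, the @leaked flag decides whether the caller may recycle the
 * virtual area it supplied.  "area" is a hypothetical vm area owned by
 * the caller.
 *
 *	bool leaked;
 *	int err;
 *
 *	err = xenbus_map_ring(dev, grefs, nr_grefs, handles, vaddrs, &leaked);
 *	if (err) {
 *		if (!leaked)
 *			free_vm_area(area);	// safe: nothing left mapped
 *		return err;			// else: leak, do not free
 *	}
 */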

/**
 * xenbus_unmap_ring_vfree - unmap and free a mapped ring
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
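
/*
 * Illustrative only, not part of this file: the disconnect path is the
 * mirror image of the map path above; "be" is the same hypothetical
 * backend-private structure.
 *
 *	if (be->ring) {
 *		xenbus_unmap_ring_vfree(dev, be->ring);
 *		be->ring = NULL;	// address space is gone now
 *	}
 */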

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     grant_ref_t *gnt_refs,
				     unsigned int nr_grefs,
				     void **vaddr)
{
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int err = GNTST_okay;
	int i;
	bool leaked;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
				GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	kfree(node);
	return err;
}

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};
#endif

struct unmap_ring_vfree_hvm {
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_vfree_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_vfree_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring - unmap memory imported from another domain
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t *handles, unsigned int nr_handles,
		      unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
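
/*
 * Illustrative only, not part of this file: xenbus_unmap_ring() is the
 * low-level counterpart of xenbus_map_ring(); the caller keeps ownership
 * of the virtual addresses.  Mirroring the xenbus_map_ring() sketch above:
 *
 *	err = xenbus_unmap_ring(dev, handles, nr_grefs, vaddrs);
 *	if (!err)
 *		free_vm_area(area);	// "area" as in the map sketch
 */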

/**
 * xenbus_read_driver_state - read a driver's state from the store
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}
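
/*
 * Illustrative only, not part of this file: xenbus_read_driver_state() is
 * commonly used to check the peer's state during (dis)connect, e.g.:
 *
 *	while (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		msleep(100);
 *
 * Real drivers usually wait on a watch or a completion instead of polling;
 * this loop is only a sketch.
 */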