/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

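/*
 * Illustrative sketch (not part of the original file): xenbus_strstate() is
 * typically used for human-readable logging of state transitions.  The
 * variables dev and new_state below are hypothetical, as they would appear
 * in a driver's otherend-changed handler:
 *
 *	dev_dbg(&dev->dev, "state %s -> %s\n",
 *		xenbus_strstate(dev->state), xenbus_strstate(new_state));
 */
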
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and the following
 * arguments, using the given xenbus_watch structure for storage, and the
 * given @callback function as the callback.  Return 0 on success, or -errno
 * on error.  On success, the watched path will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, @watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

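/*
 * Illustrative sketch (not part of the original file): a frontend driver
 * might watch the backend's "state" node roughly as below.  The names
 * backend_changed and info are hypothetical; xenbus_dev_fatal() has already
 * run by the time an error is returned, so the caller only propagates it.
 *
 *	static void backend_changed(struct xenbus_watch *watch,
 *				    const char **vec, unsigned int len)
 *	{
 *		... react to the other end changing state ...
 *	}
 *
 *	err = xenbus_watch_pathfmt(dev, &info->otherend_watch,
 *				   backend_changed, "%s/state", dev->otherend);
 *	if (err)
 *		return err;
 */
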
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);

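/*
 * Illustrative sketch (not part of the original file): once a driver has
 * published its ring and event-channel details, it typically advertises the
 * fact with xenbus_switch_state().  The connect() helper shown here is
 * hypothetical.
 *
 *	static void connect(struct xenbus_device *dev)
 *	{
 *		... write ring-ref and event-channel to the store ...
 *
 *		xenbus_switch_state(dev, XenbusStateConnected);
 *	}
 */
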
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}


/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return the grant reference (a non-negative value) on success, or -errno on
 * error.  On error, the device will switch to XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);

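/*
 * Illustrative sketch (not part of the original file): a frontend commonly
 * grants its shared ring page and allocates an unbound event channel back to
 * back.  The info structure and its fields are hypothetical.
 *
 *	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
 *	if (err < 0)
 *		goto fail;
 *	info->ring_ref = err;
 *
 *	err = xenbus_alloc_evtchn(dev, &info->evtchn);
 *	if (err)
 *		goto fail;
 *
 * The resulting ring-ref and event-channel numbers are then written to the
 * device's XenStore directory for the backend to pick up.
 */
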
/**
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;
	pte_t *pte;

	*vaddr = NULL;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area)
		return -ENOMEM;

	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

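/*
 * Illustrative sketch (not part of the original file): a backend would map
 * the page granted by its frontend roughly as below, using a ring-ref it
 * previously read from XenStore.  The names ring_ref, ring_addr and the
 * shared-ring type are hypothetical.
 *
 *	void *ring_addr;
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &ring_addr);
 *	if (err)
 *		return err;
 *
 *	... treat ring_addr as the shared ring, e.g. pass it to
 *	    BACK_RING_INIT() for the device's ring type ...
 */
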
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};
	unsigned int level;

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;
	op.host_addr = arbitrary_virt_to_machine(
		lookup_address((unsigned long)vaddr, &level)).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);


/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

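/*
 * Illustrative sketch (not part of the original file): tearing down a ring
 * that was set up with xenbus_map_ring_valloc() only needs the virtual
 * address back; the grant handle is recovered internally.  The field
 * info->ring_addr is hypothetical.
 *
 *	if (info->ring_addr) {
 *		xenbus_unmap_ring_vfree(dev, info->ring_addr);
 *		info->ring_addr = NULL;
 *	}
 */
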
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
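
/*
 * Illustrative sketch (not part of the original file): callers usually pass
 * the other end's node so they can key decisions off the peer's state, e.g.
 *
 *	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosing)
 *		... begin an orderly shutdown ...
 */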