1 /* 2 * Driver giving user-space access to the kernel's xenbus connection 3 * to xenstore. 4 * 5 * Copyright (c) 2005, Christian Limpach 6 * Copyright (c) 2005, Rusty Russell, IBM Corporation 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License version 2 10 * as published by the Free Software Foundation; or, when distributed 11 * separately from the Linux kernel or incorporated into other 12 * software packages, subject to the following license: 13 * 14 * Permission is hereby granted, free of charge, to any person obtaining a copy 15 * of this source file (the "Software"), to deal in the Software without 16 * restriction, including without limitation the rights to use, copy, modify, 17 * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 * and to permit persons to whom the Software is furnished to do so, subject to 19 * the following conditions: 20 * 21 * The above copyright notice and this permission notice shall be included in 22 * all copies or substantial portions of the Software. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 * IN THE SOFTWARE. 31 * 32 * Changes: 33 * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem 34 * and /proc/xen compatibility mount point. 35 * Turned xenfs into a loadable module. 36 */ 37 38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 39 40 #include <linux/kernel.h> 41 #include <linux/errno.h> 42 #include <linux/uio.h> 43 #include <linux/notifier.h> 44 #include <linux/wait.h> 45 #include <linux/fs.h> 46 #include <linux/poll.h> 47 #include <linux/mutex.h> 48 #include <linux/sched.h> 49 #include <linux/spinlock.h> 50 #include <linux/mount.h> 51 #include <linux/pagemap.h> 52 #include <linux/uaccess.h> 53 #include <linux/init.h> 54 #include <linux/namei.h> 55 #include <linux/string.h> 56 #include <linux/slab.h> 57 #include <linux/miscdevice.h> 58 #include <linux/workqueue.h> 59 60 #include <xen/xenbus.h> 61 #include <xen/xen.h> 62 #include <asm/xen/hypervisor.h> 63 64 #include "xenbus.h" 65 66 unsigned int xb_dev_generation_id; 67 68 /* 69 * An element of a list of outstanding transactions, for which we're 70 * still waiting a reply. 71 */ 72 struct xenbus_transaction_holder { 73 struct list_head list; 74 struct xenbus_transaction handle; 75 unsigned int generation_id; 76 }; 77 78 /* 79 * A buffer of data on the queue. 80 */ 81 struct read_buffer { 82 struct list_head list; 83 unsigned int cons; 84 unsigned int len; 85 char msg[] __counted_by(len); 86 }; 87 88 struct xenbus_file_priv { 89 /* 90 * msgbuffer_mutex is held while partial requests are built up 91 * and complete requests are acted on. It therefore protects 92 * the "transactions" and "watches" lists, and the partial 93 * request length and buffer. 94 * 95 * reply_mutex protects the reply being built up to return to 96 * usermode. It nests inside msgbuffer_mutex but may be held 97 * alone during a watch callback. 98 */ 99 struct mutex msgbuffer_mutex; 100 101 /* In-progress transactions */ 102 struct list_head transactions; 103 104 /* Active watches. */ 105 struct list_head watches; 106 107 /* Partial request. */ 108 unsigned int len; 109 union { 110 struct xsd_sockmsg msg; 111 char buffer[XENSTORE_PAYLOAD_MAX]; 112 } u; 113 114 /* Response queue. */ 115 struct mutex reply_mutex; 116 struct list_head read_buffers; 117 wait_queue_head_t read_waitq; 118 119 struct kref kref; 120 121 struct work_struct wq; 122 }; 123 124 /* Read out any raw xenbus messages queued up. */ 125 static ssize_t xenbus_file_read(struct file *filp, 126 char __user *ubuf, 127 size_t len, loff_t *ppos) 128 { 129 struct xenbus_file_priv *u = filp->private_data; 130 struct read_buffer *rb; 131 ssize_t i; 132 int ret; 133 134 mutex_lock(&u->reply_mutex); 135 again: 136 while (list_empty(&u->read_buffers)) { 137 mutex_unlock(&u->reply_mutex); 138 if (filp->f_flags & O_NONBLOCK) 139 return -EAGAIN; 140 141 ret = wait_event_interruptible(u->read_waitq, 142 !list_empty(&u->read_buffers)); 143 if (ret) 144 return ret; 145 mutex_lock(&u->reply_mutex); 146 } 147 148 rb = list_entry(u->read_buffers.next, struct read_buffer, list); 149 i = 0; 150 while (i < len) { 151 size_t sz = min_t(size_t, len - i, rb->len - rb->cons); 152 153 ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); 154 155 i += sz - ret; 156 rb->cons += sz - ret; 157 158 if (ret != 0) { 159 if (i == 0) 160 i = -EFAULT; 161 goto out; 162 } 163 164 /* Clear out buffer if it has been consumed */ 165 if (rb->cons == rb->len) { 166 list_del(&rb->list); 167 kfree(rb); 168 if (list_empty(&u->read_buffers)) 169 break; 170 rb = list_entry(u->read_buffers.next, 171 struct read_buffer, list); 172 } 173 } 174 if (i == 0) 175 goto again; 176 177 out: 178 mutex_unlock(&u->reply_mutex); 179 return i; 180 } 181 182 /* 183 * Add a buffer to the queue. Caller must hold the appropriate lock 184 * if the queue is not local. (Commonly the caller will build up 185 * multiple queued buffers on a temporary local list, and then add it 186 * to the appropriate list under lock once all the buffers have een 187 * successfully allocated.) 188 */ 189 static int queue_reply(struct list_head *queue, const void *data, size_t len) 190 { 191 struct read_buffer *rb; 192 193 if (len == 0) 194 return 0; 195 if (len > XENSTORE_PAYLOAD_MAX) 196 return -EINVAL; 197 198 rb = kmalloc(struct_size(rb, msg, len), GFP_KERNEL); 199 if (rb == NULL) 200 return -ENOMEM; 201 202 rb->cons = 0; 203 rb->len = len; 204 205 memcpy(rb->msg, data, len); 206 207 list_add_tail(&rb->list, queue); 208 return 0; 209 } 210 211 /* 212 * Free all the read_buffer s on a list. 213 * Caller must have sole reference to list. 214 */ 215 static void queue_cleanup(struct list_head *list) 216 { 217 struct read_buffer *rb; 218 219 while (!list_empty(list)) { 220 rb = list_entry(list->next, struct read_buffer, list); 221 list_del(list->next); 222 kfree(rb); 223 } 224 } 225 226 struct watch_adapter { 227 struct list_head list; 228 struct xenbus_watch watch; 229 struct xenbus_file_priv *dev_data; 230 char *token; 231 }; 232 233 static void free_watch_adapter(struct watch_adapter *watch) 234 { 235 kfree(watch->watch.node); 236 kfree(watch->token); 237 kfree(watch); 238 } 239 240 static struct watch_adapter *alloc_watch_adapter(const char *path, 241 const char *token) 242 { 243 struct watch_adapter *watch; 244 245 watch = kzalloc(sizeof(*watch), GFP_KERNEL); 246 if (watch == NULL) 247 goto out_fail; 248 249 watch->watch.node = kstrdup(path, GFP_KERNEL); 250 if (watch->watch.node == NULL) 251 goto out_free; 252 253 watch->token = kstrdup(token, GFP_KERNEL); 254 if (watch->token == NULL) 255 goto out_free; 256 257 return watch; 258 259 out_free: 260 free_watch_adapter(watch); 261 262 out_fail: 263 return NULL; 264 } 265 266 static void watch_fired(struct xenbus_watch *watch, 267 const char *path, 268 const char *token) 269 { 270 struct watch_adapter *adap; 271 struct xsd_sockmsg hdr; 272 const char *token_caller; 273 int path_len, tok_len, body_len; 274 int ret; 275 LIST_HEAD(staging_q); 276 277 adap = container_of(watch, struct watch_adapter, watch); 278 279 token_caller = adap->token; 280 281 path_len = strlen(path) + 1; 282 tok_len = strlen(token_caller) + 1; 283 body_len = path_len + tok_len; 284 285 hdr.type = XS_WATCH_EVENT; 286 hdr.len = body_len; 287 288 mutex_lock(&adap->dev_data->reply_mutex); 289 290 ret = queue_reply(&staging_q, &hdr, sizeof(hdr)); 291 if (!ret) 292 ret = queue_reply(&staging_q, path, path_len); 293 if (!ret) 294 ret = queue_reply(&staging_q, token_caller, tok_len); 295 296 if (!ret) { 297 /* success: pass reply list onto watcher */ 298 list_splice_tail(&staging_q, &adap->dev_data->read_buffers); 299 wake_up(&adap->dev_data->read_waitq); 300 } else 301 queue_cleanup(&staging_q); 302 303 mutex_unlock(&adap->dev_data->reply_mutex); 304 } 305 306 static void xenbus_worker(struct work_struct *wq) 307 { 308 struct xenbus_file_priv *u; 309 struct xenbus_transaction_holder *trans, *tmp; 310 struct watch_adapter *watch, *tmp_watch; 311 struct read_buffer *rb, *tmp_rb; 312 313 u = container_of(wq, struct xenbus_file_priv, wq); 314 315 /* 316 * No need for locking here because there are no other users, 317 * by definition. 318 */ 319 320 list_for_each_entry_safe(trans, tmp, &u->transactions, list) { 321 xenbus_transaction_end(trans->handle, 1); 322 list_del(&trans->list); 323 kfree(trans); 324 } 325 326 list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { 327 unregister_xenbus_watch(&watch->watch); 328 list_del(&watch->list); 329 free_watch_adapter(watch); 330 } 331 332 list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { 333 list_del(&rb->list); 334 kfree(rb); 335 } 336 kfree(u); 337 } 338 339 static void xenbus_file_free(struct kref *kref) 340 { 341 struct xenbus_file_priv *u; 342 343 /* 344 * We might be called in xenbus_thread(). 345 * Use workqueue to avoid deadlock. 346 */ 347 u = container_of(kref, struct xenbus_file_priv, kref); 348 schedule_work(&u->wq); 349 } 350 351 static struct xenbus_transaction_holder *xenbus_get_transaction( 352 struct xenbus_file_priv *u, uint32_t tx_id) 353 { 354 struct xenbus_transaction_holder *trans; 355 356 list_for_each_entry(trans, &u->transactions, list) 357 if (trans->handle.id == tx_id) 358 return trans; 359 360 return NULL; 361 } 362 363 void xenbus_dev_queue_reply(struct xb_req_data *req) 364 { 365 struct xenbus_file_priv *u = req->par; 366 struct xenbus_transaction_holder *trans = NULL; 367 int rc; 368 LIST_HEAD(staging_q); 369 370 xs_request_exit(req); 371 372 mutex_lock(&u->msgbuffer_mutex); 373 374 if (req->type == XS_TRANSACTION_START) { 375 trans = xenbus_get_transaction(u, 0); 376 if (WARN_ON(!trans)) 377 goto out; 378 if (req->msg.type == XS_ERROR) { 379 list_del(&trans->list); 380 kfree(trans); 381 } else { 382 rc = kstrtou32(req->body, 10, &trans->handle.id); 383 if (WARN_ON(rc)) 384 goto out; 385 } 386 } else if (req->type == XS_TRANSACTION_END) { 387 trans = xenbus_get_transaction(u, req->msg.tx_id); 388 if (WARN_ON(!trans)) 389 goto out; 390 list_del(&trans->list); 391 kfree(trans); 392 } 393 394 mutex_unlock(&u->msgbuffer_mutex); 395 396 mutex_lock(&u->reply_mutex); 397 rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg)); 398 if (!rc) 399 rc = queue_reply(&staging_q, req->body, req->msg.len); 400 if (!rc) { 401 list_splice_tail(&staging_q, &u->read_buffers); 402 wake_up(&u->read_waitq); 403 } else { 404 queue_cleanup(&staging_q); 405 } 406 mutex_unlock(&u->reply_mutex); 407 408 kfree(req->body); 409 kfree(req); 410 411 kref_put(&u->kref, xenbus_file_free); 412 413 return; 414 415 out: 416 mutex_unlock(&u->msgbuffer_mutex); 417 } 418 419 static int xenbus_command_reply(struct xenbus_file_priv *u, 420 unsigned int msg_type, const char *reply) 421 { 422 struct { 423 struct xsd_sockmsg hdr; 424 char body[16]; 425 } msg; 426 int rc; 427 428 msg.hdr = u->u.msg; 429 msg.hdr.type = msg_type; 430 msg.hdr.len = strlen(reply) + 1; 431 if (msg.hdr.len > sizeof(msg.body)) 432 return -E2BIG; 433 memcpy(&msg.body, reply, msg.hdr.len); 434 435 mutex_lock(&u->reply_mutex); 436 rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); 437 wake_up(&u->read_waitq); 438 mutex_unlock(&u->reply_mutex); 439 440 if (!rc) 441 kref_put(&u->kref, xenbus_file_free); 442 443 return rc; 444 } 445 446 static int xenbus_write_transaction(unsigned msg_type, 447 struct xenbus_file_priv *u) 448 { 449 int rc; 450 struct xenbus_transaction_holder *trans = NULL; 451 struct { 452 struct xsd_sockmsg hdr; 453 char body[]; 454 } *msg = (void *)u->u.buffer; 455 456 if (msg_type == XS_TRANSACTION_START) { 457 trans = kzalloc(sizeof(*trans), GFP_KERNEL); 458 if (!trans) { 459 rc = -ENOMEM; 460 goto out; 461 } 462 trans->generation_id = xb_dev_generation_id; 463 list_add(&trans->list, &u->transactions); 464 } else if (msg->hdr.tx_id != 0 && 465 !xenbus_get_transaction(u, msg->hdr.tx_id)) 466 return xenbus_command_reply(u, XS_ERROR, "ENOENT"); 467 else if (msg_type == XS_TRANSACTION_END && 468 !(msg->hdr.len == 2 && 469 (!strcmp(msg->body, "T") || !strcmp(msg->body, "F")))) 470 return xenbus_command_reply(u, XS_ERROR, "EINVAL"); 471 else if (msg_type == XS_TRANSACTION_END) { 472 trans = xenbus_get_transaction(u, msg->hdr.tx_id); 473 if (trans && trans->generation_id != xb_dev_generation_id) { 474 list_del(&trans->list); 475 kfree(trans); 476 if (!strcmp(msg->body, "T")) 477 return xenbus_command_reply(u, XS_ERROR, 478 "EAGAIN"); 479 else 480 return xenbus_command_reply(u, 481 XS_TRANSACTION_END, 482 "OK"); 483 } 484 } 485 486 rc = xenbus_dev_request_and_reply(&msg->hdr, u); 487 if (rc && trans) { 488 list_del(&trans->list); 489 kfree(trans); 490 } 491 492 out: 493 return rc; 494 } 495 496 static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) 497 { 498 struct watch_adapter *watch; 499 char *path, *token; 500 int err, rc; 501 502 path = u->u.buffer + sizeof(u->u.msg); 503 token = memchr(path, 0, u->u.msg.len); 504 if (token == NULL) { 505 rc = xenbus_command_reply(u, XS_ERROR, "EINVAL"); 506 goto out; 507 } 508 token++; 509 if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { 510 rc = xenbus_command_reply(u, XS_ERROR, "EINVAL"); 511 goto out; 512 } 513 514 if (msg_type == XS_WATCH) { 515 watch = alloc_watch_adapter(path, token); 516 if (watch == NULL) { 517 rc = -ENOMEM; 518 goto out; 519 } 520 521 watch->watch.callback = watch_fired; 522 watch->dev_data = u; 523 524 err = register_xenbus_watch(&watch->watch); 525 if (err) { 526 free_watch_adapter(watch); 527 rc = err; 528 goto out; 529 } 530 list_add(&watch->list, &u->watches); 531 } else { 532 list_for_each_entry(watch, &u->watches, list) { 533 if (!strcmp(watch->token, token) && 534 !strcmp(watch->watch.node, path)) { 535 unregister_xenbus_watch(&watch->watch); 536 list_del(&watch->list); 537 free_watch_adapter(watch); 538 break; 539 } 540 } 541 } 542 543 /* Success. Synthesize a reply to say all is OK. */ 544 rc = xenbus_command_reply(u, msg_type, "OK"); 545 546 out: 547 return rc; 548 } 549 550 static ssize_t xenbus_file_write(struct file *filp, 551 const char __user *ubuf, 552 size_t len, loff_t *ppos) 553 { 554 struct xenbus_file_priv *u = filp->private_data; 555 uint32_t msg_type; 556 int rc = len; 557 int ret; 558 559 /* 560 * We're expecting usermode to be writing properly formed 561 * xenbus messages. If they write an incomplete message we 562 * buffer it up. Once it is complete, we act on it. 563 */ 564 565 /* 566 * Make sure concurrent writers can't stomp all over each 567 * other's messages and make a mess of our partial message 568 * buffer. We don't make any attemppt to stop multiple 569 * writers from making a mess of each other's incomplete 570 * messages; we're just trying to guarantee our own internal 571 * consistency and make sure that single writes are handled 572 * atomically. 573 */ 574 mutex_lock(&u->msgbuffer_mutex); 575 576 /* Get this out of the way early to avoid confusion */ 577 if (len == 0) 578 goto out; 579 580 /* Can't write a xenbus message larger we can buffer */ 581 if (len > sizeof(u->u.buffer) - u->len) { 582 /* On error, dump existing buffer */ 583 u->len = 0; 584 rc = -EINVAL; 585 goto out; 586 } 587 588 ret = copy_from_user(u->u.buffer + u->len, ubuf, len); 589 590 if (ret != 0) { 591 rc = -EFAULT; 592 goto out; 593 } 594 595 /* Deal with a partial copy. */ 596 len -= ret; 597 rc = len; 598 599 u->len += len; 600 601 /* Return if we haven't got a full message yet */ 602 if (u->len < sizeof(u->u.msg)) 603 goto out; /* not even the header yet */ 604 605 /* If we're expecting a message that's larger than we can 606 possibly send, dump what we have and return an error. */ 607 if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) { 608 rc = -E2BIG; 609 u->len = 0; 610 goto out; 611 } 612 613 if (u->len < (sizeof(u->u.msg) + u->u.msg.len)) 614 goto out; /* incomplete data portion */ 615 616 /* 617 * OK, now we have a complete message. Do something with it. 618 */ 619 620 kref_get(&u->kref); 621 622 msg_type = u->u.msg.type; 623 624 switch (msg_type) { 625 case XS_WATCH: 626 case XS_UNWATCH: 627 /* (Un)Ask for some path to be watched for changes */ 628 ret = xenbus_write_watch(msg_type, u); 629 break; 630 631 default: 632 /* Send out a transaction */ 633 ret = xenbus_write_transaction(msg_type, u); 634 break; 635 } 636 if (ret != 0) { 637 rc = ret; 638 kref_put(&u->kref, xenbus_file_free); 639 } 640 641 /* Buffered message consumed */ 642 u->len = 0; 643 644 out: 645 mutex_unlock(&u->msgbuffer_mutex); 646 return rc; 647 } 648 649 static int xenbus_file_open(struct inode *inode, struct file *filp) 650 { 651 struct xenbus_file_priv *u; 652 653 if (xen_store_evtchn == 0) 654 return -ENOENT; 655 656 stream_open(inode, filp); 657 658 u = kzalloc(sizeof(*u), GFP_KERNEL); 659 if (u == NULL) 660 return -ENOMEM; 661 662 kref_init(&u->kref); 663 664 INIT_LIST_HEAD(&u->transactions); 665 INIT_LIST_HEAD(&u->watches); 666 INIT_LIST_HEAD(&u->read_buffers); 667 init_waitqueue_head(&u->read_waitq); 668 INIT_WORK(&u->wq, xenbus_worker); 669 670 mutex_init(&u->reply_mutex); 671 mutex_init(&u->msgbuffer_mutex); 672 673 filp->private_data = u; 674 675 return 0; 676 } 677 678 static int xenbus_file_release(struct inode *inode, struct file *filp) 679 { 680 struct xenbus_file_priv *u = filp->private_data; 681 682 kref_put(&u->kref, xenbus_file_free); 683 684 return 0; 685 } 686 687 static __poll_t xenbus_file_poll(struct file *file, poll_table *wait) 688 { 689 struct xenbus_file_priv *u = file->private_data; 690 691 poll_wait(file, &u->read_waitq, wait); 692 if (!list_empty(&u->read_buffers)) 693 return EPOLLIN | EPOLLRDNORM; 694 return 0; 695 } 696 697 const struct file_operations xen_xenbus_fops = { 698 .read = xenbus_file_read, 699 .write = xenbus_file_write, 700 .open = xenbus_file_open, 701 .release = xenbus_file_release, 702 .poll = xenbus_file_poll, 703 .llseek = no_llseek, 704 }; 705 EXPORT_SYMBOL_GPL(xen_xenbus_fops); 706 707 static struct miscdevice xenbus_dev = { 708 .minor = MISC_DYNAMIC_MINOR, 709 .name = "xen/xenbus", 710 .fops = &xen_xenbus_fops, 711 }; 712 713 static int __init xenbus_init(void) 714 { 715 int err; 716 717 if (!xen_domain()) 718 return -ENODEV; 719 720 err = misc_register(&xenbus_dev); 721 if (err) 722 pr_err("Could not register xenbus frontend device\n"); 723 return err; 724 } 725 device_initcall(xenbus_init); 726