/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	    /* User VA. */
	u64 consume_page_file;	    /* User VA. */
	u64 produce_page_file_size; /* Size of the file name array. */
	u64 consume_page_file_size; /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass the notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}
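/*
 * Illustrative lifecycle of a /dev/vmci file descriptor as driven by a
 * userspace VMX (a hypothetical sketch based on the handlers below, not
 * part of this driver):
 *
 *	fd = open("/dev/vmci", O_RDWR);
 *	ioctl(fd, IOCTL_VMCI_VERSION2, &vmx_version);  // optional handshake
 *	ioctl(fd, IOCTL_VMCI_INIT_CONTEXT, &init_blk); // creates the context
 *	... datagram, queue pair and doorbell ioctls ...
 *	close(fd);                                     // destroys the context
 */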
/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented
		 * when a context is created through the
		 * IOCTL_VMCI_INIT_CONTEXT ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	unsigned int mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = POLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}
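/*
 * Note that drv_cp_harray_to_user() reports errors on two channels: the
 * returned VMCI status (VMCI_SUCCESS or VMCI_ERROR_MORE_DATA) describes
 * the operation itself, while *retval carries a copy_to_user() failure.
 * Callers must check both, as in this sketch (mirroring
 * vmci_host_do_recv_notifications() below):
 *
 *	info.result = drv_cp_harray_to_user(ubuf, &size, arr, &retval);
 *	if (info.result == VMCI_SUCCESS && !retval)
 *		; // handles were copied out successfully
 */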
/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean from user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	struct page *page;
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock the physical page backing the given user VA.
	 */
	down_read(&current->mm->mmap_sem);
	retval = get_user_pages(current, current->mm,
				uva & PAGE_MASK,
				1, 1, 0, &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (retval != 1)
		return VMCI_ERROR_GENERIC;

	/*
	 * Map the locked page and set up the notify pointer. Remember the
	 * page so that the duplicate-setup check above works and the page
	 * can be unmapped and released later.
	 */
	context->notify_page = page;
	context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required the VMX and the VMCI kernel module to
	 * be version sync'd. All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}
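/*
 * Worked example of the version rules above, assuming the usual
 * VMCI_MAKE_VERSION(major, minor) encoding from vmw_vmci_defs.h: a VMX
 * that sent 0 (or no version at all) gets VMCI_VERSION back; a VMX that
 * sent a pre-VMCI_VERSION_HOSTQP version is echoed its own version,
 * matching the Workstation 6.5 lock-step rule; a VMX at or above
 * VMCI_VERSION_HOSTQP gets VMCI_VERSION and is expected to adapt.
 */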
#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy the cid back to user level; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = kmalloc(send_info.len, GFP_KERNEL);
	if (!dg) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return -ENOMEM;
	}

	if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
			   send_info.len)) {
		vmci_ioctl_err("error getting datagram\n");
		kfree(dg);
		return -EFAULT;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get the source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ?
		-EFAULT : 0;
}
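/*
 * Hypothetical userspace counterpart of the two datagram ioctls above
 * (a sketch, not shipped code): the VMX points info.addr at a buffer
 * holding (or receiving) a struct vmci_datagram header followed by its
 * payload.
 *
 *	struct vmci_datagram_snd_rcv_info info = {
 *		.addr = (uintptr_t)buf,	// header + payload
 *		.len  = sizeof(buf),
 *	};
 *	if (ioctl(fd, IOCTL_VMCI_DATAGRAM_RECEIVE, &info) == 0 &&
 *	    info.result >= VMCI_SUCCESS)
 *		; // buf now holds VMCI_DG_SIZE(dg) bytes
 */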
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}
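/*
 * A convention shared by the queue pair handlers above and below: a
 * negative errno return (-EFAULT, -EINVAL, ...) reports a transport
 * problem with the ioctl itself, while the outcome of the VMCI
 * operation travels back as a VMCI status code in the info block's
 * result field. A caller therefore checks the ioctl return first and
 * only then inspects info.result.
 */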
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller. Note that the
	 * basic premise is that it is incumbent upon the caller not to look
	 * at the info.result field until after the ioctl() returns. And
	 * then, only if the ioctl() result indicates no error. We send up
	 * the SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms. For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing
	 * success fails, then ... the host has to be stopped from writing
	 * (any more) data into the queue_pair. That means an additional test
	 * in the VMCI_Enqueue() code path. Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block. So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed). So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't. Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}
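/*
 * VMCI_SUCCESS_LAST_DETACH is only substituted for pre-NOVMVM VMX'en
 * above; presumably those versions, which manage the queue pair's
 * backing page files themselves (see struct vmci_qp_alloc_info_vmvm),
 * use it as the cue to clean those files up.
 */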
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size,
						   &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
	if (!cpt_buf) {
		vmci_ioctl_err(
			"cannot allocate memory to set cpt state (type=%d)\n",
			set_info.cpt_type);
		return -ENOMEM;
	}

	if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
			   set_info.buf_size)) {
		retval = -EFAULT;
		goto out;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ?
		-EFAULT : 0;

out:
	kfree(cpt_buf);
	return retval;
}
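/*
 * The two checkpoint handlers above are meant to bracket a VM
 * save/restore cycle (a sketch of the intended flow): during a save the
 * VMX issues IOCTL_VMCI_CTX_GET_CPT_STATE per cpt_type and stores the
 * returned blob; on restore it feeds the same blob back through
 * IOCTL_VMCI_CTX_SET_CPT_STATE. In both directions buf_size tells the
 * VMX how large the user buffer must be.
 */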
static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
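/*
 * Doorbell usage implied by the handler above (illustrative): a VMX
 * first issues VMCI_NOTIFY_RESOURCE_ACTION_CREATE for a doorbell
 * handle, rings a peer with ..._ACTION_NOTIFY, and tears the handle
 * down with ..._ACTION_DESTROY. Only VMCI_NOTIFY_RESOURCE_DOOR_BELL can
 * be the target of a NOTIFY; other resources get
 * VMCI_ERROR_UNAVAILABLE.
 */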
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ?
			-EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);	\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner = THIS_MODULE,
	.open = vmci_host_open,
	.release = vmci_host_close,
	.poll = vmci_host_poll,
	.unlocked_ioctl = vmci_host_unlocked_ioctl,
	.compat_ioctl = vmci_host_unlocked_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};
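/*
 * With MISC_DYNAMIC_MINOR the misc core picks the minor number at
 * misc_register() time, so the device shows up as /dev/vmci under the
 * shared misc character major (MISC_MAJOR, i.e. 10).
 */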
int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	int error;

	vmci_host_device_initialized = false;

	error = misc_deregister(&vmci_host_miscdev);
	if (error)
		pr_warn("Error unregistering character device: %d\n", error);

	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}