1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * VMware VMCI Driver 4 * 5 * Copyright (C) 2012 VMware, Inc. All rights reserved. 6 */ 7 8 #include <linux/vmw_vmci_defs.h> 9 #include <linux/vmw_vmci_api.h> 10 #include <linux/miscdevice.h> 11 #include <linux/interrupt.h> 12 #include <linux/highmem.h> 13 #include <linux/atomic.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/sched.h> 18 #include <linux/cred.h> 19 #include <linux/slab.h> 20 #include <linux/file.h> 21 #include <linux/init.h> 22 #include <linux/poll.h> 23 #include <linux/pci.h> 24 #include <linux/smp.h> 25 #include <linux/fs.h> 26 #include <linux/io.h> 27 28 #include "vmci_handle_array.h" 29 #include "vmci_queue_pair.h" 30 #include "vmci_datagram.h" 31 #include "vmci_doorbell.h" 32 #include "vmci_resource.h" 33 #include "vmci_context.h" 34 #include "vmci_driver.h" 35 #include "vmci_event.h" 36 37 #define VMCI_UTIL_NUM_RESOURCES 1 38 39 enum { 40 VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0, 41 VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1, 42 }; 43 44 enum { 45 VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0, 46 VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1, 47 VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2, 48 }; 49 50 /* 51 * VMCI driver initialization. This block can also be used to 52 * pass initial group membership etc. 53 */ 54 struct vmci_init_blk { 55 u32 cid; 56 u32 flags; 57 }; 58 59 /* VMCIqueue_pairAllocInfo_VMToVM */ 60 struct vmci_qp_alloc_info_vmvm { 61 struct vmci_handle handle; 62 u32 peer; 63 u32 flags; 64 u64 produce_size; 65 u64 consume_size; 66 u64 produce_page_file; /* User VA. */ 67 u64 consume_page_file; /* User VA. */ 68 u64 produce_page_file_size; /* Size of the file name array. */ 69 u64 consume_page_file_size; /* Size of the file name array. */ 70 s32 result; 71 u32 _pad; 72 }; 73 74 /* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */ 75 struct vmci_set_notify_info { 76 u64 notify_uva; 77 s32 result; 78 u32 _pad; 79 }; 80 81 /* 82 * Per-instance host state 83 */ 84 struct vmci_host_dev { 85 struct vmci_ctx *context; 86 int user_version; 87 enum vmci_obj_type ct_type; 88 struct mutex lock; /* Mutex lock for vmci context access */ 89 }; 90 91 static struct vmci_ctx *host_context; 92 static bool vmci_host_device_initialized; 93 static atomic_t vmci_host_active_users = ATOMIC_INIT(0); 94 95 /* 96 * Determines whether the VMCI host personality is 97 * available. Since the core functionality of the host driver is 98 * always present, all guests could possibly use the host 99 * personality. However, to minimize the deviation from the 100 * pre-unified driver state of affairs, we only consider the host 101 * device active if there is no active guest device or if there 102 * are VMX'en with active VMCI contexts using the host device. 103 */ 104 bool vmci_host_code_active(void) 105 { 106 return vmci_host_device_initialized && 107 (!vmci_guest_code_active() || 108 atomic_read(&vmci_host_active_users) > 0); 109 } 110 111 int vmci_host_users(void) 112 { 113 return atomic_read(&vmci_host_active_users); 114 } 115 116 /* 117 * Called on open of /dev/vmci. 118 */ 119 static int vmci_host_open(struct inode *inode, struct file *filp) 120 { 121 struct vmci_host_dev *vmci_host_dev; 122 123 vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL); 124 if (vmci_host_dev == NULL) 125 return -ENOMEM; 126 127 vmci_host_dev->ct_type = VMCIOBJ_NOT_SET; 128 mutex_init(&vmci_host_dev->lock); 129 filp->private_data = vmci_host_dev; 130 131 return 0; 132 } 133 134 /* 135 * Called on close of /dev/vmci, most often when the process 136 * exits. 137 */ 138 static int vmci_host_close(struct inode *inode, struct file *filp) 139 { 140 struct vmci_host_dev *vmci_host_dev = filp->private_data; 141 142 if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) { 143 vmci_ctx_destroy(vmci_host_dev->context); 144 vmci_host_dev->context = NULL; 145 146 /* 147 * The number of active contexts is used to track whether any 148 * VMX'en are using the host personality. It is incremented when 149 * a context is created through the IOCTL_VMCI_INIT_CONTEXT 150 * ioctl. 151 */ 152 atomic_dec(&vmci_host_active_users); 153 } 154 vmci_host_dev->ct_type = VMCIOBJ_NOT_SET; 155 156 kfree(vmci_host_dev); 157 filp->private_data = NULL; 158 return 0; 159 } 160 161 /* 162 * This is used to wake up the VMX when a VMCI call arrives, or 163 * to wake up select() or poll() at the next clock tick. 164 */ 165 static __poll_t vmci_host_poll(struct file *filp, poll_table *wait) 166 { 167 struct vmci_host_dev *vmci_host_dev = filp->private_data; 168 struct vmci_ctx *context; 169 __poll_t mask = 0; 170 171 if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) { 172 /* 173 * Read context only if ct_type == VMCIOBJ_CONTEXT to make 174 * sure that context is initialized 175 */ 176 context = vmci_host_dev->context; 177 178 /* Check for VMCI calls to this VM context. */ 179 if (wait) 180 poll_wait(filp, &context->host_context.wait_queue, 181 wait); 182 183 spin_lock(&context->lock); 184 if (context->pending_datagrams > 0 || 185 vmci_handle_arr_get_size( 186 context->pending_doorbell_array) > 0) { 187 mask = EPOLLIN; 188 } 189 spin_unlock(&context->lock); 190 } 191 return mask; 192 } 193 194 /* 195 * Copies the handles of a handle array into a user buffer, and 196 * returns the new length in userBufferSize. If the copy to the 197 * user buffer fails, the functions still returns VMCI_SUCCESS, 198 * but retval != 0. 199 */ 200 static int drv_cp_harray_to_user(void __user *user_buf_uva, 201 u64 *user_buf_size, 202 struct vmci_handle_arr *handle_array, 203 int *retval) 204 { 205 u32 array_size = 0; 206 struct vmci_handle *handles; 207 208 if (handle_array) 209 array_size = vmci_handle_arr_get_size(handle_array); 210 211 if (array_size * sizeof(*handles) > *user_buf_size) 212 return VMCI_ERROR_MORE_DATA; 213 214 *user_buf_size = array_size * sizeof(*handles); 215 if (*user_buf_size) 216 *retval = copy_to_user(user_buf_uva, 217 vmci_handle_arr_get_handles 218 (handle_array), *user_buf_size); 219 220 return VMCI_SUCCESS; 221 } 222 223 /* 224 * Sets up a given context for notify to work. Maps the notify 225 * boolean in user VA into kernel space. 226 */ 227 static int vmci_host_setup_notify(struct vmci_ctx *context, 228 unsigned long uva) 229 { 230 struct page *page; 231 int retval; 232 233 if (context->notify_page) { 234 pr_devel("%s: Notify mechanism is already set up\n", __func__); 235 return VMCI_ERROR_DUPLICATE_ENTRY; 236 } 237 238 /* 239 * We are using 'bool' internally, but let's make sure we explicit 240 * about the size. 241 */ 242 BUILD_BUG_ON(sizeof(bool) != sizeof(u8)); 243 244 /* 245 * Lock physical page backing a given user VA. 246 */ 247 retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page); 248 if (retval != 1) 249 return VMCI_ERROR_GENERIC; 250 251 context->notify_page = page; 252 253 /* 254 * Map the locked page and set up notify pointer. 255 */ 256 context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1)); 257 vmci_ctx_check_signal_notify(context); 258 259 return VMCI_SUCCESS; 260 } 261 262 static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev, 263 unsigned int cmd, void __user *uptr) 264 { 265 if (cmd == IOCTL_VMCI_VERSION2) { 266 int __user *vptr = uptr; 267 if (get_user(vmci_host_dev->user_version, vptr)) 268 return -EFAULT; 269 } 270 271 /* 272 * The basic logic here is: 273 * 274 * If the user sends in a version of 0 tell it our version. 275 * If the user didn't send in a version, tell it our version. 276 * If the user sent in an old version, tell it -its- version. 277 * If the user sent in an newer version, tell it our version. 278 * 279 * The rationale behind telling the caller its version is that 280 * Workstation 6.5 required that VMX and VMCI kernel module were 281 * version sync'd. All new VMX users will be programmed to 282 * handle the VMCI kernel module version. 283 */ 284 285 if (vmci_host_dev->user_version > 0 && 286 vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) { 287 return vmci_host_dev->user_version; 288 } 289 290 return VMCI_VERSION; 291 } 292 293 #define vmci_ioctl_err(fmt, ...) \ 294 pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__) 295 296 static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev, 297 const char *ioctl_name, 298 void __user *uptr) 299 { 300 struct vmci_init_blk init_block; 301 const struct cred *cred; 302 int retval; 303 304 if (copy_from_user(&init_block, uptr, sizeof(init_block))) { 305 vmci_ioctl_err("error reading init block\n"); 306 return -EFAULT; 307 } 308 309 mutex_lock(&vmci_host_dev->lock); 310 311 if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) { 312 vmci_ioctl_err("received VMCI init on initialized handle\n"); 313 retval = -EINVAL; 314 goto out; 315 } 316 317 if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) { 318 vmci_ioctl_err("unsupported VMCI restriction flag\n"); 319 retval = -EINVAL; 320 goto out; 321 } 322 323 cred = get_current_cred(); 324 vmci_host_dev->context = vmci_ctx_create(init_block.cid, 325 init_block.flags, 0, 326 vmci_host_dev->user_version, 327 cred); 328 put_cred(cred); 329 if (IS_ERR(vmci_host_dev->context)) { 330 retval = PTR_ERR(vmci_host_dev->context); 331 vmci_ioctl_err("error initializing context\n"); 332 goto out; 333 } 334 335 /* 336 * Copy cid to userlevel, we do this to allow the VMX 337 * to enforce its policy on cid generation. 338 */ 339 init_block.cid = vmci_ctx_get_id(vmci_host_dev->context); 340 if (copy_to_user(uptr, &init_block, sizeof(init_block))) { 341 vmci_ctx_destroy(vmci_host_dev->context); 342 vmci_host_dev->context = NULL; 343 vmci_ioctl_err("error writing init block\n"); 344 retval = -EFAULT; 345 goto out; 346 } 347 348 vmci_host_dev->ct_type = VMCIOBJ_CONTEXT; 349 atomic_inc(&vmci_host_active_users); 350 351 vmci_call_vsock_callback(true); 352 353 retval = 0; 354 355 out: 356 mutex_unlock(&vmci_host_dev->lock); 357 return retval; 358 } 359 360 static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev, 361 const char *ioctl_name, 362 void __user *uptr) 363 { 364 struct vmci_datagram_snd_rcv_info send_info; 365 struct vmci_datagram *dg = NULL; 366 u32 cid; 367 368 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 369 vmci_ioctl_err("only valid for contexts\n"); 370 return -EINVAL; 371 } 372 373 if (copy_from_user(&send_info, uptr, sizeof(send_info))) 374 return -EFAULT; 375 376 if (send_info.len > VMCI_MAX_DG_SIZE) { 377 vmci_ioctl_err("datagram is too big (size=%d)\n", 378 send_info.len); 379 return -EINVAL; 380 } 381 382 if (send_info.len < sizeof(*dg)) { 383 vmci_ioctl_err("datagram is too small (size=%d)\n", 384 send_info.len); 385 return -EINVAL; 386 } 387 388 dg = memdup_user((void __user *)(uintptr_t)send_info.addr, 389 send_info.len); 390 if (IS_ERR(dg)) { 391 vmci_ioctl_err( 392 "cannot allocate memory to dispatch datagram\n"); 393 return PTR_ERR(dg); 394 } 395 396 if (VMCI_DG_SIZE(dg) != send_info.len) { 397 vmci_ioctl_err("datagram size mismatch\n"); 398 kfree(dg); 399 return -EINVAL; 400 } 401 402 pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n", 403 dg->dst.context, dg->dst.resource, 404 dg->src.context, dg->src.resource, 405 (unsigned long long)dg->payload_size); 406 407 /* Get source context id. */ 408 cid = vmci_ctx_get_id(vmci_host_dev->context); 409 send_info.result = vmci_datagram_dispatch(cid, dg, true); 410 kfree(dg); 411 412 return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0; 413 } 414 415 static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev, 416 const char *ioctl_name, 417 void __user *uptr) 418 { 419 struct vmci_datagram_snd_rcv_info recv_info; 420 struct vmci_datagram *dg = NULL; 421 int retval; 422 size_t size; 423 424 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 425 vmci_ioctl_err("only valid for contexts\n"); 426 return -EINVAL; 427 } 428 429 if (copy_from_user(&recv_info, uptr, sizeof(recv_info))) 430 return -EFAULT; 431 432 size = recv_info.len; 433 recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context, 434 &size, &dg); 435 436 if (recv_info.result >= VMCI_SUCCESS) { 437 void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr; 438 retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg)); 439 kfree(dg); 440 if (retval != 0) 441 return -EFAULT; 442 } 443 444 return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0; 445 } 446 447 static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev, 448 const char *ioctl_name, 449 void __user *uptr) 450 { 451 struct vmci_handle handle; 452 int vmci_status; 453 int __user *retptr; 454 455 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 456 vmci_ioctl_err("only valid for contexts\n"); 457 return -EINVAL; 458 } 459 460 if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { 461 struct vmci_qp_alloc_info_vmvm alloc_info; 462 struct vmci_qp_alloc_info_vmvm __user *info = uptr; 463 464 if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) 465 return -EFAULT; 466 467 handle = alloc_info.handle; 468 retptr = &info->result; 469 470 vmci_status = vmci_qp_broker_alloc(alloc_info.handle, 471 alloc_info.peer, 472 alloc_info.flags, 473 VMCI_NO_PRIVILEGE_FLAGS, 474 alloc_info.produce_size, 475 alloc_info.consume_size, 476 NULL, 477 vmci_host_dev->context); 478 479 if (vmci_status == VMCI_SUCCESS) 480 vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE; 481 } else { 482 struct vmci_qp_alloc_info alloc_info; 483 struct vmci_qp_alloc_info __user *info = uptr; 484 struct vmci_qp_page_store page_store; 485 486 if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) 487 return -EFAULT; 488 489 handle = alloc_info.handle; 490 retptr = &info->result; 491 492 page_store.pages = alloc_info.ppn_va; 493 page_store.len = alloc_info.num_ppns; 494 495 vmci_status = vmci_qp_broker_alloc(alloc_info.handle, 496 alloc_info.peer, 497 alloc_info.flags, 498 VMCI_NO_PRIVILEGE_FLAGS, 499 alloc_info.produce_size, 500 alloc_info.consume_size, 501 &page_store, 502 vmci_host_dev->context); 503 } 504 505 if (put_user(vmci_status, retptr)) { 506 if (vmci_status >= VMCI_SUCCESS) { 507 vmci_status = vmci_qp_broker_detach(handle, 508 vmci_host_dev->context); 509 } 510 return -EFAULT; 511 } 512 513 return 0; 514 } 515 516 static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev, 517 const char *ioctl_name, 518 void __user *uptr) 519 { 520 struct vmci_qp_set_va_info set_va_info; 521 struct vmci_qp_set_va_info __user *info = uptr; 522 s32 result; 523 524 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 525 vmci_ioctl_err("only valid for contexts\n"); 526 return -EINVAL; 527 } 528 529 if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { 530 vmci_ioctl_err("is not allowed\n"); 531 return -EINVAL; 532 } 533 534 if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info))) 535 return -EFAULT; 536 537 if (set_va_info.va) { 538 /* 539 * VMX is passing down a new VA for the queue 540 * pair mapping. 541 */ 542 result = vmci_qp_broker_map(set_va_info.handle, 543 vmci_host_dev->context, 544 set_va_info.va); 545 } else { 546 /* 547 * The queue pair is about to be unmapped by 548 * the VMX. 549 */ 550 result = vmci_qp_broker_unmap(set_va_info.handle, 551 vmci_host_dev->context, 0); 552 } 553 554 return put_user(result, &info->result) ? -EFAULT : 0; 555 } 556 557 static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev, 558 const char *ioctl_name, 559 void __user *uptr) 560 { 561 struct vmci_qp_page_file_info page_file_info; 562 struct vmci_qp_page_file_info __user *info = uptr; 563 s32 result; 564 565 if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP || 566 vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) { 567 vmci_ioctl_err("not supported on this VMX (version=%d)\n", 568 vmci_host_dev->user_version); 569 return -EINVAL; 570 } 571 572 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 573 vmci_ioctl_err("only valid for contexts\n"); 574 return -EINVAL; 575 } 576 577 if (copy_from_user(&page_file_info, uptr, sizeof(*info))) 578 return -EFAULT; 579 580 /* 581 * Communicate success pre-emptively to the caller. Note that the 582 * basic premise is that it is incumbent upon the caller not to look at 583 * the info.result field until after the ioctl() returns. And then, 584 * only if the ioctl() result indicates no error. We send up the 585 * SUCCESS status before calling SetPageStore() store because failing 586 * to copy up the result code means unwinding the SetPageStore(). 587 * 588 * It turns out the logic to unwind a SetPageStore() opens a can of 589 * worms. For example, if a host had created the queue_pair and a 590 * guest attaches and SetPageStore() is successful but writing success 591 * fails, then ... the host has to be stopped from writing (anymore) 592 * data into the queue_pair. That means an additional test in the 593 * VMCI_Enqueue() code path. Ugh. 594 */ 595 596 if (put_user(VMCI_SUCCESS, &info->result)) { 597 /* 598 * In this case, we can't write a result field of the 599 * caller's info block. So, we don't even try to 600 * SetPageStore(). 601 */ 602 return -EFAULT; 603 } 604 605 result = vmci_qp_broker_set_page_store(page_file_info.handle, 606 page_file_info.produce_va, 607 page_file_info.consume_va, 608 vmci_host_dev->context); 609 if (result < VMCI_SUCCESS) { 610 if (put_user(result, &info->result)) { 611 /* 612 * Note that in this case the SetPageStore() 613 * call failed but we were unable to 614 * communicate that to the caller (because the 615 * copy_to_user() call failed). So, if we 616 * simply return an error (in this case 617 * -EFAULT) then the caller will know that the 618 * SetPageStore failed even though we couldn't 619 * put the result code in the result field and 620 * indicate exactly why it failed. 621 * 622 * That says nothing about the issue where we 623 * were once able to write to the caller's info 624 * memory and now can't. Something more 625 * serious is probably going on than the fact 626 * that SetPageStore() didn't work. 627 */ 628 return -EFAULT; 629 } 630 } 631 632 return 0; 633 } 634 635 static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev, 636 const char *ioctl_name, 637 void __user *uptr) 638 { 639 struct vmci_qp_dtch_info detach_info; 640 struct vmci_qp_dtch_info __user *info = uptr; 641 s32 result; 642 643 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 644 vmci_ioctl_err("only valid for contexts\n"); 645 return -EINVAL; 646 } 647 648 if (copy_from_user(&detach_info, uptr, sizeof(detach_info))) 649 return -EFAULT; 650 651 result = vmci_qp_broker_detach(detach_info.handle, 652 vmci_host_dev->context); 653 if (result == VMCI_SUCCESS && 654 vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { 655 result = VMCI_SUCCESS_LAST_DETACH; 656 } 657 658 return put_user(result, &info->result) ? -EFAULT : 0; 659 } 660 661 static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev, 662 const char *ioctl_name, 663 void __user *uptr) 664 { 665 struct vmci_ctx_info ar_info; 666 struct vmci_ctx_info __user *info = uptr; 667 s32 result; 668 u32 cid; 669 670 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 671 vmci_ioctl_err("only valid for contexts\n"); 672 return -EINVAL; 673 } 674 675 if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) 676 return -EFAULT; 677 678 cid = vmci_ctx_get_id(vmci_host_dev->context); 679 result = vmci_ctx_add_notification(cid, ar_info.remote_cid); 680 681 return put_user(result, &info->result) ? -EFAULT : 0; 682 } 683 684 static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev, 685 const char *ioctl_name, 686 void __user *uptr) 687 { 688 struct vmci_ctx_info ar_info; 689 struct vmci_ctx_info __user *info = uptr; 690 u32 cid; 691 int result; 692 693 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 694 vmci_ioctl_err("only valid for contexts\n"); 695 return -EINVAL; 696 } 697 698 if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) 699 return -EFAULT; 700 701 cid = vmci_ctx_get_id(vmci_host_dev->context); 702 result = vmci_ctx_remove_notification(cid, 703 ar_info.remote_cid); 704 705 return put_user(result, &info->result) ? -EFAULT : 0; 706 } 707 708 static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev, 709 const char *ioctl_name, 710 void __user *uptr) 711 { 712 struct vmci_ctx_chkpt_buf_info get_info; 713 u32 cid; 714 void *cpt_buf; 715 int retval; 716 717 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 718 vmci_ioctl_err("only valid for contexts\n"); 719 return -EINVAL; 720 } 721 722 if (copy_from_user(&get_info, uptr, sizeof(get_info))) 723 return -EFAULT; 724 725 cid = vmci_ctx_get_id(vmci_host_dev->context); 726 get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type, 727 &get_info.buf_size, &cpt_buf); 728 if (get_info.result == VMCI_SUCCESS && get_info.buf_size) { 729 void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf; 730 retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size); 731 kfree(cpt_buf); 732 733 if (retval) 734 return -EFAULT; 735 } 736 737 return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0; 738 } 739 740 static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev, 741 const char *ioctl_name, 742 void __user *uptr) 743 { 744 struct vmci_ctx_chkpt_buf_info set_info; 745 u32 cid; 746 void *cpt_buf; 747 int retval; 748 749 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 750 vmci_ioctl_err("only valid for contexts\n"); 751 return -EINVAL; 752 } 753 754 if (copy_from_user(&set_info, uptr, sizeof(set_info))) 755 return -EFAULT; 756 757 cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf, 758 set_info.buf_size); 759 if (IS_ERR(cpt_buf)) 760 return PTR_ERR(cpt_buf); 761 762 cid = vmci_ctx_get_id(vmci_host_dev->context); 763 set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type, 764 set_info.buf_size, cpt_buf); 765 766 retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0; 767 768 kfree(cpt_buf); 769 return retval; 770 } 771 772 static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev, 773 const char *ioctl_name, 774 void __user *uptr) 775 { 776 u32 __user *u32ptr = uptr; 777 778 return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0; 779 } 780 781 static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev, 782 const char *ioctl_name, 783 void __user *uptr) 784 { 785 struct vmci_set_notify_info notify_info; 786 787 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 788 vmci_ioctl_err("only valid for contexts\n"); 789 return -EINVAL; 790 } 791 792 if (copy_from_user(¬ify_info, uptr, sizeof(notify_info))) 793 return -EFAULT; 794 795 if (notify_info.notify_uva) { 796 notify_info.result = 797 vmci_host_setup_notify(vmci_host_dev->context, 798 notify_info.notify_uva); 799 } else { 800 vmci_ctx_unset_notify(vmci_host_dev->context); 801 notify_info.result = VMCI_SUCCESS; 802 } 803 804 return copy_to_user(uptr, ¬ify_info, sizeof(notify_info)) ? 805 -EFAULT : 0; 806 } 807 808 static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev, 809 const char *ioctl_name, 810 void __user *uptr) 811 { 812 struct vmci_dbell_notify_resource_info info; 813 u32 cid; 814 815 if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { 816 vmci_ioctl_err("invalid for current VMX versions\n"); 817 return -EINVAL; 818 } 819 820 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 821 vmci_ioctl_err("only valid for contexts\n"); 822 return -EINVAL; 823 } 824 825 if (copy_from_user(&info, uptr, sizeof(info))) 826 return -EFAULT; 827 828 cid = vmci_ctx_get_id(vmci_host_dev->context); 829 830 switch (info.action) { 831 case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY: 832 if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) { 833 u32 flags = VMCI_NO_PRIVILEGE_FLAGS; 834 info.result = vmci_ctx_notify_dbell(cid, info.handle, 835 flags); 836 } else { 837 info.result = VMCI_ERROR_UNAVAILABLE; 838 } 839 break; 840 841 case VMCI_NOTIFY_RESOURCE_ACTION_CREATE: 842 info.result = vmci_ctx_dbell_create(cid, info.handle); 843 break; 844 845 case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY: 846 info.result = vmci_ctx_dbell_destroy(cid, info.handle); 847 break; 848 849 default: 850 vmci_ioctl_err("got unknown action (action=%d)\n", 851 info.action); 852 info.result = VMCI_ERROR_INVALID_ARGS; 853 } 854 855 return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; 856 } 857 858 static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev, 859 const char *ioctl_name, 860 void __user *uptr) 861 { 862 struct vmci_ctx_notify_recv_info info; 863 struct vmci_handle_arr *db_handle_array; 864 struct vmci_handle_arr *qp_handle_array; 865 void __user *ubuf; 866 u32 cid; 867 int retval = 0; 868 869 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 870 vmci_ioctl_err("only valid for contexts\n"); 871 return -EINVAL; 872 } 873 874 if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { 875 vmci_ioctl_err("not supported for the current vmx version\n"); 876 return -EINVAL; 877 } 878 879 if (copy_from_user(&info, uptr, sizeof(info))) 880 return -EFAULT; 881 882 if ((info.db_handle_buf_size && !info.db_handle_buf_uva) || 883 (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) { 884 return -EINVAL; 885 } 886 887 cid = vmci_ctx_get_id(vmci_host_dev->context); 888 889 info.result = vmci_ctx_rcv_notifications_get(cid, 890 &db_handle_array, &qp_handle_array); 891 if (info.result != VMCI_SUCCESS) 892 return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; 893 894 ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva; 895 info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size, 896 db_handle_array, &retval); 897 if (info.result == VMCI_SUCCESS && !retval) { 898 ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva; 899 info.result = drv_cp_harray_to_user(ubuf, 900 &info.qp_handle_buf_size, 901 qp_handle_array, &retval); 902 } 903 904 if (!retval && copy_to_user(uptr, &info, sizeof(info))) 905 retval = -EFAULT; 906 907 vmci_ctx_rcv_notifications_release(cid, 908 db_handle_array, qp_handle_array, 909 info.result == VMCI_SUCCESS && !retval); 910 911 return retval; 912 } 913 914 static long vmci_host_unlocked_ioctl(struct file *filp, 915 unsigned int iocmd, unsigned long ioarg) 916 { 917 #define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \ 918 char *name = "IOCTL_VMCI_" # ioctl_name; \ 919 return vmci_host_do_ ## ioctl_fn( \ 920 vmci_host_dev, name, uptr); \ 921 } while (0) 922 923 struct vmci_host_dev *vmci_host_dev = filp->private_data; 924 void __user *uptr = (void __user *)ioarg; 925 926 switch (iocmd) { 927 case IOCTL_VMCI_INIT_CONTEXT: 928 VMCI_DO_IOCTL(INIT_CONTEXT, init_context); 929 case IOCTL_VMCI_DATAGRAM_SEND: 930 VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram); 931 case IOCTL_VMCI_DATAGRAM_RECEIVE: 932 VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram); 933 case IOCTL_VMCI_QUEUEPAIR_ALLOC: 934 VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair); 935 case IOCTL_VMCI_QUEUEPAIR_SETVA: 936 VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva); 937 case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE: 938 VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf); 939 case IOCTL_VMCI_QUEUEPAIR_DETACH: 940 VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach); 941 case IOCTL_VMCI_CTX_ADD_NOTIFICATION: 942 VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify); 943 case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION: 944 VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify); 945 case IOCTL_VMCI_CTX_GET_CPT_STATE: 946 VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state); 947 case IOCTL_VMCI_CTX_SET_CPT_STATE: 948 VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state); 949 case IOCTL_VMCI_GET_CONTEXT_ID: 950 VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id); 951 case IOCTL_VMCI_SET_NOTIFY: 952 VMCI_DO_IOCTL(SET_NOTIFY, set_notify); 953 case IOCTL_VMCI_NOTIFY_RESOURCE: 954 VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource); 955 case IOCTL_VMCI_NOTIFICATIONS_RECEIVE: 956 VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications); 957 958 case IOCTL_VMCI_VERSION: 959 case IOCTL_VMCI_VERSION2: 960 return vmci_host_get_version(vmci_host_dev, iocmd, uptr); 961 962 default: 963 pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd); 964 return -EINVAL; 965 } 966 967 #undef VMCI_DO_IOCTL 968 } 969 970 static const struct file_operations vmuser_fops = { 971 .owner = THIS_MODULE, 972 .open = vmci_host_open, 973 .release = vmci_host_close, 974 .poll = vmci_host_poll, 975 .unlocked_ioctl = vmci_host_unlocked_ioctl, 976 .compat_ioctl = compat_ptr_ioctl, 977 }; 978 979 static struct miscdevice vmci_host_miscdev = { 980 .name = "vmci", 981 .minor = MISC_DYNAMIC_MINOR, 982 .fops = &vmuser_fops, 983 }; 984 985 int __init vmci_host_init(void) 986 { 987 int error; 988 989 host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID, 990 VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, 991 -1, VMCI_VERSION, NULL); 992 if (IS_ERR(host_context)) { 993 error = PTR_ERR(host_context); 994 pr_warn("Failed to initialize VMCIContext (error%d)\n", 995 error); 996 return error; 997 } 998 999 error = misc_register(&vmci_host_miscdev); 1000 if (error) { 1001 pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n", 1002 vmci_host_miscdev.name, 1003 MISC_MAJOR, vmci_host_miscdev.minor, 1004 error); 1005 pr_warn("Unable to initialize host personality\n"); 1006 vmci_ctx_destroy(host_context); 1007 return error; 1008 } 1009 1010 pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n", 1011 vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor); 1012 1013 vmci_host_device_initialized = true; 1014 return 0; 1015 } 1016 1017 void __exit vmci_host_exit(void) 1018 { 1019 vmci_host_device_initialized = false; 1020 1021 misc_deregister(&vmci_host_miscdev); 1022 vmci_ctx_destroy(host_context); 1023 vmci_qp_broker_exit(); 1024 1025 pr_debug("VMCI host driver module unloaded\n"); 1026 } 1027