// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	     /* User VA. */
	u64 consume_page_file;	     /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

int vmci_host_users(void)
{
	return atomic_read(&vmci_host_active_users);
}
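/*
 * For orientation, a sketch of the expected userspace sequence against
 * this device (illustrative only, not part of the driver; error handling
 * omitted, and desired_cid is a placeholder). The VMX first reports its
 * version, then creates its context, and only then uses the other ioctls:
 *
 *	int fd = open("/dev/vmci", O_RDWR);
 *	int version = VMCI_VERSION;
 *	ioctl(fd, IOCTL_VMCI_VERSION2, &version);
 *	struct vmci_init_blk blk = { .cid = desired_cid, .flags = 0 };
 *	ioctl(fd, IOCTL_VMCI_INIT_CONTEXT, &blk);
 *	(blk.cid now holds the context id actually assigned)
 */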
/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented when
		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
		 * ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/*
		 * Read the context only if ct_type == VMCIOBJ_CONTEXT, to
		 * make sure that the context is initialized.
		 */
		context = vmci_host_dev->context;

		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = EPOLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles(handle_array),
				       *user_buf_size);

	return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
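	/*
	 * Only a single page needs to be pinned below: the notify flag is
	 * one byte, so it cannot straddle a page boundary, and mapping the
	 * page containing uva is sufficient. The page is expected to be
	 * unpinned again when the context tears down its notify state.
	 */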
	/*
	 * Lock the physical page backing the given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}
	if (context->notify_page == NULL)
		return VMCI_ERROR_UNAVAILABLE;

	/*
	 * Map the locked page and set up the notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;

		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required that the VMX and the VMCI kernel module
	 * be version sync'd. All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}

#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
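/*
 * Handler for IOCTL_VMCI_INIT_CONTEXT. Creates the VMCI context that
 * backs this file descriptor and copies the resulting context id back
 * to userspace. May only be issued once per open file.
 */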
static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy the cid to userlevel; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	vmci_call_vsock_callback(true);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err("cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get the source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}
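/*
 * Handler for IOCTL_VMCI_DATAGRAM_RECEIVE. Dequeues the next pending
 * datagram for this context, copies it to the caller-supplied buffer,
 * and reports the outcome in the snd_rcv_info block.
 */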
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;

		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							    vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}
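/*
 * Handler for IOCTL_VMCI_QUEUEPAIR_SETVA. A non-zero va in the
 * set_va_info passes down a new VMX mapping address for the queue
 * pair; a zero va indicates that the queue pair is about to be
 * unmapped. Only allowed for VMX versions >= VMCI_VERSION_NOVMVM.
 */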
static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller. Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns. And then,
	 * only if the ioctl() result indicates no error. We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms. For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (any more)
	 * data into the queue_pair. That means an additional test in the
	 * VMCI_Enqueue() code path. Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block. So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed). So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore() failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't. Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}
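/*
 * Handler for IOCTL_VMCI_QUEUEPAIR_DETACH. Detaches the calling
 * context from the given queue pair. For VMX versions older than
 * VMCI_VERSION_NOVMVM, a successful detach is reported as
 * VMCI_SUCCESS_LAST_DETACH rather than plain VMCI_SUCCESS.
 */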
static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size,
						   &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;

		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
			      set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}
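/*
 * Handler for IOCTL_VMCI_GET_CONTEXT_ID. Reports the well-known host
 * context id; unlike most handlers it does not require an initialized
 * context.
 */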
static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;

			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
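/*
 * Handler for IOCTL_VMCI_NOTIFICATIONS_RECEIVE. Copies the arrays of
 * pending doorbell and queue pair handles to the caller-supplied
 * buffers. The pending notifications are only released as consumed if
 * every copy out to userspace succeeds.
 */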
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
						     &db_handle_array,
						     &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
					   db_handle_array, qp_handle_array,
					   info.result == VMCI_SUCCESS &&
					   !retval);

	return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = "IOCTL_VMCI_" # ioctl_name;		\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name	= "vmci",
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &vmuser_fops,
};
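/*
 * Initializes the host personality: creates the well-known host
 * context and registers the /dev/vmci misc device. The host device is
 * only marked initialized once both steps have succeeded.
 */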
int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
				       VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
				       -1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}