/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file implements defines and helper functions. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <machine/bus.h>

#include "vmci.h"
#include "vmci_defs.h"
#include "vmci_kernel_defs.h"
#include "vmci_kernel_if.h"
#include "vmci_queue.h"

struct vmci_queue_kernel_if {
	size_t			num_pages;	/* Num pages incl. header. */
	struct vmci_dma_alloc	*dmas;		/* For dma alloc. */
};

/*
 *------------------------------------------------------------------------------
 *
 * vmci_init_lock --
 *
 *     Initializes the lock. Must be called before use.
 *
 * Results:
 *     Always VMCI_SUCCESS.
 *
 * Side effects:
 *     Thread can block.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_init_lock(vmci_lock *lock, char *name)
{

	mtx_init(lock, name, NULL, MTX_DEF | MTX_NOWITNESS);
	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_cleanup_lock --
 *
 *     Cleans up the lock. Must be called before deallocating the lock.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Deletes kernel lock state.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_cleanup_lock(vmci_lock *lock)
{

	mtx_destroy(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_grab_lock --
 *
 *     Grabs the given lock.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Thread can block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_grab_lock(vmci_lock *lock)
{

	mtx_lock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_release_lock --
 *
 *     Releases the given lock.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     A thread blocked on this lock may wake up.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_release_lock(vmci_lock *lock)
{

	mtx_unlock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_grab_lock_bh --
 *
 *     Grabs the given lock.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_grab_lock_bh(vmci_lock *lock)
{

	mtx_lock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_release_lock_bh --
 *
 *     Releases the given lock.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_release_lock_bh(vmci_lock *lock)
{

	mtx_unlock(lock);
}
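/*
 * Note: vmci_grab_lock_bh()/vmci_release_lock_bh() are plain aliases for
 * mtx_lock()/mtx_unlock() here; there is no separate "bottom half" context
 * to exclude, so a single MTX_DEF mutex covers both cases. A hypothetical
 * caller (illustrative only, "example_lock" is not part of this driver)
 * would pair the primitives as follows:
 *
 *	vmci_lock example_lock;
 *
 *	vmci_init_lock(&example_lock, "example");
 *	vmci_grab_lock_bh(&example_lock);
 *	... touch data shared with the interrupt path ...
 *	vmci_release_lock_bh(&example_lock);
 *	vmci_cleanup_lock(&example_lock);
 */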
/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_kernel_mem --
 *
 *     Allocates physically contiguous memory for the VMCI driver.
 *
 * Results:
 *     The address allocated or NULL on error.
 *
 * Side effects:
 *     Memory may be allocated.
 *
 *------------------------------------------------------------------------------
 */

void *
vmci_alloc_kernel_mem(size_t size, int flags)
{
	void *ptr;

	if ((flags & VMCI_MEMORY_ATOMIC) != 0)
		ptr = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xFFFFFFFF,
		    8, 1024 * 1024);
	else
		ptr = contigmalloc(size, M_DEVBUF, M_WAITOK, 0, 0xFFFFFFFF,
		    8, 1024 * 1024);

	return (ptr);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_kernel_mem --
 *
 *     Frees kernel memory allocated for the VMCI driver.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_kernel_mem(void *ptr, size_t size)
{

	contigfree(ptr, size, M_DEVBUF);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_can_schedule_delayed_work --
 *
 *     Checks to see if the given platform supports delayed work callbacks.
 *
 * Results:
 *     true if it does, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_can_schedule_delayed_work(void)
{

	return (true);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_schedule_delayed_work --
 *
 *     Schedules the specified callback.
 *
 * Results:
 *     Zero on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_schedule_delayed_work(vmci_work_fn *work_fn, void *data)
{

	return (vmci_schedule_delayed_work_fn(work_fn, data));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_create_event --
 *
 *     Initializes the event as an unsignalled semaphore.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_create_event(vmci_event *event)
{

	sema_init(event, 0, "vmci_event");
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_destroy_event --
 *
 *     Destroys the event.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_destroy_event(vmci_event *event)
{

	if (mtx_owned(&event->sema_mtx))
		sema_destroy(event);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_signal_event --
 *
 *     Signals the event.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     A thread blocked on the event may wake up.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_signal_event(vmci_event *event)
{

	sema_post(event);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_wait_on_event --
 *
 *     Invokes release_cb and waits until the event is signalled.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Thread can block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_wait_on_event(vmci_event *event, vmci_event_release_cb release_cb,
    void *client_data)
{

	release_cb(client_data);
	sema_wait(event);
}
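/*
 * Note: the vmci_event primitives above wrap a counting semaphore that is
 * created unsignalled. vmci_wait_on_event() first runs the caller-supplied
 * release_cb (typically used to drop a lock protecting the wait condition)
 * and then sleeps in sema_wait(); because the semaphore retains posts, a
 * vmci_signal_event() that lands between the callback and the sema_wait()
 * call is not lost.
 */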
/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_init --
 *
 *     Initializes the mutex. Must be called before use.
 *
 * Results:
 *     Always VMCI_SUCCESS.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_mutex_init(vmci_mutex *mutex, char *name)
{

	mtx_init(mutex, name, NULL, MTX_DEF | MTX_NOWITNESS);
	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_destroy --
 *
 *     Destroys the mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_destroy(vmci_mutex *mutex)
{

	mtx_destroy(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_acquire --
 *
 *     Acquires the mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Thread may block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_acquire(vmci_mutex *mutex)
{

	mtx_lock(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_release --
 *
 *     Releases the mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     May wake up a thread blocked on this mutex.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_release(vmci_mutex *mutex)
{

	mtx_unlock(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_queue --
 *
 *     Allocates kernel queue pages of specified size with IOMMU mappings,
 *     plus space for the queue structure/kernel interface and the queue
 *     header.
 *
 * Results:
 *     Pointer to the queue on success, NULL otherwise.
 *
 * Side effects:
 *     Memory is allocated.
 *
 *------------------------------------------------------------------------------
 */

void *
vmci_alloc_queue(uint64_t size, uint32_t flags)
{
	struct vmci_queue *queue;
	size_t i;
	const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
	const size_t dmas_size = num_pages * sizeof(struct vmci_dma_alloc);
	const size_t queue_size =
	    sizeof(*queue) + sizeof(*(queue->kernel_if)) + dmas_size;

	/* Size should be enforced by vmci_qpair_alloc(), double-check here. */
	if (size > VMCI_MAX_GUEST_QP_MEMORY) {
		ASSERT(false);
		return (NULL);
	}

	queue = malloc(queue_size, M_DEVBUF, M_NOWAIT);
	if (!queue)
		return (NULL);

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kernel_if *)(queue + 1);
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->dmas = (struct vmci_dma_alloc *)(queue->kernel_if +
	    1);
	for (i = 0; i < num_pages; i++) {
		vmci_dma_malloc(PAGE_SIZE, 1, &queue->kernel_if->dmas[i]);
		if (!queue->kernel_if->dmas[i].dma_vaddr) {
			/* Size excl. the header. */
			vmci_free_queue(queue, i * PAGE_SIZE);
			return (NULL);
		}
	}

	/* Queue header is the first page. */
	queue->q_header = (void *)queue->kernel_if->dmas[0].dma_vaddr;

	return ((void *)queue);
}
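/*
 * Note on the allocation above (derived from the code, layout is
 * illustrative): a single malloc'ed region holds
 *
 *	[struct vmci_queue][struct vmci_queue_kernel_if][dmas[0..num_pages-1]]
 *
 * while the queue pages themselves come from vmci_dma_malloc(), one
 * PAGE_SIZE allocation per entry. dmas[0] backs the queue header (exposed
 * via q_header) and dmas[1..num_pages-1] back the data pages, which is why
 * num_pages is the requested size in pages plus one.
 */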
/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_queue --
 *
 *     Frees kernel VA space for a given queue and its queue header, and
 *     frees physical data pages.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_queue(void *q, uint64_t size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
		uint64_t i;

		/* Given size doesn't include header, so add in a page here. */
		for (i = 0; i < num_pages; i++)
			vmci_dma_free(&queue->kernel_if->dmas[i]);
		free(queue, M_DEVBUF);
	}
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_ppn_set --
 *
 *     Allocates two lists of PPNs --- one for the pages in the produce
 *     queue, and the other for the pages in the consume queue. Initializes
 *     the lists of PPNs with the page frame numbers of the pages backing
 *     the two queues (and the queue headers).
 *
 * Results:
 *     Success or failure.
 *
 * Side effects:
 *     Memory may be allocated.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_alloc_ppn_set(void *prod_q, uint64_t num_produce_pages, void *cons_q,
    uint64_t num_consume_pages, struct ppn_set *ppn_set)
{
	struct vmci_queue *consume_q = cons_q;
	struct vmci_queue *produce_q = prod_q;
	vmci_ppn_list consume_ppns;
	vmci_ppn_list produce_ppns;
	uint64_t i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return (VMCI_ERROR_INVALID_ARGS);

	if (ppn_set->initialized)
		return (VMCI_ERROR_ALREADY_EXISTS);

	produce_ppns =
	    vmci_alloc_kernel_mem(num_produce_pages * sizeof(*produce_ppns),
	    VMCI_MEMORY_NORMAL);
	if (!produce_ppns)
		return (VMCI_ERROR_NO_MEM);

	consume_ppns =
	    vmci_alloc_kernel_mem(num_consume_pages * sizeof(*consume_ppns),
	    VMCI_MEMORY_NORMAL);
	if (!consume_ppns) {
		vmci_free_kernel_mem(produce_ppns,
		    num_produce_pages * sizeof(*produce_ppns));
		return (VMCI_ERROR_NO_MEM);
	}

	for (i = 0; i < num_produce_pages; i++) {
		unsigned long pfn;

		produce_ppns[i] =
		    pfn = produce_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;

		/*
		 * Fail allocation if PFN isn't supported by hypervisor.
		 */

		if (sizeof(pfn) >
		    sizeof(*produce_ppns) && pfn != produce_ppns[i])
			goto ppn_error;
	}
	for (i = 0; i < num_consume_pages; i++) {
		unsigned long pfn;

		consume_ppns[i] =
		    pfn = consume_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;

		/*
		 * Fail allocation if PFN isn't supported by hypervisor.
		 */

		if (sizeof(pfn) >
		    sizeof(*consume_ppns) && pfn != consume_ppns[i])
			goto ppn_error;
	}

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return (VMCI_SUCCESS);

ppn_error:
	vmci_free_kernel_mem(produce_ppns, num_produce_pages *
	    sizeof(*produce_ppns));
	vmci_free_kernel_mem(consume_ppns, num_consume_pages *
	    sizeof(*consume_ppns));
	return (VMCI_ERROR_INVALID_ARGS);
}
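/*
 * Note on the PFN check in vmci_alloc_ppn_set() above: the entries of a
 * vmci_ppn_list may be narrower than "unsigned long" (e.g. 32-bit PPNs on a
 * 64-bit kernel, depending on how the type is declared in the headers), so
 * each PFN is stored and then compared back against the full-width value;
 * if the page lies beyond what the PPN type can represent, both lists are
 * released and VMCI_ERROR_INVALID_ARGS is returned rather than handing the
 * hypervisor a truncated page number.
 */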
/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_ppn_set --
 *
 *     Frees the two lists of PPNs for a queue pair.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_ppn_set(struct ppn_set *ppn_set)
{

	ASSERT(ppn_set);
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		ASSERT(ppn_set->produce_ppns && ppn_set->consume_ppns);
		vmci_free_kernel_mem(ppn_set->produce_ppns,
		    ppn_set->num_produce_pages *
		    sizeof(*ppn_set->produce_ppns));
		vmci_free_kernel_mem(ppn_set->consume_ppns,
		    ppn_set->num_consume_pages *
		    sizeof(*ppn_set->consume_ppns));
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_populate_ppn_list --
 *
 *     Populates the list of PPNs in the hypercall structure with the PPNs
 *     of the produce queue and the consume queue.
 *
 * Results:
 *     VMCI_SUCCESS.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_populate_ppn_list(uint8_t *call_buf, const struct ppn_set *ppn_set)
{

	ASSERT(call_buf && ppn_set && ppn_set->initialized);
	memcpy(call_buf, ppn_set->produce_ppns,
	    ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
	memcpy(call_buf + ppn_set->num_produce_pages *
	    sizeof(*ppn_set->produce_ppns), ppn_set->consume_ppns,
	    ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_{to,from}iovec --
 *
 *     These helper routines copy the specified bytes to/from memory that is
 *     specified as a struct iovec. The routines cannot verify the
 *     correctness of the struct iovec's contents.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_memcpy_toiovec(struct iovec *iov, uint8_t *src, size_t len)
{

	while (len > 0) {
		if (iov->iov_len) {
			size_t to_copy = MIN(iov->iov_len, len);

			memcpy(iov->iov_base, src, to_copy);
			src += to_copy;
			len -= to_copy;
			iov->iov_base = (void *)((uintptr_t)iov->iov_base +
			    to_copy);
			iov->iov_len -= to_copy;
		}
		iov++;
	}
}

static inline void
vmci_memcpy_fromiovec(uint8_t *dst, struct iovec *iov, size_t len)
{

	while (len > 0) {
		if (iov->iov_len) {
			size_t to_copy = MIN(iov->iov_len, len);

			memcpy(dst, iov->iov_base, to_copy);
			dst += to_copy;
			len -= to_copy;
			iov->iov_base = (void *)((uintptr_t)iov->iov_base +
			    to_copy);
			iov->iov_len -= to_copy;
		}
		iov++;
	}
}
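/*
 * Note: the two helpers above consume the iovec destructively: for every
 * chunk copied, iov_base is advanced and iov_len reduced, and entries that
 * reach zero length are skipped on the next pass. Repeated calls against
 * the same iovec therefore continue where the previous call stopped, which
 * is what lets the queue copy routines below pass the iovec itself instead
 * of applying an explicit source/destination offset.
 */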
/*
 *------------------------------------------------------------------------------
 *
 * __vmci_memcpy_to_queue --
 *
 *     Copies from a given buffer or iovector to a VMCI Queue. Assumes that
 *     offset + size does not wrap around in the queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

#pragma GCC diagnostic ignored "-Wcast-qual"
static int
__vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t size, bool is_iovec)
{
	struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const uint64_t page_index =
		    (queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		/* Skip header. */
		va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;

		ASSERT(va);
		/*
		 * Fill up the page if we have enough payload, or else
		 * copy the remaining bytes.
		 */
		to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);

		if (is_iovec) {
			struct iovec *iov = (struct iovec *)src;

			/* The iovec will track bytes_copied internally. */
			vmci_memcpy_fromiovec((uint8_t *)va + page_offset,
			    iov, to_copy);
		} else
			memcpy((uint8_t *)va + page_offset,
			    (uint8_t *)src + bytes_copied, to_copy);
		bytes_copied += to_copy;
	}

	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * __vmci_memcpy_from_queue --
 *
 *     Copies to a given buffer or iovector from a VMCI Queue. Assumes that
 *     offset + size does not wrap around in the queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
__vmci_memcpy_from_queue(void *dest, const struct vmci_queue *queue,
    uint64_t queue_offset, size_t size, bool is_iovec)
{
	struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const uint64_t page_index =
		    (queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		/* Skip header. */
		va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;

		ASSERT(va);
		/*
		 * Fill up the page if we have enough payload, or else
		 * copy the remaining bytes.
		 */
		to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);

		if (is_iovec) {
			struct iovec *iov = (struct iovec *)dest;

			/* The iovec will track bytes_copied internally. */
			vmci_memcpy_toiovec(iov, (uint8_t *)va +
			    page_offset, to_copy);
		} else
			memcpy((uint8_t *)dest + bytes_copied,
			    (uint8_t *)va + page_offset, to_copy);

		bytes_copied += to_copy;
	}

	return (VMCI_SUCCESS);
}
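/*
 * Note: the "-Wcast-qual" pragma above __vmci_memcpy_to_queue() is needed
 * because, when is_iovec is set, the const-qualified src pointer is really
 * a struct iovec * that the iovec helpers update in place, so the cast that
 * drops the qualifier is intentional; as written (no push/pop), the pragma
 * stays in effect for the remainder of this file. Both internal copy
 * routines also index dmas[page_index + 1] because dmas[0] is the queue
 * header page, not queue payload.
 */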
/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue --
 *
 *     Copies from a given buffer to a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_to_queue(queue, queue_offset,
	    (uint8_t *)src + src_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue --
 *
 *     Copies to a given buffer from a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
	    queue, queue_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue_local --
 *
 *     Copies from a given buffer to a local VMCI queue. This is the
 *     same as a regular copy.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue_local(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_to_queue(queue, queue_offset,
	    (uint8_t *)src + src_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue_local --
 *
 *     Copies to a given buffer from a local VMCI queue. This is the
 *     same as a regular copy.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue_local(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
	    queue, queue_offset, size, false));
}
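/*
 * Note: in the four wrappers above, buf_type is unused in this
 * implementation (seemingly retained only for interface compatibility with
 * other platforms) and can_block is merely asserted, since these copies are
 * always performed synchronously. The *_v variants below receive a struct
 * iovec * through the src/dest parameter and therefore ignore the explicit
 * offset argument; the iovec tracks its own position.
 */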
/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue_v --
 *
 *     Copies from a given iovec to a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

	ASSERT(can_block);

	/*
	 * We ignore src_offset because src is really a struct iovec * and
	 * will maintain offset internally.
	 */
	return (__vmci_memcpy_to_queue(queue, queue_offset, src, size,
	    true));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue_v --
 *
 *     Copies to a given iovec from a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue_v(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

	ASSERT(can_block);

	/*
	 * We ignore dest_offset because dest is really a struct iovec * and
	 * will maintain offset internally.
	 */
	return (__vmci_memcpy_from_queue(dest, queue, queue_offset, size,
	    true));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_read_port_bytes --
 *
 *     Copies memory from an I/O port to kernel memory.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_read_port_bytes(vmci_io_handle handle, vmci_io_port port, uint8_t *buffer,
    size_t buffer_length)
{

	insb(port, buffer, buffer_length);
}