xref: /freebsd/sys/dev/vmware/vmci/vmci_kernel_if.c (revision eda14cbc264d6969b02f2b1994cef11148e914f1)
1 /*-
2  * Copyright (c) 2018 VMware, Inc.
3  *
4  * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
5  */
6 
7 /* This file implements defines and helper functions. */
8 
9 #include <sys/cdefs.h>
10 __FBSDID("$FreeBSD$");
11 
12 #include <sys/malloc.h>
13 #include <sys/proc.h>
14 #include <sys/uio.h>
15 
16 #include <machine/bus.h>
17 
18 #include "vmci.h"
19 #include "vmci_defs.h"
20 #include "vmci_kernel_defs.h"
21 #include "vmci_kernel_if.h"
22 #include "vmci_queue.h"
23 
24 struct vmci_queue_kernel_if {
25 	size_t			num_pages;	/* Num pages incl. header. */
26 	struct vmci_dma_alloc	*dmas;		/* For dma alloc. */
27 };
28 
29 /*
30  *------------------------------------------------------------------------------
31  *
32  * vmci_init_lock
33  *
34  *     Initializes the lock. Must be called before use.
35  *
36  * Results:
37  *     Always VMCI_SUCCESS.
38  *
39  * Side effects:
40  *     Thread can block.
41  *
42  *------------------------------------------------------------------------------
43  */
44 
45 int
46 vmci_init_lock(vmci_lock *lock, char *name)
47 {
48 
49 	mtx_init(lock, name, NULL, MTX_DEF | MTX_NOWITNESS);
50 	return (VMCI_SUCCESS);
51 }
52 
53 /*
54  *------------------------------------------------------------------------------
55  *
56  * vmci_cleanup_lock
57  *
58  *     Cleanup the lock. Must be called before deallocating lock.
59  *
60  * Results:
61  *     None
62  *
63  * Side effects:
64  *     Deletes kernel lock state
65  *
66  *------------------------------------------------------------------------------
67  */
68 
69 void
70 vmci_cleanup_lock(vmci_lock *lock)
71 {
72 
73 	mtx_destroy(lock);
74 }
75 
76 /*
77  *------------------------------------------------------------------------------
78  *
79  * vmci_grab_lock
80  *
81  *     Grabs the given lock.
82  *
83  * Results:
84  *      None
85  *
86  * Side effects:
87  *      Thread can block.
88  *
89  *------------------------------------------------------------------------------
90  */
91 
92 void
93 vmci_grab_lock(vmci_lock *lock)
94 {
95 
96 	mtx_lock(lock);
97 }
98 
99 /*
100  *------------------------------------------------------------------------------
101  *
102  * vmci_release_lock
103  *
104  *     Releases the given lock.
105  *
106  * Results:
107  *     None
108  *
109  * Side effects:
110  *     A thread blocked on this lock may wake up.
111  *
112  *------------------------------------------------------------------------------
113  */
114 
115 void
116 vmci_release_lock(vmci_lock *lock)
117 {
118 
119 	mtx_unlock(lock);
120 }
121 
122 /*
123  *------------------------------------------------------------------------------
124  *
125  * vmci_grab_lock_bh
126  *
127  *     Grabs the given lock.
128  *
129  * Results:
130  *     None
131  *
132  * Side effects:
133  *     None.
134  *
135  *------------------------------------------------------------------------------
136  */
137 
138 void
139 vmci_grab_lock_bh(vmci_lock *lock)
140 {
141 
142 	mtx_lock(lock);
143 }
144 
145 /*
146  *------------------------------------------------------------------------------
147  *
148  * vmci_release_lock_bh
149  *
150  *     Releases the given lock.
151  *
152  * Results:
153  *     None
154  *
155  * Side effects:
156  *     None.
157  *
158  *------------------------------------------------------------------------------
159  */
160 
161 void
162 vmci_release_lock_bh(vmci_lock *lock)
163 {
164 
165 	mtx_unlock(lock);
166 }
167 
168 /*
169  *------------------------------------------------------------------------------
170  *
171  * vmci_alloc_kernel_mem
172  *
173  *     Allocate physically contiguous memory for the VMCI driver.
174  *
175  * Results:
176  *     The address allocated or NULL on error.
177  *
178  *
179  * Side effects:
180  *     Memory may be allocated.
181  *
182  *------------------------------------------------------------------------------
183  */
184 
185 void *
186 vmci_alloc_kernel_mem(size_t size, int flags)
187 {
188 	void *ptr;
189 
190 	if ((flags & VMCI_MEMORY_ATOMIC) != 0)
191 		ptr = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xFFFFFFFF,
192 		    8, 1024 * 1024);
193 	else
194 		ptr = contigmalloc(size, M_DEVBUF, M_WAITOK, 0, 0xFFFFFFFF,
195 		    8, 1024 * 1024);
196 
197 	return (ptr);
198 }
199 
200 /*
201  *------------------------------------------------------------------------------
202  *
203  * vmci_free_kernel_mem
204  *
205  *     Free kernel memory allocated for the VMCI driver.
206  *
207  * Results:
208  *     None.
209  *
210  * Side effects:
211  *     Memory is freed.
212  *
213  *------------------------------------------------------------------------------
214  */
215 
216 void
217 vmci_free_kernel_mem(void *ptr, size_t size)
218 {
219 
220 	contigfree(ptr, size, M_DEVBUF);
221 }
222 
223 /*
224  *------------------------------------------------------------------------------
225  *
226  * vmci_can_schedule_delayed_work --
227  *
228  *     Checks to see if the given platform supports delayed work callbacks.
229  *
230  * Results:
231  *     true if it does. false otherwise.
232  *
233  * Side effects:
234  *     None.
235  *
236  *------------------------------------------------------------------------------
237  */
238 
239 bool
240 vmci_can_schedule_delayed_work(void)
241 {
242 
243 	return (true);
244 }
245 
246 /*
247  *------------------------------------------------------------------------------
248  *
249  * vmci_schedule_delayed_work --
250  *
251  *     Schedule the specified callback.
252  *
253  * Results:
254  *     Zero on success, error code otherwise.
255  *
256  * Side effects:
257  *     None.
258  *
259  *------------------------------------------------------------------------------
260  */
261 
262 int
263 vmci_schedule_delayed_work(vmci_work_fn *work_fn, void *data)
264 {
265 
266 	return (vmci_schedule_delayed_work_fn(work_fn, data));
267 }
268 
269 /*
270  *------------------------------------------------------------------------------
271  *
272  * vmci_create_event --
273  *
274  * Results:
275  *     None.
276  *
277  * Side effects:
278  *     None.
279  *
280  *------------------------------------------------------------------------------
281  */
282 
283 void
284 vmci_create_event(vmci_event *event)
285 {
286 
287 	sema_init(event, 0, "vmci_event");
288 }
289 
290 /*
291  *------------------------------------------------------------------------------
292  *
293  * vmci_destroy_event --
294  *
295  * Results:
296  *     None.
297  *
298  * Side effects:
299  *     None.
300  *
301  *------------------------------------------------------------------------------
302  */
303 
304 void
305 vmci_destroy_event(vmci_event *event)
306 {
307 
308 	if (mtx_owned(&event->sema_mtx))
309 		sema_destroy(event);
310 }
311 
312 /*
313  *------------------------------------------------------------------------------
314  *
315  * vmci_signal_event --
316  *
317  * Results:
318  *     None.
319  *
320  * Side effects:
321  *     None.
322  *
323  *------------------------------------------------------------------------------
324  */
325 
326 void
327 vmci_signal_event(vmci_event *event)
328 {
329 
330 	sema_post(event);
331 }
332 
333 /*
334  *------------------------------------------------------------------------------
335  *
336  * vmci_wait_on_event --
337  *
338  * Results:
339  *     None.
340  *
341  * Side effects:
342  *     None.
343  *
344  *------------------------------------------------------------------------------
345  */
346 
347 void
348 vmci_wait_on_event(vmci_event *event, vmci_event_release_cb release_cb,
349     void *client_data)
350 {
351 
352 	release_cb(client_data);
353 	sema_wait(event);
354 }
355 
356 /*
357  *------------------------------------------------------------------------------
358  *
359  * vmci_mutex_init --
360  *
361  *     Initializes the mutex. Must be called before use.
362  *
363  * Results:
364  *     Success.
365  *
366  * Side effects:
367  *     None.
368  *
369  *------------------------------------------------------------------------------
370  */
371 
372 int
373 vmci_mutex_init(vmci_mutex *mutex, char *name)
374 {
375 
376 	mtx_init(mutex, name, NULL, MTX_DEF | MTX_NOWITNESS);
377 	return (VMCI_SUCCESS);
378 }
379 
380 /*
381  *------------------------------------------------------------------------------
382  *
383  * vmci_mutex_destroy --
384  *
385  *     Destroys the mutex.
386  *
387  * Results:
388  *     None.
389  *
390  * Side effects:
391  *     None.
392  *
393  *------------------------------------------------------------------------------
394  */
395 
396 void
397 vmci_mutex_destroy(vmci_mutex *mutex)
398 {
399 
400 	mtx_destroy(mutex);
401 }
402 
403 /*
404  *------------------------------------------------------------------------------
405  *
406  * vmci_mutex_acquire --
407  *
408  *     Acquires the mutex.
409  *
410  * Results:
411  *     None.
412  *
413  * Side effects:
414  *     Thread may block.
415  *
416  *------------------------------------------------------------------------------
417  */
418 
419 void
420 vmci_mutex_acquire(vmci_mutex *mutex)
421 {
422 
423 	mtx_lock(mutex);
424 }
425 
426 /*
427  *------------------------------------------------------------------------------
428  *
429  * vmci_mutex_release --
430  *
431  *     Releases the mutex.
432  *
433  * Results:
434  *     None.
435  *
436  * Side effects:
437  *     May wake up the thread blocking on this mutex.
438  *
439  *------------------------------------------------------------------------------
440  */
441 
442 void
443 vmci_mutex_release(vmci_mutex *mutex)
444 {
445 
446 	mtx_unlock(mutex);
447 }
448 
449 /*
450  *------------------------------------------------------------------------------
451  *
452  * vmci_alloc_queue --
453  *
454  *     Allocates kernel queue pages of specified size with IOMMU mappings, plus
455  *     space for the queue structure/kernel interface and the queue header.
456  *
457  * Results:
458  *     Pointer to the queue on success, NULL otherwise.
459  *
460  * Side effects:
461  *     Memory is allocated.
462  *
463  *------------------------------------------------------------------------------
464  */
465 
466 void *
467 vmci_alloc_queue(uint64_t size, uint32_t flags)
468 {
469 	struct vmci_queue *queue;
470 	size_t i;
471 	const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
472 	const size_t dmas_size = num_pages * sizeof(struct vmci_dma_alloc);
473 	const size_t queue_size =
474 	    sizeof(*queue) + sizeof(*(queue->kernel_if)) + dmas_size;
475 
476 	/* Size should be enforced by vmci_qpair_alloc(), double-check here. */
477 	if (size > VMCI_MAX_GUEST_QP_MEMORY) {
478 		ASSERT(false);
479 		return (NULL);
480 	}
481 
482 	queue = malloc(queue_size, M_DEVBUF, M_NOWAIT);
483 	if (!queue)
484 		return (NULL);
485 
486 	queue->q_header = NULL;
487 	queue->saved_header = NULL;
488 	queue->kernel_if = (struct vmci_queue_kernel_if *)(queue + 1);
489 	queue->kernel_if->num_pages = num_pages;
490 	queue->kernel_if->dmas = (struct vmci_dma_alloc *)(queue->kernel_if +
491 	    1);
492 	for (i = 0; i < num_pages; i++) {
493 		vmci_dma_malloc(PAGE_SIZE, 1, &queue->kernel_if->dmas[i]);
494 		if (!queue->kernel_if->dmas[i].dma_vaddr) {
495 			/* Size excl. the header. */
496 			vmci_free_queue(queue, i * PAGE_SIZE);
497 			return (NULL);
498 		}
499 	}
500 
501 	/* Queue header is the first page. */
502 	queue->q_header = (void *)queue->kernel_if->dmas[0].dma_vaddr;
503 
504 	return ((void *)queue);
505 }
506 
507 /*
508  *------------------------------------------------------------------------------
509  *
510  * vmci_free_queue --
511  *
512  *     Frees kernel VA space for a given queue and its queue header, and frees
513  *     physical data pages.
514  *
515  * Results:
516  *     None.
517  *
518  * Side effects:
519  *     Memory is freed.
520  *
521  *------------------------------------------------------------------------------
522  */
523 
524 void
525 vmci_free_queue(void *q, uint64_t size)
526 {
527 	struct vmci_queue *queue = q;
528 
529 	if (queue) {
530 		const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
531 		uint64_t i;
532 
533 		/* Given size doesn't include header, so add in a page here. */
534 		for (i = 0; i < num_pages; i++)
535 			vmci_dma_free(&queue->kernel_if->dmas[i]);
536 		free(queue, M_DEVBUF);
537 	}
538 }
539 
540 /*
541  *------------------------------------------------------------------------------
542  *
543  * vmci_alloc_ppn_set --
544  *
545  *     Allocates two list of PPNs --- one for the pages in the produce queue,
546  *     and the other for the pages in the consume queue. Intializes the list of
547  *     PPNs with the page frame numbers of the KVA for the two queues (and the
548  *     queue headers).
549  *
550  * Results:
551  *     Success or failure.
552  *
553  * Side effects:
554  *     Memory may be allocated.
555  *
556  *-----------------------------------------------------------------------------
557  */
558 
559 int
560 vmci_alloc_ppn_set(void *prod_q, uint64_t num_produce_pages, void *cons_q,
561     uint64_t num_consume_pages, struct ppn_set *ppn_set)
562 {
563 	struct vmci_queue *consume_q = cons_q;
564 	struct vmci_queue *produce_q = prod_q;
565 	vmci_ppn_list consume_ppns;
566 	vmci_ppn_list produce_ppns;
567 	uint64_t i;
568 
569 	if (!produce_q || !num_produce_pages || !consume_q ||
570 	    !num_consume_pages || !ppn_set)
571 		return (VMCI_ERROR_INVALID_ARGS);
572 
573 	if (ppn_set->initialized)
574 		return (VMCI_ERROR_ALREADY_EXISTS);
575 
576 	produce_ppns =
577 	    vmci_alloc_kernel_mem(num_produce_pages * sizeof(*produce_ppns),
578 	    VMCI_MEMORY_NORMAL);
579 	if (!produce_ppns)
580 		return (VMCI_ERROR_NO_MEM);
581 
582 	consume_ppns =
583 	    vmci_alloc_kernel_mem(num_consume_pages * sizeof(*consume_ppns),
584 	    VMCI_MEMORY_NORMAL);
585 	if (!consume_ppns) {
586 		vmci_free_kernel_mem(produce_ppns,
587 		    num_produce_pages * sizeof(*produce_ppns));
588 		return (VMCI_ERROR_NO_MEM);
589 	}
590 
591 	for (i = 0; i < num_produce_pages; i++) {
592 		unsigned long pfn;
593 
594 		produce_ppns[i] =
595 		    pfn = produce_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;
596 
597 		/*
598 		 * Fail allocation if PFN isn't supported by hypervisor.
599 		 */
600 
601 		if (sizeof(pfn) >
602 		    sizeof(*produce_ppns) && pfn != produce_ppns[i])
603 			goto ppn_error;
604 	}
605 	for (i = 0; i < num_consume_pages; i++) {
606 		unsigned long pfn;
607 
608 		consume_ppns[i] =
609 		    pfn = consume_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;
610 
611 		/*
612 		 * Fail allocation if PFN isn't supported by hypervisor.
613 		 */
614 
615 		if (sizeof(pfn) >
616 		    sizeof(*consume_ppns) && pfn != consume_ppns[i])
617 			goto ppn_error;
618 
619 	}
620 
621 	ppn_set->num_produce_pages = num_produce_pages;
622 	ppn_set->num_consume_pages = num_consume_pages;
623 	ppn_set->produce_ppns = produce_ppns;
624 	ppn_set->consume_ppns = consume_ppns;
625 	ppn_set->initialized = true;
626 	return (VMCI_SUCCESS);
627 
628 ppn_error:
629 	vmci_free_kernel_mem(produce_ppns, num_produce_pages *
630 	    sizeof(*produce_ppns));
631 	vmci_free_kernel_mem(consume_ppns, num_consume_pages *
632 	    sizeof(*consume_ppns));
633 	return (VMCI_ERROR_INVALID_ARGS);
634 }
635 
636 /*
637  *------------------------------------------------------------------------------
638  *
639  * vmci_free_ppn_set --
640  *
641  *     Frees the two list of PPNs for a queue pair.
642  *
643  * Results:
644  *     None.
645  *
646  * Side effects:
647  *     None.
648  *
649  *------------------------------------------------------------------------------
650  */
651 
652 void
653 vmci_free_ppn_set(struct ppn_set *ppn_set)
654 {
655 
656 	ASSERT(ppn_set);
657 	if (ppn_set->initialized) {
658 		/* Do not call these functions on NULL inputs. */
659 		ASSERT(ppn_set->produce_ppns && ppn_set->consume_ppns);
660 		vmci_free_kernel_mem(ppn_set->produce_ppns,
661 		    ppn_set->num_produce_pages *
662 		    sizeof(*ppn_set->produce_ppns));
663 		vmci_free_kernel_mem(ppn_set->consume_ppns,
664 		    ppn_set->num_consume_pages *
665 		    sizeof(*ppn_set->consume_ppns));
666 	}
667 	memset(ppn_set, 0, sizeof(*ppn_set));
668 }
669 
670 /*
671  *------------------------------------------------------------------------------
672  *
673  * vmci_populate_ppn_list --
674  *
675  *     Populates the list of PPNs in the hypercall structure with the PPNS
676  *     of the produce queue and the consume queue.
677  *
678  * Results:
679  *     VMCI_SUCCESS.
680  *
681  * Side effects:
682  *     None.
683  *
684  *------------------------------------------------------------------------------
685  */
686 
687 int
688 vmci_populate_ppn_list(uint8_t *call_buf, const struct ppn_set *ppn_set)
689 {
690 
691 	ASSERT(call_buf && ppn_set && ppn_set->initialized);
692 	memcpy(call_buf, ppn_set->produce_ppns,
693 	    ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
694 	memcpy(call_buf + ppn_set->num_produce_pages *
695 	    sizeof(*ppn_set->produce_ppns), ppn_set->consume_ppns,
696 	    ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
697 
698 	return (VMCI_SUCCESS);
699 }
700 
701 /*
702  *------------------------------------------------------------------------------
703  *
704  * vmci_memcpy_{to,from}iovec --
705  *
706  *     These helper routines will copy the specified bytes to/from memory that's
707  *     specified as a struct iovec.  The routines can not verify the correctness
708  *     of the struct iovec's contents.
709  *
710  * Results:
711  *      None.
712  *
713  * Side effects:
714  *      None.
715  *
716  *------------------------------------------------------------------------------
717  */
718 
719 static inline void
720 vmci_memcpy_toiovec(struct iovec *iov, uint8_t *src, size_t len)
721 {
722 
723 	while (len > 0) {
724 		if (iov->iov_len) {
725 			size_t to_copy = MIN(iov->iov_len, len);
726 			memcpy(iov->iov_base, src, to_copy);
727 			src += to_copy;
728 			len -= to_copy;
729 			iov->iov_base = (void *)((uintptr_t) iov->iov_base +
730 			    to_copy);
731 			iov->iov_len -= to_copy;
732 		}
733 		iov++;
734 	}
735 }
736 
737 static inline void
738 vmci_memcpy_fromiovec(uint8_t *dst, struct iovec *iov, size_t len)
739 {
740 
741 	while (len > 0) {
742 		if (iov->iov_len) {
743 			size_t to_copy = MIN(iov->iov_len, len);
744 			memcpy(dst, iov->iov_base, to_copy);
745 			dst += to_copy;
746 			len -= to_copy;
747 			iov->iov_base = (void *)((uintptr_t) iov->iov_base +
748 			    to_copy);
749 			iov->iov_len -= to_copy;
750 		}
751 		iov++;
752 	}
753 }
754 
755 /*
756  *------------------------------------------------------------------------------
757  *
758  * __vmci_memcpy_to_queue --
759  *
760  *     Copies from a given buffer or iovector to a VMCI Queue. Assumes that
761  *     offset + size does not wrap around in the queue.
762  *
763  * Results:
764  *     Zero on success, negative error code on failure.
765  *
766  * Side effects:
767  *     None.
768  *
769  *------------------------------------------------------------------------------
770  */
771 
772 #pragma GCC diagnostic ignored "-Wcast-qual"
773 static int
774 __vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
775     const void *src, size_t size, bool is_iovec)
776 {
777 	struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
778 	size_t bytes_copied = 0;
779 
780 	while (bytes_copied < size) {
781 		const uint64_t page_index =
782 		    (queue_offset + bytes_copied) / PAGE_SIZE;
783 		const size_t page_offset =
784 		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
785 		void *va;
786 		size_t to_copy;
787 
788 		/* Skip header. */
789 		va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;
790 
791 		ASSERT(va);
792 		/*
793 		 * Fill up the page if we have enough payload, or else
794 		 * copy the remaining bytes.
795 		 */
796 		to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);
797 
798 		if (is_iovec) {
799 			struct iovec *iov = (struct iovec *)src;
800 
801 			/* The iovec will track bytes_copied internally. */
802 			vmci_memcpy_fromiovec((uint8_t *)va + page_offset,
803 			    iov, to_copy);
804 		} else
805 			memcpy((uint8_t *)va + page_offset,
806 			    (uint8_t *)src + bytes_copied, to_copy);
807 		bytes_copied += to_copy;
808 	}
809 
810 	return (VMCI_SUCCESS);
811 }
812 
813 /*
814  *------------------------------------------------------------------------------
815  *
816  * __vmci_memcpy_from_queue --
817  *
818  *     Copies to a given buffer or iovector from a VMCI Queue. Assumes that
819  *     offset + size does not wrap around in the queue.
820  *
821  * Results:
822  *     Zero on success, negative error code on failure.
823  *
824  * Side effects:
825  *     None.
826  *
827  *------------------------------------------------------------------------------
828  */
829 
830 static int
831 __vmci_memcpy_from_queue(void *dest, const struct vmci_queue *queue,
832     uint64_t queue_offset, size_t size, bool is_iovec)
833 {
834 	struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
835 	size_t bytes_copied = 0;
836 
837 	while (bytes_copied < size) {
838 		const uint64_t page_index =
839 		    (queue_offset + bytes_copied) / PAGE_SIZE;
840 		const size_t page_offset =
841 		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
842 		void *va;
843 		size_t to_copy;
844 
845 		/* Skip header. */
846 		va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;
847 
848 		ASSERT(va);
849 		/*
850 		 * Fill up the page if we have enough payload, or else
851 		 * copy the remaining bytes.
852 		 */
853 		to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);
854 
855 		if (is_iovec) {
856 			struct iovec *iov = (struct iovec *)dest;
857 
858 			/* The iovec will track bytesCopied internally. */
859 			vmci_memcpy_toiovec(iov, (uint8_t *)va +
860 			    page_offset, to_copy);
861 		} else
862 			memcpy((uint8_t *)dest + bytes_copied,
863 			    (uint8_t *)va + page_offset, to_copy);
864 
865 		bytes_copied += to_copy;
866 	}
867 
868 	return (VMCI_SUCCESS);
869 }
870 
871 /*
872  *------------------------------------------------------------------------------
873  *
874  * vmci_memcpy_to_queue --
875  *
876  *     Copies from a given buffer to a VMCI Queue.
877  *
878  * Results:
879  *     Zero on success, negative error code on failure.
880  *
881  * Side effects:
882  *     None.
883  *
884  *------------------------------------------------------------------------------
885  */
886 
887 int
888 vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
889     const void *src, size_t src_offset, size_t size, int buf_type,
890     bool can_block)
891 {
892 
893 	ASSERT(can_block);
894 
895 	return (__vmci_memcpy_to_queue(queue, queue_offset,
896 	    (uint8_t *)src + src_offset, size, false));
897 }
898 
899 /*
900  *------------------------------------------------------------------------------
901  *
902  * vmci_memcpy_from_queue --
903  *
904  *      Copies to a given buffer from a VMCI Queue.
905  *
906  * Results:
907  *      Zero on success, negative error code on failure.
908  *
909  * Side effects:
910  *      None.
911  *
912  *------------------------------------------------------------------------------
913  */
914 
915 int
916 vmci_memcpy_from_queue(void *dest, size_t dest_offset,
917     const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
918     int buf_type, bool can_block)
919 {
920 
921 	ASSERT(can_block);
922 
923 	return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
924 	    queue, queue_offset, size, false));
925 }
926 
927 /*
928  *------------------------------------------------------------------------------
929  *
930  * vmci_memcpy_to_queue_local --
931  *
932  *     Copies from a given buffer to a local VMCI queue. This is the
933  *     same as a regular copy.
934  *
935  * Results:
936  *     Zero on success, negative error code on failure.
937  *
938  * Side effects:
939  *     None.
940  *
941  *------------------------------------------------------------------------------
942  */
943 
944 int
945 vmci_memcpy_to_queue_local(struct vmci_queue *queue, uint64_t queue_offset,
946     const void *src, size_t src_offset, size_t size, int buf_type,
947     bool can_block)
948 {
949 
950 	ASSERT(can_block);
951 
952 	return (__vmci_memcpy_to_queue(queue, queue_offset,
953 	    (uint8_t *)src + src_offset, size, false));
954 }
955 
956 /*
957  *------------------------------------------------------------------------------
958  *
959  * vmci_memcpy_from_queue_local --
960  *
961  *     Copies to a given buffer from a VMCI Queue.
962  *
963  * Results:
964  *     Zero on success, negative error code on failure.
965  *
966  * Side effects:
967  *     None.
968  *
969  *------------------------------------------------------------------------------
970  */
971 
972 int
973 vmci_memcpy_from_queue_local(void *dest, size_t dest_offset,
974     const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
975     int buf_type, bool can_block)
976 {
977 
978 	ASSERT(can_block);
979 
980 	return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
981 	    queue, queue_offset, size, false));
982 }
983 
984 /*------------------------------------------------------------------------------
985  *
986  * vmci_memcpy_to_queue_v --
987  *
988  *     Copies from a given iovec from a VMCI Queue.
989  *
990  * Results:
991  *     Zero on success, negative error code on failure.
992  *
993  * Side effects:
994  *     None.
995  *
996  *------------------------------------------------------------------------------
997  */
998 
999 int
1000 vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
1001     const void *src, size_t src_offset, size_t size, int buf_type,
1002     bool can_block)
1003 {
1004 
1005 	ASSERT(can_block);
1006 
1007 	/*
1008 	 * We ignore src_offset because src is really a struct iovec * and will
1009 	 * maintain offset internally.
1010 	 */
1011 	return (__vmci_memcpy_to_queue(queue, queue_offset, src, size,
1012 	    true));
1013 }
1014 
1015 /*
1016  *------------------------------------------------------------------------------
1017  *
1018  * vmci_memcpy_from_queue_v --
1019  *
1020  *     Copies to a given iovec from a VMCI Queue.
1021  *
1022  * Results:
1023  *     Zero on success, negative error code on failure.
1024  *
1025  * Side effects:
1026  *     None.
1027  *
1028  *------------------------------------------------------------------------------
1029  */
1030 
1031 int
1032 vmci_memcpy_from_queue_v(void *dest, size_t dest_offset,
1033     const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
1034     int buf_type, bool can_block)
1035 {
1036 
1037 	ASSERT(can_block);
1038 
1039 	/*
1040 	 * We ignore dest_offset because dest is really a struct iovec * and
1041 	 * will maintain offset internally.
1042 	 */
1043 	return (__vmci_memcpy_from_queue(dest, queue, queue_offset, size,
1044 	    true));
1045 }
1046 
1047 /*
1048  *------------------------------------------------------------------------------
1049  *
1050  * vmci_read_port_bytes --
1051  *
1052  *     Copy memory from an I/O port to kernel memory.
1053  *
1054  * Results:
1055  *     No results.
1056  *
1057  * Side effects:
1058  *     None.
1059  *
1060  *------------------------------------------------------------------------------
1061  */
1062 
1063 void
1064 vmci_read_port_bytes(vmci_io_handle handle, vmci_io_port port, uint8_t *buffer,
1065     size_t buffer_length)
1066 {
1067 
1068 	insb(port, buffer, buffer_length);
1069 }
1070