/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* VMCI QueuePair API implementation. */

#include <sys/cdefs.h>
#include "vmci.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_queue_pair.h"

#define LGPFX	"vmci_queue_pair: "

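/*
 * Common bookkeeping for a queue pair endpoint. The ref_count field counts
 * the local creators/attachers of the pair; an entry is destroyed only when
 * it drops back to zero (see vmci_queue_pair_detach_guest_work()).
 */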
struct queue_pair_entry {
	vmci_list_item(queue_pair_entry) list_item;
	struct vmci_handle handle;
	vmci_id		peer;
	uint32_t	flags;
	uint64_t	produce_size;
	uint64_t	consume_size;
	uint32_t	ref_count;
};

struct qp_guest_endpoint {
	struct queue_pair_entry qp;
	uint64_t	num_ppns;
	void		*produce_q;
	void		*consume_q;
	bool		hibernate_failure;
	struct ppn_set	ppn_set;
};

struct queue_pair_list {
	vmci_list(queue_pair_entry) head;
	volatile int	hibernate;
	vmci_mutex	mutex;
};

#define QPE_NUM_PAGES(_QPE)						\
	((uint32_t)(CEILING(_QPE.produce_size, PAGE_SIZE) +		\
	CEILING(_QPE.consume_size, PAGE_SIZE) + 2))
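
/*
 * Worked example (illustrative, assuming a 4 KB PAGE_SIZE): a queue pair
 * with produce_size = 8192 and consume_size = 100 spans
 * CEILING(8192, 4096) + CEILING(100, 4096) + 2 = 2 + 1 + 2 = 5 pages,
 * the "+ 2" accounting for one header page per queue.
 */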

static struct queue_pair_list qp_guest_endpoints;

static struct	queue_pair_entry *queue_pair_list_find_entry(
		    struct queue_pair_list *qp_list, struct vmci_handle handle);
static void	queue_pair_list_add_entry(struct queue_pair_list *qp_list,
		    struct queue_pair_entry *entry);
static void	queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
		    struct queue_pair_entry *entry);
static struct	queue_pair_entry *queue_pair_list_get_head(
		    struct queue_pair_list *qp_list);
static int	queue_pair_notify_peer_local(bool attach,
		    struct vmci_handle handle);
static struct	qp_guest_endpoint *qp_guest_endpoint_create(
		    struct vmci_handle handle, vmci_id peer, uint32_t flags,
		    uint64_t produce_size, uint64_t consume_size,
		    void *produce_q, void *consume_q);
static void	qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry);
static int	vmci_queue_pair_alloc_hypercall(
		    const struct qp_guest_endpoint *entry);
static int	vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
		    struct vmci_queue **produce_q, uint64_t produce_size,
		    struct vmci_queue **consume_q, uint64_t consume_size,
		    vmci_id peer, uint32_t flags,
		    vmci_privilege_flags priv_flags);
static int	vmci_queue_pair_detach_guest_work(struct vmci_handle handle);
static int	vmci_queue_pair_detach_hypercall(struct vmci_handle handle);

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc --
 *
 *     Allocates a VMCI QueuePair. Only checks validity of input arguments. The
 *     real work is done in the host or guest specific function.
 *
 * Results:
 *     VMCI_SUCCESS on success, appropriate error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_alloc(struct vmci_handle *handle, struct vmci_queue **produce_q,
    uint64_t produce_size, struct vmci_queue **consume_q, uint64_t consume_size,
    vmci_id peer, uint32_t flags, vmci_privilege_flags priv_flags)
{

	if (!handle || !produce_q || !consume_q ||
	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
		return (VMCI_ERROR_INVALID_ARGS);

	return (vmci_queue_pair_alloc_guest_work(handle, produce_q,
	    produce_size, consume_q, consume_size, peer, flags, priv_flags));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach --
 *
 *     Detaches from a VMCI QueuePair. Only checks validity of input argument.
 *     Real work is done in the host or guest specific function.
 *
 * Results:
 *     Success or failure.
 *
 * Side effects:
 *     Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_detach(struct vmci_handle handle)
{

	if (VMCI_HANDLE_INVALID(handle))
		return (VMCI_ERROR_INVALID_ARGS);

	return (vmci_queue_pair_detach_guest_work(handle));
}
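
/*
 * Usage sketch (illustrative only, not part of the driver): a guest-side
 * client would typically pair the two calls above roughly as follows. The
 * identifiers my_handle, my_produce_q and my_consume_q are hypothetical.
 *
 *	struct vmci_handle my_handle =
 *	    VMCI_MAKE_HANDLE(VMCI_INVALID_ID, VMCI_INVALID_ID);
 *	struct vmci_queue *my_produce_q, *my_consume_q;
 *	int rc;
 *
 *	rc = vmci_queue_pair_alloc(&my_handle, &my_produce_q, PAGE_SIZE,
 *	    &my_consume_q, PAGE_SIZE, VMCI_INVALID_ID, 0,
 *	    VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rc < VMCI_SUCCESS)
 *		return (rc);
 *	...
 *	(void)vmci_queue_pair_detach(my_handle);
 */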

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_init --
 *
 *     Initializes the list of QueuePairs.
 *
 * Results:
 *     Success or failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int
queue_pair_list_init(struct queue_pair_list *qp_list)
{
	int ret;

	vmci_list_init(&qp_list->head);
	atomic_store_int(&qp_list->hibernate, 0);
	ret = vmci_mutex_init(&qp_list->mutex, "VMCI QP List lock");
	return (ret);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_destroy --
 *
 *     Destroys the list's mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
queue_pair_list_destroy(struct queue_pair_list *qp_list)
{

	vmci_mutex_destroy(&qp_list->mutex);
	vmci_list_init(&qp_list->head);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_find_entry --
 *
 *     Finds the entry in the list corresponding to a given handle. Assumes that
 *     the list is locked.
 *
 * Results:
 *     Pointer to the entry if found, NULL otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static struct queue_pair_entry *
queue_pair_list_find_entry(struct queue_pair_list *qp_list,
    struct vmci_handle handle)
{
	struct queue_pair_entry *next;

	if (VMCI_HANDLE_INVALID(handle))
		return (NULL);

	vmci_list_scan(next, &qp_list->head, list_item) {
		if (VMCI_HANDLE_EQUAL(next->handle, handle))
			return (next);
	}

	return (NULL);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_add_entry --
 *
 *     Adds the given entry to the list. Assumes that the list is locked.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
queue_pair_list_add_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry)
{

	if (entry)
		vmci_list_insert(&qp_list->head, entry, list_item);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_remove_entry --
 *
 *     Removes the given entry from the list. Assumes that the list is locked.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry)
{

	if (entry)
		vmci_list_remove(entry, list_item);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_get_head --
 *
 *     Returns the entry from the head of the list. Assumes that the list is
 *     locked.
 *
 * Results:
 *     Pointer to the first entry, or NULL if the list is empty.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static struct queue_pair_entry *
queue_pair_list_get_head(struct queue_pair_list *qp_list)
{

	return (vmci_list_first(&qp_list->head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_init --
 *
 *     Initializes the data structures that keep track of queue pair guest
 *     endpoints.
 *
 * Results:
 *     VMCI_SUCCESS on success and appropriate failure code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_qp_guest_endpoints_init(void)
{

	return (queue_pair_list_init(&qp_guest_endpoints));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_exit --
 *
 *     Destroys all guest queue pair endpoints. If active guest queue pairs
 *     still exist, hypercalls to attempt detach from these queue pairs will be
 *     made. Any failure to detach is silently ignored.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_qp_guest_endpoints_exit(void)
{
	struct qp_guest_endpoint *entry;

	if (!vmci_mutex_initialized(&qp_guest_endpoints.mutex))
		return;

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	while ((entry =
	    (struct qp_guest_endpoint *)queue_pair_list_get_head(
	    &qp_guest_endpoints)) != NULL) {
		/*
		 * Don't make a hypercall for local QueuePairs.
		 */
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL))
			vmci_queue_pair_detach_hypercall(entry->qp.handle);
		/*
		 * We cannot fail the exit, so let's reset ref_count.
		 */
		entry->qp.ref_count = 0;
		queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);
		qp_guest_endpoint_destroy(entry);
	}

	atomic_store_int(&qp_guest_endpoints.hibernate, 0);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	queue_pair_list_destroy(&qp_guest_endpoints);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_sync --
 *
 *     Use this as a synchronization point when setting globals, for example,
 *     during device shutdown.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_qp_guest_endpoints_sync(void)
{

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);
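	/*
	 * Briefly taking and releasing the mutex is sufficient here: once the
	 * mutex is acquired, any thread that was inside a critical section
	 * (and may have been using the old global state) has left it.
	 */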
	vmci_mutex_release(&qp_guest_endpoints.mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * qp_guest_endpoint_create --
 *
 *     Allocates and initializes a qp_guest_endpoint structure. Allocates a
 *     QueuePair rid (and handle) iff the given entry has an invalid handle.
 *     0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved handles. Assumes
 *     that the QP list mutex is held by the caller.
 *
 * Results:
 *     Pointer to the initialized structure, or NULL on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle, vmci_id peer,
    uint32_t flags, uint64_t produce_size, uint64_t consume_size,
    void *produce_q, void *consume_q)
{
	struct qp_guest_endpoint *entry;
	static vmci_id queue_pair_rid = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
	const uint64_t num_ppns = CEILING(produce_size, PAGE_SIZE) +
	    CEILING(consume_size, PAGE_SIZE) +
	    2; /* One page each for the queue headers. */

	ASSERT((produce_size || consume_size) && produce_q && consume_q);

	if (VMCI_HANDLE_INVALID(handle)) {
		vmci_id context_id = vmci_get_context_id();
		vmci_id old_rid = queue_pair_rid;

		/*
		 * Generate a unique QueuePair rid.  Keep on trying until we
		 * wrap around in the RID space.
		 */
		ASSERT(old_rid > VMCI_RESERVED_RESOURCE_ID_MAX);
		do {
			handle = VMCI_MAKE_HANDLE(context_id, queue_pair_rid);
			entry =
			    (struct qp_guest_endpoint *)
			    queue_pair_list_find_entry(&qp_guest_endpoints,
			    handle);
			queue_pair_rid++;
			if (UNLIKELY(!queue_pair_rid)) {
				/*
				 * Skip the reserved rids.
				 */
				queue_pair_rid =
				    VMCI_RESERVED_RESOURCE_ID_MAX + 1;
			}
		} while (entry && queue_pair_rid != old_rid);

		if (UNLIKELY(entry != NULL)) {
			ASSERT(queue_pair_rid == old_rid);
			/*
			 * We wrapped around --- no rids were free.
			 */
			return (NULL);
		}
	}

	ASSERT(!VMCI_HANDLE_INVALID(handle) &&
	    queue_pair_list_find_entry(&qp_guest_endpoints, handle) == NULL);
	entry = vmci_alloc_kernel_mem(sizeof(*entry), VMCI_MEMORY_NORMAL);
	if (entry) {
		entry->qp.handle = handle;
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		memset(&entry->ppn_set, 0, sizeof(entry->ppn_set));
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
	}
	return (entry);
}

/*
 *------------------------------------------------------------------------------
 *
 * qp_guest_endpoint_destroy --
 *
 *     Frees a qp_guest_endpoint structure.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{

	ASSERT(entry);
	ASSERT(entry->qp.ref_count == 0);

	vmci_free_ppn_set(&entry->ppn_set);
	vmci_free_queue(entry->produce_q, entry->qp.produce_size);
	vmci_free_queue(entry->consume_q, entry->qp.consume_size);
	vmci_free_kernel_mem(entry, sizeof(*entry));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc_hypercall --
 *
 *     Helper to make a QueuePairAlloc hypercall when the driver is
 *     supporting a guest device.
 *
 * Results:
 *     Result of the hypercall.
 *
 * Side effects:
 *     Memory is allocated & freed.
 *
 *------------------------------------------------------------------------------
 */
static int
vmci_queue_pair_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_queue_pair_alloc_msg *alloc_msg;
	size_t msg_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return (VMCI_ERROR_INVALID_ARGS);

	ASSERT(!(entry->qp.flags & VMCI_QPFLAG_LOCAL));

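	/*
	 * The allocation request is a single datagram: the fixed-size
	 * vmci_queue_pair_alloc_msg header below is followed immediately by
	 * num_ppns PPN entries, filled in by vmci_populate_ppn_list().
	 */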
	msg_size = sizeof(*alloc_msg) + (size_t)entry->num_ppns * sizeof(PPN);
	alloc_msg = vmci_alloc_kernel_mem(msg_size, VMCI_MEMORY_NORMAL);
	if (!alloc_msg)
		return (VMCI_ERROR_NO_MEM);

	alloc_msg->hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;
	result = vmci_populate_ppn_list((uint8_t *)alloc_msg +
	    sizeof(*alloc_msg), &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram((struct vmci_datagram *)alloc_msg);
	vmci_free_kernel_mem(alloc_msg, msg_size);

	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc_guest_work --
 *
 *     This function handles the actual allocation of a VMCI queue pair guest
 *     endpoint. It allocates the physical pages for the queue pair and makes
 *     OS dependent calls through generic wrappers.
 *
 * Results:
 *     Success or failure.
 *
 * Side effects:
 *     Memory is allocated.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
    struct vmci_queue **produce_q, uint64_t produce_size,
    struct vmci_queue **consume_q, uint64_t consume_size, vmci_id peer,
    uint32_t flags, vmci_privilege_flags priv_flags)
{
	struct qp_guest_endpoint *queue_pair_entry = NULL;
	void *my_consume_q = NULL;
	void *my_produce_q = NULL;
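	/* Each queue needs one extra page to hold its queue header. */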
	const uint64_t num_consume_pages = CEILING(consume_size, PAGE_SIZE) + 1;
	const uint64_t num_produce_pages = CEILING(produce_size, PAGE_SIZE) + 1;
	int result;

	ASSERT(handle && produce_q && consume_q &&
	    (produce_size || consume_size));

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return (VMCI_ERROR_NO_ACCESS);

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	if ((atomic_load_int(&qp_guest_endpoints.hibernate) == 1) &&
	    !(flags & VMCI_QPFLAG_LOCAL)) {
		/*
		 * While guest OS is in hibernate state, creating non-local
		 * queue pairs is not allowed after the point where the VMCI
		 * guest driver converted the existing queue pairs to local
		 * ones.
		 */

		result = VMCI_ERROR_UNAVAILABLE;
		goto error;
	}

	if ((queue_pair_entry =
	    (struct qp_guest_endpoint *)queue_pair_list_find_entry(
	    &qp_guest_endpoints, *handle)) != NULL) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				VMCI_LOG_DEBUG(LGPFX"Error attempting to "
				    "attach more than once.\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size != produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				VMCI_LOG_DEBUG(LGPFX"Error mismatched "
				    "queue pair in local attach.\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and produce
			 * queues for the attacher and deliver an attach event.
			 */
			result = queue_pair_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;
			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}
		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = vmci_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		VMCI_LOG_WARNING(LGPFX"Error allocating pages for produce "
		    "queue.\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = vmci_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		VMCI_LOG_WARNING(LGPFX"Error allocating pages for consume "
		    "queue.\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
	    produce_size, consume_size, my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		VMCI_LOG_WARNING(LGPFX"Error allocating memory in %s.\n",
		    __FUNCTION__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = vmci_alloc_ppn_set(my_produce_q, num_produce_pages,
	    my_consume_q, num_consume_pages, &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"vmci_alloc_ppn_set failed.\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		vmci_id context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we do for
		 * regular ones. The handle's context must match the creator
		 * or attacher context id (here they are both the current
		 * context id) and the attach-only flag cannot exist during
		 * create. We also ensure specified peer is this context or
		 * an invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		    queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = vmci_queue_pair_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			VMCI_LOG_WARNING(
			    LGPFX"vmci_queue_pair_alloc_hypercall result = "
			    "%d.\n", result);
			goto error;
		}
	}

	queue_pair_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local queue
	 * pair create. For non-local queue pairs, the hypervisor initializes
	 * the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_queue_header_init((*produce_q)->q_header, *handle);
		vmci_queue_header_init((*consume_q)->q_header, *handle);
	}

	vmci_mutex_release(&qp_guest_endpoints.mutex);

	return (VMCI_SUCCESS);

error:
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		if (my_produce_q)
			vmci_free_queue(my_produce_q, produce_size);
		if (my_consume_q)
			vmci_free_queue(my_consume_q, consume_size);
	}
	return (result);

error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	ASSERT(queue_pair_entry->qp.ref_count > 0);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach_hypercall --
 *
 *     Helper to make a QueuePairDetach hypercall when the driver is supporting
 *     a guest device.
 *
 * Results:
 *     Result of the hypercall.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_detach_hypercall(struct vmci_handle handle)
{
	struct vmci_queue_pair_detach_msg detach_msg;

	detach_msg.hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return (vmci_send_datagram((struct vmci_datagram *)&detach_msg));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach_guest_work --
 *
 *     Helper for VMCI QueuePair detach interface. Frees the physical pages for
 *     the queue pair.
 *
 * Results:
 *     Success or failure.
 *
 * Side effects:
 *     Memory may be freed.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_detach_guest_work(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	int result;
	uint32_t ref_count;

	ASSERT(!VMCI_HANDLE_INVALID(handle));

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	entry = (struct qp_guest_endpoint *)queue_pair_list_find_entry(
	    &qp_guest_endpoints, handle);
	if (!entry) {
		vmci_mutex_release(&qp_guest_endpoints.mutex);
		return (VMCI_ERROR_NOT_FOUND);
	}

	ASSERT(entry->qp.ref_count >= 1);

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = queue_pair_notify_peer_local(false, handle);

			/*
			 * We can fail to notify a local queuepair because we
			 * can't allocate. We still want to release the entry
			 * if that happens, so don't bail out yet.
			 */
		}
	} else {
		result = vmci_queue_pair_detach_hypercall(handle);
		if (entry->hibernate_failure) {
			if (result == VMCI_ERROR_NOT_FOUND) {
				/*
				 * If a queue pair detach failed when entering
				 * hibernation, the guest driver and the device
				 * may disagree on its existence when coming
				 * out of hibernation. The guest driver will
				 * regard it as a non-local queue pair, but
				 * the device state is gone, since the device
				 * has been powered off. In this case, we
				 * treat the queue pair as a local queue pair
				 * with no peer.
				 */

				ASSERT(entry->qp.ref_count == 1);
				result = VMCI_SUCCESS;
			}
		}
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair. That other
			 * queuepair might still be accessing the shared
			 * memory, so don't release the entry yet. It will get
			 * cleaned up by vmci_qp_guest_endpoints_exit() if
			 * necessary (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			vmci_mutex_release(&qp_guest_endpoints.mutex);
			return (result);
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair, or
	 * we succeeded in all cases.  Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	ref_count = entry ? entry->qp.ref_count :
	    0xffffffff; /*
			 * Value does not matter, silence the
			 * compiler.
			 */

	vmci_mutex_release(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);
	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_notify_peer_local --
 *
 *     Dispatches a queue pair event message directly into the local event
 *     queue.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
queue_pair_notify_peer_local(bool attach, struct vmci_handle handle)
{
	struct vmci_event_msg *e_msg;
	struct vmci_event_payload_qp *e_payload;
	/* buf is only 48 bytes. */
	vmci_id context_id = vmci_get_context_id();
	char buf[sizeof(*e_msg) + sizeof(*e_payload)];

	e_msg = (struct vmci_event_msg *)buf;
	e_payload = vmci_event_msg_payload(e_msg);

	e_msg->hdr.dst = VMCI_MAKE_HANDLE(context_id, VMCI_EVENT_HANDLER);
	e_msg->hdr.src = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_CONTEXT_RESOURCE_ID);
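	/*
	 * The datagram payload is everything that follows the header: the
	 * event data plus the queue pair event payload filled in below.
	 */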
	e_msg->hdr.payload_size = sizeof(*e_msg) + sizeof(*e_payload) -
	    sizeof(e_msg->hdr);
	e_msg->event_data.event = attach ? VMCI_EVENT_QP_PEER_ATTACH :
	    VMCI_EVENT_QP_PEER_DETACH;
	e_payload->peer_id = context_id;
	e_payload->handle = handle;

	return (vmci_event_dispatch((struct vmci_datagram *)e_msg));
}
938