/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* VMCI QueuePair API implementation. */

#include <sys/cdefs.h>
#include "vmci.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_queue_pair.h"

#define LGPFX	"vmci_queue_pair: "

struct queue_pair_entry {
	vmci_list_item(queue_pair_entry) list_item;
	struct vmci_handle handle;
	vmci_id peer;
	uint32_t flags;
	uint64_t produce_size;
	uint64_t consume_size;
	uint32_t ref_count;
};

struct qp_guest_endpoint {
	struct queue_pair_entry qp;
	uint64_t num_ppns;
	void *produce_q;
	void *consume_q;
	bool hibernate_failure;
	struct ppn_set ppn_set;
};

struct queue_pair_list {
	vmci_list(queue_pair_entry) head;
	volatile int hibernate;
	vmci_mutex mutex;
};

#define QPE_NUM_PAGES(_QPE)						\
	((uint32_t)(CEILING(_QPE.produce_size, PAGE_SIZE) +		\
	CEILING(_QPE.consume_size, PAGE_SIZE) + 2))
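
/*
 * Worked example (illustrative only): with the common 4 KiB PAGE_SIZE, an
 * entry with a produce_size of 64 KiB and a consume_size of 4 KiB needs
 * CEILING(64K, 4K) + CEILING(4K, 4K) + 2 == 16 + 1 + 2 == 19 pages; the
 * two extra pages hold the produce and consume queue headers.
 */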

static struct queue_pair_list qp_guest_endpoints;

static struct queue_pair_entry *queue_pair_list_find_entry(
    struct queue_pair_list *qp_list, struct vmci_handle handle);
static void queue_pair_list_add_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry);
static void queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry);
static struct queue_pair_entry *queue_pair_list_get_head(
    struct queue_pair_list *qp_list);
static int queue_pair_notify_peer_local(bool attach,
    struct vmci_handle handle);
static struct qp_guest_endpoint *qp_guest_endpoint_create(
    struct vmci_handle handle, vmci_id peer, uint32_t flags,
    uint64_t produce_size, uint64_t consume_size,
    void *produce_q, void *consume_q);
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry);
static int vmci_queue_pair_alloc_hypercall(
    const struct qp_guest_endpoint *entry);
static int vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
    struct vmci_queue **produce_q, uint64_t produce_size,
    struct vmci_queue **consume_q, uint64_t consume_size,
    vmci_id peer, uint32_t flags,
    vmci_privilege_flags priv_flags);
static int vmci_queue_pair_detach_guest_work(struct vmci_handle handle);
static int vmci_queue_pair_detach_hypercall(struct vmci_handle handle);

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc --
 *
 *	Allocates a VMCI QueuePair. Only checks validity of input arguments.
 *	The real work is done in the host or guest specific function.
 *
 * Results:
 *	VMCI_SUCCESS on success, appropriate error code otherwise.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_alloc(struct vmci_handle *handle, struct vmci_queue **produce_q,
    uint64_t produce_size, struct vmci_queue **consume_q, uint64_t consume_size,
    vmci_id peer, uint32_t flags, vmci_privilege_flags priv_flags)
{

	if (!handle || !produce_q || !consume_q ||
	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
		return (VMCI_ERROR_INVALID_ARGS);

	return (vmci_queue_pair_alloc_guest_work(handle, produce_q,
	    produce_size, consume_q, consume_size, peer, flags, priv_flags));
}
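
/*
 * Usage sketch (hedged, illustrative only; assumes the usual
 * VMCI_INVALID_HANDLE and VMCI_INVALID_ID definitions from the VMCI
 * headers). A guest caller creating a local queue pair with 8 KiB in each
 * direction and then detaching might look like:
 *
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *produce_q, *consume_q;
 *	int rc;
 *
 *	rc = vmci_queue_pair_alloc(&handle, &produce_q, 2 * PAGE_SIZE,
 *	    &consume_q, 2 * PAGE_SIZE, VMCI_INVALID_ID, VMCI_QPFLAG_LOCAL,
 *	    VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rc == VMCI_SUCCESS)
 *		rc = vmci_queue_pair_detach(handle);
 */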

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach --
 *
 *	Detaches from a VMCI QueuePair. Only checks validity of input argument.
 *	Real work is done in the host or guest specific function.
 *
 * Results:
 *	Success or failure.
 *
 * Side effects:
 *	Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_detach(struct vmci_handle handle)
{

	if (VMCI_HANDLE_INVALID(handle))
		return (VMCI_ERROR_INVALID_ARGS);

	return (vmci_queue_pair_detach_guest_work(handle));
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_init --
 *
 *	Initializes the list of QueuePairs.
 *
 * Results:
 *	Success or failure.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static inline int
queue_pair_list_init(struct queue_pair_list *qp_list)
{
	int ret;

	vmci_list_init(&qp_list->head);
	atomic_store_int(&qp_list->hibernate, 0);
	ret = vmci_mutex_init(&qp_list->mutex, "VMCI QP List lock");
	return (ret);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_destroy --
 *
 *	Destroys the list's mutex and resets the list head.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
queue_pair_list_destroy(struct queue_pair_list *qp_list)
{

	vmci_mutex_destroy(&qp_list->mutex);
	vmci_list_init(&qp_list->head);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_find_entry --
 *
 *	Finds the entry in the list corresponding to a given handle. Assumes
 *	that the list is locked.
 *
 * Results:
 *	Pointer to entry.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static struct queue_pair_entry *
queue_pair_list_find_entry(struct queue_pair_list *qp_list,
    struct vmci_handle handle)
{
	struct queue_pair_entry *next;

	if (VMCI_HANDLE_INVALID(handle))
		return (NULL);

	vmci_list_scan(next, &qp_list->head, list_item) {
		if (VMCI_HANDLE_EQUAL(next->handle, handle))
			return (next);
	}

	return (NULL);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_add_entry --
 *
 *	Adds the given entry to the list. Assumes that the list is locked.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static void
queue_pair_list_add_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry)
{

	if (entry)
		vmci_list_insert(&qp_list->head, entry, list_item);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_remove_entry --
 *
 *	Removes the given entry from the list. Assumes that the list is
 *	locked.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static void
queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry)
{

	if (entry)
		vmci_list_remove(entry, list_item);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_get_head --
 *
 *	Returns the entry from the head of the list. Assumes that the list is
 *	locked.
 *
 * Results:
 *	Pointer to entry.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static struct queue_pair_entry *
queue_pair_list_get_head(struct queue_pair_list *qp_list)
{

	return (vmci_list_first(&qp_list->head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_init --
 *
 *	Initializes data structure state keeping track of queue pair guest
 *	endpoints.
 *
 * Results:
 *	VMCI_SUCCESS on success and appropriate failure code otherwise.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_qp_guest_endpoints_init(void)
{

	return (queue_pair_list_init(&qp_guest_endpoints));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_exit --
 *
 *	Destroys all guest queue pair endpoints. If active guest queue pairs
 *	still exist, hypercalls to attempt detach from these queue pairs will
 *	be made. Any failure to detach is silently ignored.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_qp_guest_endpoints_exit(void)
{
	struct qp_guest_endpoint *entry;

	if (!vmci_mutex_initialized(&qp_guest_endpoints.mutex))
		return;

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	while ((entry =
	    (struct qp_guest_endpoint *)queue_pair_list_get_head(
	    &qp_guest_endpoints)) != NULL) {
		/*
		 * Don't make a hypercall for local QueuePairs.
		 */
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL))
			vmci_queue_pair_detach_hypercall(entry->qp.handle);
		/*
		 * We cannot fail the exit, so let's reset ref_count.
		 */
		entry->qp.ref_count = 0;
		queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);
		qp_guest_endpoint_destroy(entry);
	}

	atomic_store_int(&qp_guest_endpoints.hibernate, 0);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	queue_pair_list_destroy(&qp_guest_endpoints);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_sync --
 *
 *	Use this as a synchronization point when setting globals, for example,
 *	during device shutdown.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_qp_guest_endpoints_sync(void)
{

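	/*
	 * Taking and immediately dropping the list mutex acts as a barrier:
	 * once the mutex has been acquired here, any thread that was inside
	 * a queue pair list critical section has finished.
	 */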
	vmci_mutex_acquire(&qp_guest_endpoints.mutex);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * qp_guest_endpoint_create --
 *
 *	Allocates and initializes a qp_guest_endpoint structure. Allocates a
 *	QueuePair rid (and handle) iff the given entry has an invalid handle.
 *	0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved handles. Assumes
 *	that the QP list mutex is held by the caller.
 *
 * Results:
 *	Pointer to the initialized structure.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle, vmci_id peer,
    uint32_t flags, uint64_t produce_size, uint64_t consume_size,
    void *produce_q, void *consume_q)
{
	struct qp_guest_endpoint *entry;
	static vmci_id queue_pair_rid = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
	const uint64_t num_ppns = CEILING(produce_size, PAGE_SIZE) +
	    CEILING(consume_size, PAGE_SIZE) +
	    2; /* One page each for the queue headers. */

	ASSERT((produce_size || consume_size) && produce_q && consume_q);

	if (VMCI_HANDLE_INVALID(handle)) {
		vmci_id context_id = vmci_get_context_id();
		vmci_id old_rid = queue_pair_rid;

		/*
		 * Generate a unique QueuePair rid. Keep on trying until we
		 * wrap around in the RID space.
		 */
		ASSERT(old_rid > VMCI_RESERVED_RESOURCE_ID_MAX);
		do {
			handle = VMCI_MAKE_HANDLE(context_id, queue_pair_rid);
			entry =
			    (struct qp_guest_endpoint *)
			    queue_pair_list_find_entry(&qp_guest_endpoints,
			    handle);
			queue_pair_rid++;
			if (UNLIKELY(!queue_pair_rid)) {
				/*
				 * Skip the reserved rids.
				 */
				queue_pair_rid =
				    VMCI_RESERVED_RESOURCE_ID_MAX + 1;
			}
		} while (entry && queue_pair_rid != old_rid);

		if (UNLIKELY(entry != NULL)) {
			ASSERT(queue_pair_rid == old_rid);
			/*
			 * We wrapped around --- no rids were free.
			 */
			return (NULL);
		}
	}

	ASSERT(!VMCI_HANDLE_INVALID(handle) &&
	    queue_pair_list_find_entry(&qp_guest_endpoints, handle) == NULL);
	entry = vmci_alloc_kernel_mem(sizeof(*entry), VMCI_MEMORY_NORMAL);
	if (entry) {
		entry->qp.handle = handle;
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		memset(&entry->ppn_set, 0, sizeof(entry->ppn_set));
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
	}
	return (entry);
}
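
/*
 * Illustration of rid allocation (hedged; assumes the conventional value
 * VMCI_RESERVED_RESOURCE_ID_MAX == 1023): the first dynamically allocated
 * handle in context 7 would be VMCI_MAKE_HANDLE(7, 1024), the next create
 * tries 1025, and so on; when the 32-bit rid counter wraps, the search
 * resumes at 1024 and fails only if every rid is already in use.
 */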

/*
 *------------------------------------------------------------------------------
 *
 * qp_guest_endpoint_destroy --
 *
 *	Frees a qp_guest_endpoint structure.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

void
qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{

	ASSERT(entry);
	ASSERT(entry->qp.ref_count == 0);

	vmci_free_ppn_set(&entry->ppn_set);
	vmci_free_queue(entry->produce_q, entry->qp.produce_size);
	vmci_free_queue(entry->consume_q, entry->qp.consume_size);
	vmci_free_kernel_mem(entry, sizeof(*entry));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc_hypercall --
 *
 *	Helper to make a QueuePairAlloc hypercall when the driver is
 *	supporting a guest device.
 *
 * Results:
 *	Result of the hypercall.
 *
 * Side effects:
 *	Memory is allocated & freed.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_queue_pair_alloc_msg *alloc_msg;
	size_t msg_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return (VMCI_ERROR_INVALID_ARGS);

	ASSERT(!(entry->qp.flags & VMCI_QPFLAG_LOCAL));

	msg_size = sizeof(*alloc_msg) + (size_t)entry->num_ppns * sizeof(PPN);
	alloc_msg = vmci_alloc_kernel_mem(msg_size, VMCI_MEMORY_NORMAL);
	if (!alloc_msg)
		return (VMCI_ERROR_NO_MEM);

	alloc_msg->hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;
	result = vmci_populate_ppn_list((uint8_t *)alloc_msg +
	    sizeof(*alloc_msg), &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram((struct vmci_datagram *)alloc_msg);
	vmci_free_kernel_mem(alloc_msg, msg_size);

	return (result);
}
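
/*
 * For reference, the hypercall message assembled above is one contiguous
 * allocation: the fixed vmci_queue_pair_alloc_msg fields followed
 * immediately by the PPN array, so the device can read the page list at a
 * fixed offset. Roughly:
 *
 *	+-----------------------------------------+
 *	| hdr (dst, src, payload_size)            |
 *	| handle, peer, flags                     |
 *	| produce_size, consume_size, num_ppns    |
 *	+-----------------------------------------+
 *	| PPN[0] ... PPN[num_ppns - 1]            |
 *	+-----------------------------------------+
 */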

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc_guest_work --
 *
 *	This function handles the actual allocation of a VMCI queue pair
 *	guest endpoint. Allocates physical pages for the queue pair. It makes
 *	OS dependent calls through generic wrappers.
 *
 * Results:
 *	Success or failure.
 *
 * Side effects:
 *	Memory is allocated.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
    struct vmci_queue **produce_q, uint64_t produce_size,
    struct vmci_queue **consume_q, uint64_t consume_size, vmci_id peer,
    uint32_t flags, vmci_privilege_flags priv_flags)
{
	struct qp_guest_endpoint *queue_pair_entry = NULL;
	void *my_consume_q = NULL;
	void *my_produce_q = NULL;
	const uint64_t num_consume_pages = CEILING(consume_size, PAGE_SIZE) + 1;
	const uint64_t num_produce_pages = CEILING(produce_size, PAGE_SIZE) + 1;
	int result;

	ASSERT(handle && produce_q && consume_q &&
	    (produce_size || consume_size));

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return (VMCI_ERROR_NO_ACCESS);

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	if ((atomic_load_int(&qp_guest_endpoints.hibernate) == 1) &&
	    !(flags & VMCI_QPFLAG_LOCAL)) {
		/*
		 * While guest OS is in hibernate state, creating non-local
		 * queue pairs is not allowed after the point where the VMCI
		 * guest driver converted the existing queue pairs to local
		 * ones.
		 */

		result = VMCI_ERROR_UNAVAILABLE;
		goto error;
	}

	if ((queue_pair_entry =
	    (struct qp_guest_endpoint *)queue_pair_list_find_entry(
	    &qp_guest_endpoints, *handle)) != NULL) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				VMCI_LOG_DEBUG(LGPFX"Error attempting to "
				    "attach more than once.\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size != produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				VMCI_LOG_DEBUG(LGPFX"Error mismatched "
				    "queue pair in local attach.\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and produce
			 * queues for the attacher and deliver an attach event.
			 */
			result = queue_pair_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;
			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}
		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = vmci_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		VMCI_LOG_WARNING(LGPFX"Error allocating pages for produce "
		    "queue.\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = vmci_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		VMCI_LOG_WARNING(LGPFX"Error allocating pages for consume "
		    "queue.\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
	    produce_size, consume_size, my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		VMCI_LOG_WARNING(LGPFX"Error allocating memory in %s.\n",
		    __FUNCTION__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = vmci_alloc_ppn_set(my_produce_q, num_produce_pages,
	    my_consume_q, num_consume_pages, &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"vmci_alloc_ppn_set failed.\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		vmci_id context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we do for
		 * regular ones. The handle's context must match the creator
		 * or attacher context id (here they are both the current
		 * context id) and the attach-only flag cannot exist during
		 * create. We also ensure specified peer is this context or
		 * an invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		    queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = vmci_queue_pair_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			VMCI_LOG_WARNING(
			    LGPFX"vmci_queue_pair_alloc_hypercall result = "
			    "%d.\n", result);
			goto error;
		}
	}

	queue_pair_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local queue
	 * pair create. For non-local queue pairs, the hypervisor initializes
	 * the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_queue_header_init((*produce_q)->q_header, *handle);
		vmci_queue_header_init((*consume_q)->q_header, *handle);
	}

	vmci_mutex_release(&qp_guest_endpoints.mutex);

	return (VMCI_SUCCESS);

error:
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		if (my_produce_q)
			vmci_free_queue(my_produce_q, produce_size);
		if (my_consume_q)
			vmci_free_queue(my_consume_q, consume_size);
	}
	return (result);

error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	ASSERT(queue_pair_entry->qp.ref_count > 0);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach_hypercall --
 *
 *	Helper to make a QueuePairDetach hypercall when the driver is
 *	supporting a guest device.
 *
 * Results:
 *	Result of the hypercall.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_detach_hypercall(struct vmci_handle handle)
{
	struct vmci_queue_pair_detach_msg detach_msg;

	detach_msg.hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return (vmci_send_datagram((struct vmci_datagram *)&detach_msg));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach_guest_work --
 *
 *	Helper for VMCI QueuePair detach interface. Frees the physical pages
 *	for the queue pair.
 *
 * Results:
 *	Success or failure.
 *
 * Side effects:
 *	Memory may be freed.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_detach_guest_work(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	int result;
	uint32_t ref_count;

	ASSERT(!VMCI_HANDLE_INVALID(handle));

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	entry = (struct qp_guest_endpoint *)queue_pair_list_find_entry(
	    &qp_guest_endpoints, handle);
	if (!entry) {
		vmci_mutex_release(&qp_guest_endpoints.mutex);
		return (VMCI_ERROR_NOT_FOUND);
	}

	ASSERT(entry->qp.ref_count >= 1);

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = queue_pair_notify_peer_local(false, handle);

			/*
			 * We can fail to notify a local queuepair because we
			 * can't allocate. We still want to release the entry
			 * if that happens, so don't bail out yet.
			 */
		}
	} else {
		result = vmci_queue_pair_detach_hypercall(handle);
		if (entry->hibernate_failure) {
			if (result == VMCI_ERROR_NOT_FOUND) {
				/*
				 * If a queue pair detach failed when entering
				 * hibernation, the guest driver and the device
				 * may disagree on its existence when coming
				 * out of hibernation. The guest driver will
				 * regard it as a non-local queue pair, but
				 * the device state is gone, since the device
				 * has been powered off. In this case, we
				 * treat the queue pair as a local queue pair
				 * with no peer.
				 */

				ASSERT(entry->qp.ref_count == 1);
				result = VMCI_SUCCESS;
			}
		}
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair. That
			 * other queuepair might still be accessing the shared
			 * memory, so don't release the entry yet. It will get
			 * cleaned up by vmci_qp_guest_endpoints_exit() if
			 * necessary (assuming we are going away, otherwise
			 * why did this fail?).
			 */

			vmci_mutex_release(&qp_guest_endpoints.mutex);
			return (result);
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair,
	 * or we succeeded in all cases. Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	ref_count = entry ? entry->qp.ref_count :
	    0xffffffff; /*
			 * Value does not matter, silence the
			 * compiler.
			 */

	vmci_mutex_release(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);
	return (result);
}
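
/*
 * Reference count lifecycle (summary of the code above):
 * qp_guest_endpoint_create() starts an entry at ref_count 0;
 * vmci_queue_pair_alloc_guest_work() raises it to 1 on create and to 2
 * after a local attach; each successful detach drops it by one, and the
 * entry is removed from the list and destroyed only when it reaches 0.
 */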

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_notify_peer_local --
 *
 *	Dispatches a queue pair event message directly into the local event
 *	queue.
 *
 * Results:
 *	VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *	None.
 *
 *------------------------------------------------------------------------------
 */

static int
queue_pair_notify_peer_local(bool attach, struct vmci_handle handle)
{
	struct vmci_event_msg *e_msg;
	struct vmci_event_payload_qp *e_payload;
	/* buf is only 48 bytes. */
	char buf[sizeof(*e_msg) + sizeof(*e_payload)];
	vmci_id context_id;

	context_id = vmci_get_context_id();
	e_msg = (struct vmci_event_msg *)buf;
	e_payload = vmci_event_msg_payload(e_msg);

	e_msg->hdr.dst = VMCI_MAKE_HANDLE(context_id, VMCI_EVENT_HANDLER);
	e_msg->hdr.src = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_CONTEXT_RESOURCE_ID);
	e_msg->hdr.payload_size = sizeof(*e_msg) + sizeof(*e_payload) -
	    sizeof(e_msg->hdr);
	e_msg->event_data.event = attach ? VMCI_EVENT_QP_PEER_ATTACH :
	    VMCI_EVENT_QP_PEER_DETACH;
	e_payload->peer_id = context_id;
	e_payload->handle = handle;

	return (vmci_event_dispatch((struct vmci_datagram *)e_msg));
}