/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

#ifndef _VMCI_DEFS_H_
#define _VMCI_DEFS_H_

#include <sys/types.h>
#include <machine/atomic.h>

#include "vmci_kernel_defs.h"

#pragma GCC diagnostic ignored "-Wcast-qual"

/* Register offsets. */
#define VMCI_STATUS_ADDR		0x00
#define VMCI_CONTROL_ADDR		0x04
#define VMCI_ICR_ADDR			0x08
#define VMCI_IMR_ADDR			0x0c
#define VMCI_DATA_OUT_ADDR		0x10
#define VMCI_DATA_IN_ADDR		0x14
#define VMCI_CAPS_ADDR			0x18
#define VMCI_RESULT_LOW_ADDR		0x1c
#define VMCI_RESULT_HIGH_ADDR		0x20

/* Status register bits. */
#define VMCI_STATUS_INT_ON		0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET		0x1
#define VMCI_CONTROL_INT_ENABLE		0x2
#define VMCI_CONTROL_INT_DISABLE	0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL		0x1
#define VMCI_CAPS_GUESTCALL		0x2
#define VMCI_CAPS_DATAGRAM		0x4
#define VMCI_CAPS_NOTIFICATIONS		0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM		0x1
#define VMCI_ICR_NOTIFICATION		0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM		0x1
#define VMCI_IMR_NOTIFICATION		0x2

/* Interrupt type. */
typedef enum vmci_intr_type {
	VMCI_INTR_TYPE_INTX =	0,
	VMCI_INTR_TYPE_MSI =	1,
	VMCI_INTR_TYPE_MSIX =	2
} vmci_intr_type;

/*
 * Maximum MSI/MSI-X interrupt vectors in the device.
 */
#define VMCI_MAX_INTRS			2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
#define VMCI_INTR_DATAGRAM		0
#define VMCI_INTR_NOTIFICATION		1

/*
 * A single VMCI device has an upper limit of 128 MiB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY	(128 * 1024 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 */

typedef uint32_t vmci_resource;

/* VMCI reserved hypervisor datagram resource IDs. */
#define VMCI_RESOURCES_QUERY		0
#define VMCI_GET_CONTEXT_ID		1
#define VMCI_SET_NOTIFY_BITMAP		2
#define VMCI_DOORBELL_LINK		3
#define VMCI_DOORBELL_UNLINK		4
#define VMCI_DOORBELL_NOTIFY		5
/*
 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
 * obsoleted by the removal of VM to VM communication.
 */
#define VMCI_DATAGRAM_REQUEST_MAP	6
#define VMCI_DATAGRAM_REMOVE_MAP	7
#define VMCI_EVENT_SUBSCRIBE		8
#define VMCI_EVENT_UNSUBSCRIBE		9
#define VMCI_QUEUEPAIR_ALLOC		10
#define VMCI_QUEUEPAIR_DETACH		11
/*
 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
 * WS 7.0/7.1 and ESX 4.1
 */
#define VMCI_HGFS_TRANSPORT		13
#define VMCI_UNITY_PBRPC_REGISTER	14
/*
 * This resource is used for VMCI socket control packets sent to the
 * hypervisor (CID 0) because RID 1 is already reserved.
 */
#define VSOCK_PACKET_HYPERVISOR_RID	15
#define VMCI_RESOURCE_MAX		16
/*
 * The core VMCI device functionality only requires the resource IDs of
 * VMCI_QUEUEPAIR_DETACH and below.
 */
#define VMCI_CORE_DEVICE_RESOURCE_MAX	VMCI_QUEUEPAIR_DETACH

/*
 * VMCI reserved host datagram resource IDs.
 * vsock control channel has resource id 1.
 */
#define VMCI_DVFILTER_DATA_PATH_DATAGRAM	2

/* VMCI Ids. */
typedef uint32_t vmci_id;

struct vmci_id_range {
	int8_t	action;	/* VMCI_FA_X, for use in filters. */
	vmci_id	begin;	/* Beginning of range. */
	vmci_id	end;	/* End of range. */
};

struct vmci_handle {
	vmci_id	context;
	vmci_id	resource;
};

static inline struct vmci_handle
VMCI_MAKE_HANDLE(vmci_id cid, vmci_id rid)
{
	struct vmci_handle h;

	h.context = cid;
	h.resource = rid;
	return (h);
}

#define VMCI_HANDLE_TO_CONTEXT_ID(_handle)				\
	((_handle).context)
#define VMCI_HANDLE_TO_RESOURCE_ID(_handle)				\
	((_handle).resource)
#define VMCI_HANDLE_EQUAL(_h1, _h2)					\
	((_h1).context == (_h2).context && (_h1).resource == (_h2).resource)

#define VMCI_INVALID_ID			0xFFFFFFFF
static const struct vmci_handle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID,
	    VMCI_INVALID_ID};

#define VMCI_HANDLE_INVALID(_handle)					\
	VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE)
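
/*
 * Usage sketch (illustrative only; cid and rid below are hypothetical
 * caller-side variables, not definitions from this header):
 *
 *	struct vmci_handle h = VMCI_MAKE_HANDLE(cid, rid);
 *
 * VMCI_HANDLE_TO_CONTEXT_ID(h) yields cid, VMCI_HANDLE_TO_RESOURCE_ID(h)
 * yields rid, and VMCI_HANDLE_INVALID(h) is true only when both IDs are
 * VMCI_INVALID_ID.
 */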

/*
 * The defines below can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID					\
	VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID					\
	VMCI_INVALID_ID
#define VMCI_ANON_SRC_HANDLE						\
	VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID,			\
	VMCI_ANON_SRC_RESOURCE_ID)

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT		16

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID	0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID	1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID		2
#define VMCI_HOST_CONTEXT_INVALID_EVENT	((uintptr_t)~0)

#define VMCI_CONTEXT_IS_VM(_cid)					\
	(VMCI_INVALID_ID != _cid && _cid > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID	0
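
/*
 * For example (an assumed illustration, not a definition from this file): a
 * handle that refers to the host context itself would be built as
 * VMCI_MAKE_HANDLE(VMCI_HOST_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID).
 */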

/*
 *------------------------------------------------------------------------------
 *
 * VMCI error codes.
 *
 *------------------------------------------------------------------------------
 */

#define VMCI_SUCCESS_QUEUEPAIR_ATTACH		5
#define VMCI_SUCCESS_QUEUEPAIR_CREATE		4
#define VMCI_SUCCESS_LAST_DETACH		3
#define VMCI_SUCCESS_ACCESS_GRANTED		2
#define VMCI_SUCCESS_ENTRY_DEAD			1
#define VMCI_SUCCESS				0LL
#define VMCI_ERROR_INVALID_RESOURCE		(-1)
#define VMCI_ERROR_INVALID_ARGS			(-2)
#define VMCI_ERROR_NO_MEM			(-3)
#define VMCI_ERROR_DATAGRAM_FAILED		(-4)
#define VMCI_ERROR_MORE_DATA			(-5)
#define VMCI_ERROR_NO_MORE_DATAGRAMS		(-6)
#define VMCI_ERROR_NO_ACCESS			(-7)
#define VMCI_ERROR_NO_HANDLE			(-8)
#define VMCI_ERROR_DUPLICATE_ENTRY		(-9)
#define VMCI_ERROR_DST_UNREACHABLE		(-10)
#define VMCI_ERROR_PAYLOAD_TOO_LARGE		(-11)
#define VMCI_ERROR_INVALID_PRIV			(-12)
#define VMCI_ERROR_GENERIC			(-13)
#define VMCI_ERROR_PAGE_ALREADY_SHARED		(-14)
#define VMCI_ERROR_CANNOT_SHARE_PAGE		(-15)
#define VMCI_ERROR_CANNOT_UNSHARE_PAGE		(-16)
#define VMCI_ERROR_NO_PROCESS			(-17)
#define VMCI_ERROR_NO_DATAGRAM			(-18)
#define VMCI_ERROR_NO_RESOURCES			(-19)
#define VMCI_ERROR_UNAVAILABLE			(-20)
#define VMCI_ERROR_NOT_FOUND			(-21)
#define VMCI_ERROR_ALREADY_EXISTS		(-22)
#define VMCI_ERROR_NOT_PAGE_ALIGNED		(-23)
#define VMCI_ERROR_INVALID_SIZE			(-24)
#define VMCI_ERROR_REGION_ALREADY_SHARED	(-25)
#define VMCI_ERROR_TIMEOUT			(-26)
#define VMCI_ERROR_DATAGRAM_INCOMPLETE		(-27)
#define VMCI_ERROR_INCORRECT_IRQL		(-28)
#define VMCI_ERROR_EVENT_UNKNOWN		(-29)
#define VMCI_ERROR_OBSOLETE			(-30)
#define VMCI_ERROR_QUEUEPAIR_MISMATCH		(-31)
#define VMCI_ERROR_QUEUEPAIR_NOTSET		(-32)
#define VMCI_ERROR_QUEUEPAIR_NOTOWNER		(-33)
#define VMCI_ERROR_QUEUEPAIR_NOTATTACHED	(-34)
#define VMCI_ERROR_QUEUEPAIR_NOSPACE		(-35)
#define VMCI_ERROR_QUEUEPAIR_NODATA		(-36)
#define VMCI_ERROR_BUSMEM_INVALIDATION		(-37)
#define VMCI_ERROR_MODULE_NOT_LOADED		(-38)
#define VMCI_ERROR_DEVICE_NOT_FOUND		(-39)
#define VMCI_ERROR_QUEUEPAIR_NOT_READY		(-40)
#define VMCI_ERROR_WOULD_BLOCK			(-41)

/* VMCI clients should return error codes within this range. */
#define VMCI_ERROR_CLIENT_MIN			(-500)
#define VMCI_ERROR_CLIENT_MAX			(-550)

/* Internal error codes. */
#define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT	(-1000)

#define VMCI_PATH_MAX				256

/* VMCI reserved events. */
typedef uint32_t vmci_event_type;

#define VMCI_EVENT_CTX_ID_UPDATE	0	// Only applicable to guest
						// endpoints
#define VMCI_EVENT_CTX_REMOVED		1	// Applicable to guest and host
#define VMCI_EVENT_QP_RESUMED		2	// Only applicable to guest
						// endpoints
#define VMCI_EVENT_QP_PEER_ATTACH	3	// Applicable to guest, host
						// and VMX
#define VMCI_EVENT_QP_PEER_DETACH	4	// Applicable to guest, host
						// and VMX
#define VMCI_EVENT_MEM_ACCESS_ON	5	// Applicable to VMX and vmk. On
						// vmk, this event has the
						// Context payload type
#define VMCI_EVENT_MEM_ACCESS_OFF	6	// Applicable to VMX and vmk.
						// Same as above for the payload
						// type
#define VMCI_EVENT_GUEST_PAUSED		7	// Applicable to vmk. This
						// event has the Context
						// payload type
#define VMCI_EVENT_GUEST_UNPAUSED	8	// Applicable to vmk. Same as
						// above for the payload type.
#define VMCI_EVENT_MAX			9

/*
 * Of the above events, a few are reserved for use in the VMX, and other
 * endpoints (guest and host kernel) should not use them. For the rest of the
 * events, we allow both host and guest endpoints to subscribe to them, to
 * maintain the same API for host and guest endpoints.
 */

#define VMCI_EVENT_VALID_VMX(_event)					\
	(_event == VMCI_EVENT_QP_PEER_ATTACH ||				\
	_event == VMCI_EVENT_QP_PEER_DETACH ||				\
	_event == VMCI_EVENT_MEM_ACCESS_ON ||				\
	_event == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event)					\
	(_event < VMCI_EVENT_MAX &&					\
	_event != VMCI_EVENT_MEM_ACCESS_ON &&				\
	_event != VMCI_EVENT_MEM_ACCESS_OFF &&				\
	_event != VMCI_EVENT_GUEST_PAUSED &&				\
	_event != VMCI_EVENT_GUEST_UNPAUSED)

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER		0

/*
 * VMCI coarse-grained privileges (per context or host process/endpoint). An
 * entity with the restricted flag is only allowed to interact with the
 * hypervisor and trusted entities.
 */
typedef uint32_t vmci_privilege_flags;

#define VMCI_PRIVILEGE_FLAG_RESTRICTED		0x01
#define VMCI_PRIVILEGE_FLAG_TRUSTED		0x02
#define VMCI_PRIVILEGE_ALL_FLAGS					\
	(VMCI_PRIVILEGE_FLAG_RESTRICTED | VMCI_PRIVILEGE_FLAG_TRUSTED)
#define VMCI_NO_PRIVILEGE_FLAGS			0x00
#define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS	VMCI_NO_PRIVILEGE_FLAGS
#define VMCI_LEAST_PRIVILEGE_FLAGS		VMCI_PRIVILEGE_FLAG_RESTRICTED
#define VMCI_MAX_PRIVILEGE_FLAGS		VMCI_PRIVILEGE_FLAG_TRUSTED

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX		1023

#define VMCI_DOMAIN_NAME_MAXLEN			32

#define VMCI_LGPFX				"vmci: "

/*
 * struct vmci_queue_header
 *
 * A Queue cannot stand by itself as designed. Each Queue's header contains a
 * pointer into itself (the producer_tail) and into its peer (consumer_head).
 * The reason for the separation is one of accessibility: Each end-point can
 * modify two things: where the next location to enqueue is within its produce_q
 * (producer_tail); and where the next dequeue location is in its consume_q
 * (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to guest; NOTE
 * that in the host both queue headers are mapped r/w). But, each end-point
 * needs read access to both Queue header structures in order to determine how
 * much space is used (or left) in the Queue. This is because for an end-point
 * to know how full its produce_q is, it needs to use the consumer_head that
 * points into the produce_q but -that- consumer_head is in the Queue header
 * for that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach a line
 * in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, whoever is at the head of the line is served
 * next.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid byte of
 * data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head then
 * the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle	handle;		/* Identifier. */
	volatile uint64_t	producer_tail;	/* Offset in this queue. */
	volatile uint64_t	consumer_head;	/* Offset in peer queue. */
};
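
/*
 * Worked example (assumed numbers, for illustration only): for a produce_q of
 * 16 bytes with produce_q_header->producer_tail == 10 and
 * consume_q_header->consumer_head == 4, bytes 4..9 hold enqueued data, byte 10
 * is the next byte to be written, and the free-space computation further below
 * yields 16 - (10 - 4) - 1 = 9 free bytes.
 */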

/*
 * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
 * size to be less than 4GB, and use 32bit atomic operations on the head and
 * tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
 * is an atomic read-modify-write. This will cause traces to fire when a 32bit
 * consumer tries to read the producer's tail pointer, for example, because the
 * consumer has read-only access to the producer's tail pointer.
 *
 * We provide the following macros to invoke 32bit or 64bit atomic operations
 * based on the architecture the code is being compiled on.
 */

#ifdef __x86_64__
#define QP_MAX_QUEUE_SIZE_ARCH		CONST64U(0xffffffffffffffff)
#define qp_atomic_read_offset(x)	atomic_load_64(x)
#define qp_atomic_write_offset(x, y)	atomic_store_64(x, y)
#else /* __x86_64__ */
	/*
	 * The wrappers below are used because atomic_store_<type> operates
	 * on a specific <type>; likewise for atomic_load_<type>.
	 */

	static inline uint32_t
	type_safe_atomic_read_32(void *var)
	{
		return (atomic_load_32((volatile uint32_t *)(var)));
	}

	static inline void
	type_safe_atomic_write_32(void *var, uint32_t val)
	{
		atomic_store_32((volatile uint32_t *)(var), (uint32_t)(val));
	}

#define QP_MAX_QUEUE_SIZE_ARCH		CONST64U(0xffffffff)
#define qp_atomic_read_offset(x)	type_safe_atomic_read_32((void *)(x))
#define qp_atomic_write_offset(x, y)					\
	type_safe_atomic_write_32((void *)(x), (uint32_t)(y))
#endif /* __x86_64__ */

/*
 *------------------------------------------------------------------------------
 *
 * qp_add_pointer --
 *
 *     Helper to add a given offset to a head or tail pointer. Wraps the value
 *     of the pointer around the max size of the queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
qp_add_pointer(volatile uint64_t *var, size_t add, uint64_t size)
{
	uint64_t new_val = qp_atomic_read_offset(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;
	qp_atomic_write_offset(var, new_val);
}
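
/*
 * For example (illustrative values): with size == 16, advancing a pointer that
 * currently holds 14 by add == 4 triggers the rewind above (14 >= 16 - 4), so
 * the stored result is 14 - 16 + 4 == 2, i.e. (14 + 4) mod 16. The unsigned
 * intermediate underflow is harmless because the arithmetic is modulo 2^64.
 */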

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_producer_tail --
 *
 *     Helper routine to get the Producer Tail from the supplied queue.
 *
 * Results:
 *     The contents of the queue's producer tail.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return (qp_atomic_read_offset(&qh->producer_tail));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_consumer_head --
 *
 *     Helper routine to get the Consumer Head from the supplied queue.
 *
 * Results:
 *     The contents of the queue's consumer head.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return (qp_atomic_read_offset(&qh->consumer_head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_producer_tail --
 *
 *     Helper routine to increment the Producer Tail. Fundamentally,
 *     qp_add_pointer() is used to manipulate the tail itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_producer_tail(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

	qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_consumer_head --
 *
 *     Helper routine to increment the Consumer Head. Fundamentally,
 *     qp_add_pointer() is used to manipulate the head itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_consumer_head(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

	qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_get_pointers --
 *
 *     Helper routine for getting the head and the tail pointer for a queue.
 *     Both queue headers of the pair are needed to get both pointers for one
 *     queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_get_pointers(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header, uint64_t *producer_tail,
    uint64_t *consumer_head)
{

	if (producer_tail)
		*producer_tail =
		    vmci_queue_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head =
		    vmci_queue_header_consumer_head(consume_q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_reset_pointers --
 *
 *     Reset the tail pointer (of "this" queue) and the head pointer (of "peer"
 *     queue).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_reset_pointers(struct vmci_queue_header *q_header)
{

	qp_atomic_write_offset(&q_header->producer_tail, CONST64U(0));
	qp_atomic_write_offset(&q_header->consumer_head, CONST64U(0));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_init --
 *
 *     Initializes a queue's state (head & tail pointers).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_init(struct vmci_queue_header *q_header,
    const struct vmci_handle handle)
{

	q_header->handle = handle;
	vmci_queue_header_reset_pointers(q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_free_space --
 *
 *     Finds available free space in a produce queue to enqueue more data or
 *     reports an error if queue pair corruption is detected.
 *
 * Results:
 *     Free space size in bytes or an error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_free_space(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header,
    const uint64_t produce_q_size)
{
	uint64_t free_space;
	uint64_t head;
	uint64_t tail;

	tail = vmci_queue_header_producer_tail(produce_q_header);
	head = vmci_queue_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return (VMCI_ERROR_INVALID_SIZE);

	/*
	 * Deduct 1 to avoid tail becoming equal to head which causes ambiguity.
	 * If head and tail are equal it means that the queue is empty.
	 */

	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return (free_space);
}
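
/*
 * Illustrative check of the wrapped case (assumed numbers): with
 * produce_q_size == 16, tail == 2 and head == 10, the enqueued data occupies
 * bytes 10..15 and 0..1, so free_space == 10 - 2 - 1 == 7. One byte is always
 * left unused so that an empty queue (tail == head) is distinguishable from a
 * full one.
 */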

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_buf_ready --
 *
 *     vmci_queue_header_free_space() does all the heavy lifting of determining
 *     the number of free bytes in a Queue. This routine then subtracts that
 *     size from the full size of the Queue so the caller knows how many bytes
 *     are ready to be dequeued.
 *
 * Results:
 *     On success, available data size in bytes (up to MAX_INT64).
 *     On failure, appropriate error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_buf_ready(const struct vmci_queue_header *consume_q_header,
    const struct vmci_queue_header *produce_q_header,
    const uint64_t consume_q_size)
{
	int64_t free_space;

	free_space = vmci_queue_header_free_space(consume_q_header,
	    produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return (free_space);
	else
		return (consume_q_size - free_space - 1);
}
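
/*
 * Continuing the example above (assumed numbers): with consume_q_size == 16
 * and vmci_queue_header_free_space() reporting 7 free bytes, this routine
 * reports 16 - 7 - 1 == 8 bytes ready to be dequeued.
 */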

#endif /* !_VMCI_DEFS_H_ */