/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 */

#ifndef _VIRTIO_IMPL_H
#define	_VIRTIO_IMPL_H

/*
 * VIRTIO FRAMEWORK: FRAMEWORK-PRIVATE DEFINITIONS
 *
 * For design and usage documentation, see the comments in "virtio.h".
 *
 * NOTE: Client drivers should not use definitions from this file.
 */

#include <sys/types.h>
#include <sys/dditypes.h>
#include <sys/list.h>
#include <sys/ccompile.h>

#include "virtio.h"

#ifdef __cplusplus
extern "C" {
#endif

extern ddi_device_acc_attr_t virtio_acc_attr;
extern ddi_dma_attr_t virtio_dma_attr;

typedef struct virtio_vq_desc virtio_vq_desc_t;
typedef struct virtio_vq_driver virtio_vq_driver_t;
typedef struct virtio_vq_device virtio_vq_device_t;
typedef struct virtio_vq_elem virtio_vq_elem_t;

int virtio_dma_init(virtio_t *, virtio_dma_t *, size_t, const ddi_dma_attr_t *,
    int, int);
void virtio_dma_fini(virtio_dma_t *);

typedef enum virtio_dma_level {
	VIRTIO_DMALEVEL_HANDLE_ALLOC =	(1ULL << 0),
	VIRTIO_DMALEVEL_MEMORY_ALLOC =	(1ULL << 1),
	VIRTIO_DMALEVEL_HANDLE_BOUND =	(1ULL << 2),
	VIRTIO_DMALEVEL_COOKIE_ARRAY =	(1ULL << 3),
} virtio_dma_level_t;

struct virtio_dma {
	virtio_dma_level_t		vidma_level;
	virtio_t			*vidma_virtio;
	caddr_t				vidma_va;
	size_t				vidma_size;
	size_t				vidma_real_size;
	ddi_dma_handle_t		vidma_dma_handle;
	ddi_acc_handle_t		vidma_acc_handle;
	uint_t				vidma_dma_ncookies;
	ddi_dma_cookie_t		*vidma_dma_cookies;
};

typedef enum virtio_initlevel {
	VIRTIO_INITLEVEL_REGS =		(1ULL << 0),
	VIRTIO_INITLEVEL_PROVIDER =	(1ULL << 1),
	VIRTIO_INITLEVEL_INT_ALLOC =	(1ULL << 2),
	VIRTIO_INITLEVEL_INT_ADDED =	(1ULL << 3),
	VIRTIO_INITLEVEL_INT_ENABLED =	(1ULL << 4),
	VIRTIO_INITLEVEL_SHUTDOWN =	(1ULL << 5),
} virtio_initlevel_t;

struct virtio {
	dev_info_t			*vio_dip;

	kmutex_t			vio_mutex;

	virtio_initlevel_t		vio_initlevel;

	list_t				vio_queues;

	ddi_acc_handle_t		vio_barh;
	caddr_t				vio_bar;
	uint_t				vio_config_offset;

	uint32_t			vio_features;
	uint32_t			vio_features_device;

	ddi_intr_handle_t		*vio_interrupts;
	int				vio_ninterrupts;
	int				vio_interrupt_type;
	int				vio_interrupt_cap;
	uint_t				vio_interrupt_priority;

	ddi_intr_handler_t		*vio_cfgchange_handler;
	void				*vio_cfgchange_handlerarg;
	boolean_t			vio_cfgchange_handler_added;
	uint_t				vio_cfgchange_handler_index;
};

struct virtio_queue {
	virtio_t			*viq_virtio;
	kmutex_t			viq_mutex;
	const char			*viq_name;
	list_node_t			viq_link;

	boolean_t			viq_shutdown;
	boolean_t			viq_indirect;
	uint_t				viq_max_segs;

	/*
	 * Each Virtio device type has some set of queues for data transfer to
	 * and from the host.  This index is described in the specification for
	 * the particular device and queue type, and written to QUEUE_SELECT to
	 * allow interaction with the queue.  For example, a network device has
	 * at least a receive queue with index 0, and a transmit queue with
	 * index 1.
	 */
	uint16_t			viq_index;

	/*
	 * For legacy Virtio devices, the size and shape of the queue is
	 * determined entirely by the number of queue entries.
	 */
	uint16_t			viq_size;
	id_space_t			*viq_descmap;

	/*
	 * The memory shared between the device and the driver is allocated as
	 * a large physically contiguous chunk.  Access to this area is
	 * through three pointers to packed structures.
	 */
	virtio_dma_t			viq_dma;
	virtio_vq_desc_t		*viq_dma_descs;
	virtio_vq_driver_t		*viq_dma_driver;
	virtio_vq_device_t		*viq_dma_device;

	uint16_t			viq_device_index;
	uint16_t			viq_driver_index;

	/*
	 * Interrupt handler function, or NULL if not provided.
	 */
	ddi_intr_handler_t		*viq_func;
	void				*viq_funcarg;
	boolean_t			viq_handler_added;
	uint_t				viq_handler_index;

	/*
	 * When a chain is submitted to the queue, it is also stored in this
	 * AVL tree keyed by the index of the first descriptor in the chain.
	 */
	avl_tree_t			viq_inflight;
};

struct virtio_chain {
	virtio_queue_t			*vic_vq;
	avl_node_t			vic_node;

	void				*vic_data;

	uint16_t			vic_head;
	uint32_t			vic_received_length;

	virtio_dma_t			vic_indirect_dma;
	uint_t				vic_indirect_capacity;
	uint_t				vic_indirect_used;

	uint_t				vic_direct_capacity;
	uint_t				vic_direct_used;
	uint16_t			vic_direct[];
};

/*
 * PACKED STRUCTS FOR DEVICE ACCESS
 */

struct virtio_vq_desc {
	/*
	 * Buffer physical address and length.
	 */
	uint64_t			vqd_addr;
	uint32_t			vqd_len;

	/*
	 * Flags.  Use with the VIRTQ_DESC_F_* family of constants.  See below.
	 */
	uint16_t			vqd_flags;

	/*
	 * If VIRTQ_DESC_F_NEXT is set in flags, this refers to the next
	 * descriptor in the chain by table index.
	 */
	uint16_t			vqd_next;
} __packed;

/*
 * VIRTIO DESCRIPTOR FLAGS (vqd_flags)
 */

/*
 * NEXT:
 *	Signals that this descriptor (direct or indirect) is part of a chain.
 *	If populated, "vqd_next" names the next descriptor in the chain by its
 *	table index.
 */
#define	VIRTQ_DESC_F_NEXT		(1 << 0)

/*
 * WRITE:
 *	Determines whether this buffer is to be written by the device (WRITE is
 *	set) or by the driver (WRITE is not set).
 */
#define	VIRTQ_DESC_F_WRITE		(1 << 1)

/*
 * INDIRECT:
 *	This bit signals that a direct descriptor refers to an indirect
 *	descriptor list, rather than directly to a buffer.  This bit may only
 *	be used in a direct descriptor; indirect descriptors are not allowed to
 *	refer to additional layers of indirect tables.  If this bit is set,
 *	NEXT must be clear; indirect descriptors may not be chained.
 */
#define	VIRTQ_DESC_F_INDIRECT		(1 << 2)
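
/*
 * As an illustrative sketch (not part of the framework), a two-descriptor
 * chain in which the driver supplies one outbound buffer and receives one
 * inbound buffer could be expressed with the structures and flags above
 * roughly as follows, assuming hypothetical table entries 0 and 1 and DMA
 * cookies "out_cookie" and "in_cookie":
 *
 *	virtio_vq_desc_t *vqd = viq->viq_dma_descs;
 *
 *	vqd[0].vqd_addr = out_cookie.dmac_laddress;
 *	vqd[0].vqd_len = out_len;
 *	vqd[0].vqd_flags = VIRTQ_DESC_F_NEXT;
 *	vqd[0].vqd_next = 1;
 *
 *	vqd[1].vqd_addr = in_cookie.dmac_laddress;
 *	vqd[1].vqd_len = in_len;
 *	vqd[1].vqd_flags = VIRTQ_DESC_F_WRITE;	(device writes this buffer)
 *	vqd[1].vqd_next = 0;			(ignored; NEXT is not set)
 */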

/*
 * This structure is variously known as the "available" or "avail" ring, or the
 * driver-owned portion of the queue structure.  It is used by the driver to
 * submit descriptor chains to the device.
 */
struct virtio_vq_driver {
	uint16_t			vqdr_flags;
	uint16_t			vqdr_index;
	uint16_t			vqdr_ring[];
} __packed;

#define	VIRTQ_AVAIL_F_NO_INTERRUPT	(1 << 0)

/*
 * We use the sizeof operator on this packed struct to calculate the offset of
 * subsequent structs.  Ensure the compiler is not adding any padding to the
 * end of the struct.
 */
CTASSERT(sizeof (virtio_vq_driver_t) ==
    offsetof(virtio_vq_driver_t, vqdr_ring));
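
/*
 * To make a chain visible to the device, the driver stores the table index of
 * the head descriptor in the next free "vqdr_ring" slot and then advances
 * "vqdr_index"; the index increments without bound and is reduced modulo the
 * queue size only when used as a ring offset.  A minimal sketch of that
 * submission step (the framework's own code in virtio.c also handles DMA
 * synchronisation and device notification) might look like:
 *
 *	virtio_vq_driver_t *vqdr = viq->viq_dma_driver;
 *	uint16_t slot = viq->viq_driver_index % viq->viq_size;
 *
 *	vqdr->vqdr_ring[slot] = chain_head_index;
 *	membar_producer();
 *	vqdr->vqdr_index = ++viq->viq_driver_index;
 */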

struct virtio_vq_elem {
	/*
	 * The device returns chains of descriptors by specifying the table
	 * index of the first descriptor in the chain.
	 */
	uint32_t			vqe_start;
	uint32_t			vqe_len;
} __packed;

/*
 * This structure is variously known as the "used" ring, or the device-owned
 * portion of the queue structure.  It is used by the device to return
 * completed descriptor chains to the driver.
 */
struct virtio_vq_device {
	uint16_t			vqde_flags;
	uint16_t			vqde_index;
	virtio_vq_elem_t		vqde_ring[];
} __packed;

#define	VIRTQ_USED_F_NO_NOTIFY		(1 << 0)
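
/*
 * The device advances "vqde_index" as it completes chains.  A minimal sketch
 * of how the driver could walk newly used entries (assuming the appropriate
 * DMA synchronisation and locking has already been performed) is:
 *
 *	virtio_vq_device_t *vqde = viq->viq_dma_device;
 *
 *	while (viq->viq_device_index != vqde->vqde_index) {
 *		virtio_vq_elem_t *vqe =
 *		    &vqde->vqde_ring[viq->viq_device_index % viq->viq_size];
 *		uint16_t head = (uint16_t)vqe->vqe_start;
 *		uint32_t len = vqe->vqe_len;
 *
 *		viq->viq_device_index++;
 *	}
 *
 * Each returned "head" is then used to look up the in-flight chain in
 * "viq_inflight" so that it can be handed back to the client driver.
 */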

/*
 * BASIC CONFIGURATION
 *
 * Legacy devices expose both their generic and their device-specific
 * configuration through PCI BAR0.  This is the second entry in the register
 * address space set for these devices.
 */
#define	VIRTIO_LEGACY_PCI_BAR0		1
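
/*
 * A minimal sketch of mapping this register space with the DDI (the framework
 * performs the equivalent of this during initialisation in virtio.c, with
 * error handling omitted here):
 *
 *	caddr_t bar;
 *	ddi_acc_handle_t barh;
 *
 *	if (ddi_regs_map_setup(dip, VIRTIO_LEGACY_PCI_BAR0, &bar, 0, 0,
 *	    &virtio_acc_attr, &barh) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 */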

/*
 * These are offsets into the base configuration space available through the
 * virtio_get*() and virtio_put*() family of functions.  These offsets are for
 * what the specification describes as the "legacy" mode of device operation.
 */
#define	VIRTIO_LEGACY_FEATURES_DEVICE	0x00	/* 32 R   */
#define	VIRTIO_LEGACY_FEATURES_DRIVER	0x04	/* 32 R/W */
#define	VIRTIO_LEGACY_QUEUE_ADDRESS	0x08	/* 32 R/W */
#define	VIRTIO_LEGACY_QUEUE_SIZE	0x0C	/* 16 R   */
#define	VIRTIO_LEGACY_QUEUE_SELECT	0x0E	/* 16 R/W */
#define	VIRTIO_LEGACY_QUEUE_NOTIFY	0x10	/* 16 R/W */
#define	VIRTIO_LEGACY_DEVICE_STATUS	0x12	/* 8  R/W */
#define	VIRTIO_LEGACY_ISR_STATUS	0x13	/* 8  R   */

#define	VIRTIO_LEGACY_MSIX_CONFIG	0x14	/* 16 R/W */
#define	VIRTIO_LEGACY_MSIX_QUEUE	0x16	/* 16 R/W */

#define	VIRTIO_LEGACY_CFG_OFFSET	(VIRTIO_LEGACY_ISR_STATUS + 1)
#define	VIRTIO_LEGACY_CFG_OFFSET_MSIX	(VIRTIO_LEGACY_MSIX_QUEUE + 2)

#define	VIRTIO_LEGACY_MSI_NO_VECTOR	0xFFFF
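
/*
 * The queue-specific registers are banked: the driver first writes a queue
 * index to QUEUE_SELECT and then accesses QUEUE_SIZE, QUEUE_ADDRESS, and so
 * on, for that queue.  A minimal sketch using the raw DDI access routines
 * (the framework wraps these in its own register helpers in virtio.c):
 *
 *	ddi_put16(vio->vio_barh,
 *	    (uint16_t *)(vio->vio_bar + VIRTIO_LEGACY_QUEUE_SELECT), qidx);
 *	uint16_t qsz = ddi_get16(vio->vio_barh,
 *	    (uint16_t *)(vio->vio_bar + VIRTIO_LEGACY_QUEUE_SIZE));
 */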

/*
 * Bits in the Device Status byte (VIRTIO_LEGACY_DEVICE_STATUS):
 */
#define	VIRTIO_STATUS_RESET		0
#define	VIRTIO_STATUS_ACKNOWLEDGE	(1 << 0)
#define	VIRTIO_STATUS_DRIVER		(1 << 1)
#define	VIRTIO_STATUS_DRIVER_OK		(1 << 2)
#define	VIRTIO_STATUS_FAILED		(1 << 7)

/*
 * Bits in the Interrupt Service Routine Status byte
 * (VIRTIO_LEGACY_ISR_STATUS):
 */
#define	VIRTIO_ISR_CHECK_QUEUES		(1 << 0)
#define	VIRTIO_ISR_CHECK_CONFIG		(1 << 1)

/*
 * Bits in the Features fields (VIRTIO_LEGACY_FEATURES_DEVICE,
 * VIRTIO_LEGACY_FEATURES_DRIVER):
 */
#define	VIRTIO_F_RING_INDIRECT_DESC	(1ULL << 28)

/*
 * For devices operating in the legacy mode, virtqueues must be aligned on a
 * "page size" of 4096 bytes; this is also called the "Queue Align" value in
 * newer versions of the specification.
 */
#define	VIRTIO_PAGE_SHIFT		12
#define	VIRTIO_PAGE_SIZE		(1 << VIRTIO_PAGE_SHIFT)
CTASSERT(VIRTIO_PAGE_SIZE == 4096);
CTASSERT(ISP2(VIRTIO_PAGE_SIZE));
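
/*
 * Under the legacy layout, the descriptor table, the driver ("avail") ring,
 * and the device ("used") ring live in one physically contiguous allocation,
 * with the device ring beginning at the next VIRTIO_PAGE_SIZE boundary after
 * the driver ring.  A sketch of the resulting size arithmetic for a queue of
 * "qsz" entries (the framework's own computation lives in virtio.c and may
 * differ in detail):
 *
 *	size_t part1 = qsz * sizeof (virtio_vq_desc_t) +
 *	    sizeof (virtio_vq_driver_t) + qsz * sizeof (uint16_t);
 *	size_t part2 = sizeof (virtio_vq_device_t) +
 *	    qsz * sizeof (virtio_vq_elem_t);
 *	size_t total = P2ROUNDUP(part1, VIRTIO_PAGE_SIZE) + part2;
 */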

/*
 * DMA SYNCHRONISATION WRAPPERS
 */

/*
 * Synchronise the driver-owned portion of the queue so that the device can see
 * our writes.  This covers the memory accessed via the "viq_dma_descs" and
 * "viq_dma_driver" members.
 */
#define	VIRTQ_DMA_SYNC_FORDEV(viq)	VERIFY0(ddi_dma_sync( \
					    (viq)->viq_dma.vidma_dma_handle, \
					    0, \
					    (uintptr_t)(viq)->viq_dma_device - \
					    (uintptr_t)(viq)->viq_dma_descs, \
					    DDI_DMA_SYNC_FORDEV))

/*
 * Synchronise the device-owned portion of the queue so that we can see any
 * writes from the device.  This covers the memory accessed via the
 * "viq_dma_device" member.
 */
#define	VIRTQ_DMA_SYNC_FORKERNEL(viq)	VERIFY0(ddi_dma_sync( \
					    (viq)->viq_dma.vidma_dma_handle, \
					    (uintptr_t)(viq)->viq_dma_device - \
					    (uintptr_t)(viq)->viq_dma_descs, \
					    (viq)->viq_dma.vidma_size - \
					    ((uintptr_t)(viq)->viq_dma_device - \
					    (uintptr_t)(viq)->viq_dma_descs), \
					    DDI_DMA_SYNC_FORKERNEL))
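
/*
 * As a usage note, the expected pattern (a sketch, not framework code) is to
 * sync for the device after publishing descriptors and the driver ring index
 * but before notifying the device, and to sync for the kernel before reading
 * the device ring, e.g. in a queue interrupt handler or poll routine:
 *
 *	VIRTQ_DMA_SYNC_FORDEV(viq);
 *	(notify the device via QUEUE_NOTIFY)
 *	...
 *	VIRTQ_DMA_SYNC_FORKERNEL(viq);
 *	(walk newly used entries via "viq_dma_device")
 */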

#ifdef __cplusplus
}
#endif

#endif /* _VIRTIO_IMPL_H */