/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Chris Torek <torek @ torek net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _BHYVE_VIRTIO_H_
#define	_BHYVE_VIRTIO_H_

#include <pthread_np.h>
#include <machine/atomic.h>

#include <dev/virtio/virtio.h>
#ifdef __FreeBSD__
#include <dev/virtio/virtio_ring.h>
#include <dev/virtio/pci/virtio_pci_var.h>
#endif

/*
 * These are derived from several virtio specifications.
 *
 * Some useful links:
 *	https://github.com/rustyrussell/virtio-spec
 *	http://people.redhat.com/pbonzini/virtio-spec.pdf
 */

/*
 * A virtual device has zero or more "virtual queues" (virtqueue).
 * Each virtqueue uses at least two 4096-byte pages, laid out thus:
 *
 *	+-----------------------------------------------+
 *	|    "desc":  <N> descriptors, 16 bytes each    |
 *	|   -----------------------------------------   |
 *	|   "avail":   2 uint16; <N> uint16; 1 uint16   |
 *	|   -----------------------------------------   |
 *	|              pad to 4k boundary               |
 *	+-----------------------------------------------+
 *	|    "used": 2 x uint16; <N> elems; 1 uint16    |
 *	|   -----------------------------------------   |
 *	|              pad to 4k boundary               |
 *	+-----------------------------------------------+
 *
 * The number <N> that appears here is always a power of two and is
 * limited to no more than 32768 (as it must fit in a 16-bit field).
 * If <N> is sufficiently large, the above will occupy more than
 * two pages.  In any case, all pages must be physically contiguous
 * within the guest's physical address space.
 *
 * The <N> 16-byte "desc" descriptors consist of a 64-bit guest
 * physical address <addr>, a 32-bit length <len>, a 16-bit
 * <flags>, and a 16-bit <next> field (all in guest byte order).
 *
 * There are three flags that may be set:
 *	NEXT	descriptor is chained, so use its "next" field
 *	WRITE	descriptor is for host to write into guest RAM
 *		(else host is to read from guest RAM)
 *	INDIRECT descriptor address field is (guest physical)
 *		address of a linear array of descriptors
 *
 * Unless INDIRECT is set, <len> is the number of bytes that may
 * be read/written from guest physical address <addr>.  If
 * INDIRECT is set, WRITE is ignored and <len> provides the length
 * of the indirect descriptors (and <len> must be a multiple of
 * 16).  Note that NEXT may still be set in the main descriptor
 * pointing to the indirect, and should be set in each indirect
 * descriptor that uses the next descriptor (these should generally
 * be numbered sequentially).  However, INDIRECT must not be set
 * in the indirect descriptors.  Upon reaching an indirect descriptor
 * without a NEXT bit, control returns to the direct descriptors.
 *
 * Except inside an indirect, each <next> value must be in the
 * range [0 .. N) (i.e., the half-open interval).  (Inside an
 * indirect, each <next> must be in the range [0 .. <len>/16).)
 *
 * The "avail" data structures reside in the same pages as the
 * "desc" structures since both together are used by the device to
 * pass information to the hypervisor's virtual driver.  These
 * begin with a 16-bit <flags> field and 16-bit index <idx>, then
 * have <N> 16-bit <ring> values, followed by one final 16-bit
 * field <used_event>.  The <N> <ring> entries are simply indices
 * into the descriptor ring (and thus must meet the same
 * constraints as each <next> value).  However, <idx> is counted
 * up from 0 (initially) and simply wraps around after 65535; it
 * is taken mod <N> to find the next available entry.
 *
 * The "used" ring occupies a separate page or pages, and contains
 * values written from the virtual driver back to the guest OS.
 * This begins with a 16-bit <flags> and 16-bit <idx>, then there
 * are <N> "vring_used" elements, followed by a 16-bit <avail_event>.
 * The <N> "vring_used" elements consist of a 32-bit <id> and a
 * 32-bit <len> (vu_tlen below).  The <id> is simply the index of
 * the head of a descriptor chain the guest made available
 * earlier, and the <len> is the number of bytes actually written,
 * e.g., in the case of a network driver that provided a large
 * receive buffer but received only a small amount of data.
 *
 * The two event fields, <used_event> and <avail_event>, in the
 * avail and used rings (respectively -- note the reversal!), are
 * always provided, but are used only if the virtual device
 * negotiates the VIRTIO_RING_F_EVENT_IDX feature during feature
 * negotiation.  Similarly, both rings provide a flag --
 * VRING_AVAIL_F_NO_INTERRUPT and VRING_USED_F_NO_NOTIFY -- in
 * their <flags> field, indicating that the guest does not need an
 * interrupt, or that the hypervisor driver does not need a
 * notify, when descriptors are added to the corresponding ring.
 * (These are provided only for interrupt optimization and need
 * not be implemented.)
 */
#define	VRING_ALIGN	4096
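
/*
 * For illustration only (a sketch, not part of this interface): given
 * the layout above, the device side finds the head of the next
 * available descriptor chain by indexing the avail ring with its own
 * free-running counter taken mod <N>.  The names "avail", "last_avail",
 * and "N" below are hypothetical locals.
 */
#if 0
	uint16_t head;

	/* N is a power of two, so masking is the same as mod N. */
	head = avail->ring[last_avail++ & (N - 1)];
	/* "head" indexes the chain's first entry in the "desc" table. */
#endif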

/*
 * The address of any given virtual queue is determined by a single
 * Page Frame Number register.  The guest writes the PFN into the
 * PCI config space.  However, a device that has two or more
 * virtqueues can have a different PFN, and size, for each queue.
 * The number of queues is determinable via the PCI config space
 * VTCFG_R_QSEL register.  Writes to QSEL select the queue: 0 means
 * queue #0, 1 means queue #1, etc.  Once a queue is selected, the
 * remaining PFN and QNUM registers refer to that queue.
 *
 * QNUM is a read-only register containing a nonzero power of two
 * that indicates the (hypervisor's) queue size.  Or, if reading it
 * produces zero, the hypervisor does not have a corresponding
 * queue.  (The number of possible queues depends on the virtual
 * device.  The block device has just one; the network device
 * provides either two -- 0 = receive, 1 = transmit -- or three,
 * with 2 = control.)
 *
 * PFN is a read/write register giving the physical page address of
 * the virtqueue in guest memory (the guest must allocate enough space
 * based on the hypervisor's provided QNUM).
 *
 * QNOTIFY is effectively write-only: when the guest writes a queue
 * number to the register, the hypervisor should scan the specified
 * virtqueue.  (Reading QNOTIFY currently always gets 0.)
 */

/*
 * PFN register shift amount
 */
#define	VRING_PFN		12
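
/*
 * A sketch of the guest-side sequence implied by the registers above,
 * for illustration only.  cfg_read16()/cfg_write32() stand in for
 * whatever config-space accessors the guest uses, QSEL/QNUM/PFN name
 * the registers described in the comment, guest_alloc_contig() is a
 * hypothetical allocator of guest-contiguous pages, and
 * vring_size_aligned() is defined just below.
 */
#if 0
	uint16_t qnum;
	uint64_t gpa;

	cfg_write16(QSEL, 0);			/* select queue #0 */
	qnum = cfg_read16(QNUM);		/* hypervisor's queue size */
	if (qnum == 0)
		return;				/* no such queue */
	gpa = guest_alloc_contig(vring_size_aligned(qnum));
	cfg_write32(PFN, gpa >> VRING_PFN);	/* hand the ring to the host */
#endif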

/*
 * PCI vendor/device IDs
 */
#define	VIRTIO_VENDOR		0x1AF4
#define	VIRTIO_DEV_NET		0x1000
#define	VIRTIO_DEV_BLOCK	0x1001
#define	VIRTIO_DEV_CONSOLE	0x1003
#define	VIRTIO_DEV_SCSI		0x1004
#define	VIRTIO_DEV_RANDOM	0x1005
#define	VIRTIO_DEV_9P		0x1009
#define	VIRTIO_DEV_INPUT	0x1052

/*
 * PCI revision IDs
 */
#define	VIRTIO_REV_INPUT	1

/*
 * PCI subvendor IDs
 */
#define	VIRTIO_SUBVEN_INPUT	0x108E

/*
 * PCI subdevice IDs
 */
#define	VIRTIO_SUBDEV_INPUT	0x1100

/* From section 2.3, "Virtqueue Configuration", of the virtio specification */
static inline int
vring_size_aligned(u_int qsz)
{
	return (roundup2(vring_size(qsz, VRING_ALIGN), VRING_ALIGN));
}
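
/*
 * A worked example, assuming the standard vring layout from
 * <dev/virtio/virtio_ring.h>: for qsz = 64 the "desc" table is 1024
 * bytes and the "avail" ring 134 bytes, which pad out to one 4096-byte
 * page; the "used" ring is 518 bytes and pads to a second page, so
 * vring_size_aligned(64) == 8192 -- the two-page minimum pictured
 * earlier.
 */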

struct pci_devinst;
struct vqueue_info;

/*
 * A virtual device, with some number (possibly 0) of virtual
 * queues and some size (possibly 0) of configuration-space
 * registers private to the device.  The virtio_softc should come
 * at the front of each "derived class", so that a pointer to the
 * virtio_softc is also a pointer to the more specific, derived-
 * from-virtio driver's softc.
 *
 * Note: inside each hypervisor virtio driver, changes to these
 * data structures must be locked against other threads, if any.
 * Except for PCI config space register read/write, we assume each
 * driver does the required locking, but we need a pointer to the
 * lock (if there is one) for PCI config space read/write ops.
 *
 * When the guest reads or writes the device's config space, the
 * generic layer checks for operations on the special registers
 * described above.  If the offset of the register(s) being read
 * or written is past the CFG area (CFG0 or CFG1), the request is
 * passed on to the virtual device, after subtracting off the
 * generic-layer size.  (So, drivers can just use the offset as
 * an offset into "struct config", for instance.)
 *
 * (The virtio layer also makes sure that the read or write is to/
 * from a "good" config offset, hence vc_cfgsize, and on BAR #0.
 * However, the driver must verify the read or write size and offset
 * and that no one is writing a readonly register.)
 *
 * The BROKED flag ("this thing done gone and broked") is for future
 * use.
 */
#define	VIRTIO_USE_MSIX		0x01
#define	VIRTIO_EVENT_IDX	0x02	/* use the event-index values */
#define	VIRTIO_BROKED		0x08	/* ??? */

struct virtio_softc {
	struct virtio_consts *vs_vc;	/* constants (see below) */
	int	vs_flags;		/* VIRTIO_* flags from above */
	pthread_mutex_t *vs_mtx;	/* POSIX mutex, if any */
	struct pci_devinst *vs_pi;	/* PCI device instance */
	uint32_t vs_negotiated_caps;	/* negotiated capabilities */
	struct vqueue_info *vs_queues;	/* one per vc_nvq */
	int	vs_curq;		/* current queue */
	uint8_t	vs_status;		/* value from last status write */
	uint8_t	vs_isr;			/* ISR flags, if not MSI-X */
	uint16_t vs_msix_cfg_idx;	/* MSI-X vector for config event */
};

#define	VS_LOCK(vs)							\
do {									\
	if (vs->vs_mtx)							\
		pthread_mutex_lock(vs->vs_mtx);				\
} while (0)

#define	VS_UNLOCK(vs)							\
do {									\
	if (vs->vs_mtx)							\
		pthread_mutex_unlock(vs->vs_mtx);			\
} while (0)

struct virtio_consts {
	const char *vc_name;		/* name of driver (for diagnostics) */
	int	vc_nvq;			/* number of virtual queues */
	size_t	vc_cfgsize;		/* size of dev-specific config regs */
	void	(*vc_reset)(void *);	/* called on virtual device reset */
	void	(*vc_qnotify)(void *, struct vqueue_info *);
					/* called on QNOTIFY if no VQ notify */
	int	(*vc_cfgread)(void *, int, int, uint32_t *);
					/* called to read config regs */
	int	(*vc_cfgwrite)(void *, int, int, uint32_t);
					/* called to write config regs */
	void	(*vc_apply_features)(void *, uint64_t);
					/* called to apply negotiated features */
	uint64_t vc_hv_caps;		/* hypervisor-provided capabilities */
};
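
/*
 * A minimal sketch of how a device emulation might fill this in.  The
 * "myvtdev" names, callbacks, and config struct are hypothetical, not
 * part of bhyve:
 */
#if 0
static struct virtio_consts myvtdev_vi_consts = {
	.vc_name = "myvtdev",
	.vc_nvq = 1,
	.vc_cfgsize = sizeof(struct myvtdev_config),
	.vc_reset = myvtdev_reset,
	.vc_qnotify = myvtdev_notify,
	.vc_cfgread = myvtdev_cfgread,
	.vc_cfgwrite = myvtdev_cfgwrite,
	.vc_apply_features = NULL,
	.vc_hv_caps = VIRTIO_RING_F_INDIRECT_DESC,
};
#endif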

/*
 * Data structure allocated (statically) per virtual queue.
 *
 * Drivers may change vq_qsize after a reset.  When the guest OS
 * requests a device reset, the hypervisor first calls
 * vs->vs_vc->vc_reset(); then the data structure below is
 * reinitialized (for each virtqueue: vs->vs_vc->vc_nvq).
 *
 * The remaining fields should only be fussed-with by the generic
 * code.
 *
 * Note: the addresses of vq_desc, vq_avail, and vq_used are all
 * computable from each other, but it's a lot simpler if we just
 * keep a pointer to each one.  The event indices are similarly
 * (but more easily) computable, and this time we'll compute them:
 * they're just XX_ring[N].
 */
#define	VQ_ALLOC	0x01	/* set once we have a pfn */
#define	VQ_BROKED	0x02	/* ??? */
struct vqueue_info {
	uint16_t vq_qsize;	/* size of this queue (a power of 2) */
	void	(*vq_notify)(void *, struct vqueue_info *);
				/* called instead of vc_notify, if not NULL */

	struct virtio_softc *vq_vs;	/* backpointer to softc */
	uint16_t vq_num;	/* we're the num'th queue in the softc */

	uint16_t vq_flags;	/* flags (see above) */
	uint16_t vq_last_avail;	/* a recent value of vq_avail->idx */
	uint16_t vq_next_used;	/* index of the next used slot to be filled */
	uint16_t vq_save_used;	/* saved vq_used->idx; see vq_endchains */
	uint16_t vq_msix_idx;	/* MSI-X index, or VIRTIO_MSI_NO_VECTOR */

	uint32_t vq_pfn;	/* PFN of virt queue (not shifted!) */

	struct vring_desc *vq_desc;	/* descriptor array */
	struct vring_avail *vq_avail;	/* the "avail" ring */
	struct vring_used *vq_used;	/* the "used" ring */
};

/* as noted above, these are sort of backwards, name-wise */
#define	VQ_AVAIL_EVENT_IDX(vq) \
	(*(uint16_t *)&(vq)->vq_used->ring[(vq)->vq_qsize])
#define	VQ_USED_EVENT_IDX(vq) \
	((vq)->vq_avail->ring[(vq)->vq_qsize])
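
/*
 * When VIRTIO_RING_F_EVENT_IDX has been negotiated, the wrap-safe test
 * from the specification's vring_need_event() decides whether a
 * used-ring update should interrupt the guest.  A sketch, for
 * illustration only, where new_idx is the just-published vq_used->idx
 * and old_idx its previous value:
 */
#if 0
	uint16_t event = VQ_USED_EVENT_IDX(vq);

	if ((uint16_t)(new_idx - event - 1) < (uint16_t)(new_idx - old_idx))
		/* deliver the interrupt (see vq_interrupt() below) */;
#endif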

/*
 * Is this ring ready for I/O?
 */
static inline int
vq_ring_ready(struct vqueue_info *vq)
{
	return (vq->vq_flags & VQ_ALLOC);
}

/*
 * Are there "available" descriptors?  (This does not count
 * how many, just returns True if there are some.)
 */
static inline int
vq_has_descs(struct vqueue_info *vq)
{
	return (vq_ring_ready(vq) && vq->vq_last_avail !=
	    vq->vq_avail->idx);
}

/*
 * Deliver an interrupt to the guest for a specific MSI-X queue or
 * event.
 */
static inline void
vi_interrupt(struct virtio_softc *vs, uint8_t isr, uint16_t msix_idx)
{
	if (pci_msix_enabled(vs->vs_pi))
		pci_generate_msix(vs->vs_pi, msix_idx);
	else {
#ifndef __FreeBSD__
		boolean_t unlock = B_FALSE;

		if (vs->vs_mtx && !pthread_mutex_isowned_np(vs->vs_mtx)) {
			unlock = B_TRUE;
			pthread_mutex_lock(vs->vs_mtx);
		}
#else
		VS_LOCK(vs);
#endif
		vs->vs_isr |= isr;
		pci_generate_msi(vs->vs_pi, 0);
		pci_lintr_assert(vs->vs_pi);
#ifndef __FreeBSD__
		if (unlock)
			pthread_mutex_unlock(vs->vs_mtx);
#else
		VS_UNLOCK(vs);
#endif
	}
}

/*
 * Deliver an interrupt to the guest on the given virtual queue (if
 * possible, or a generic MSI interrupt if not using MSI-X).
 */
static inline void
vq_interrupt(struct virtio_softc *vs, struct vqueue_info *vq)
{
	vi_interrupt(vs, VIRTIO_PCI_ISR_INTR, vq->vq_msix_idx);
}

static inline void
vq_kick_enable(struct vqueue_info *vq)
{
	vq->vq_used->flags &= ~VRING_USED_F_NO_NOTIFY;
	/*
	 * Full memory barrier to make sure the store to vq_used->flags
	 * happens before the load from vq_avail->idx, which results from a
	 * subsequent call to vq_has_descs().
	 */
	atomic_thread_fence_seq_cst();
}

static inline void
vq_kick_disable(struct vqueue_info *vq)
{
	vq->vq_used->flags |= VRING_USED_F_NO_NOTIFY;
}
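
/*
 * Typical use of the kick-control pair above (a sketch, not a fixed
 * contract): a device thread disables guest notifications while it
 * drains the ring, then re-enables them and re-checks, closing the
 * window where the guest adds a chain after the final vq_has_descs()
 * but before notifications were re-enabled.  process_one_chain() is a
 * hypothetical helper.
 */
#if 0
	for (;;) {
		vq_kick_disable(vq);
		while (vq_has_descs(vq))
			process_one_chain(vq);
		vq_kick_enable(vq);	/* includes the memory barrier */
		if (!vq_has_descs(vq))
			break;
	}
#endif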

struct iovec;

/*
 * Request description returned by vq_getchain.
 *
 * Writable iovecs start at iov[req.readable].
 */
struct vi_req {
	int	readable;	/* num of readable iovecs */
	int	writable;	/* num of writable iovecs */
	unsigned int idx;	/* ring index */
};

void	vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
	    void *dev_softc, struct pci_devinst *pi,
	    struct vqueue_info *queues);
int	vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix);
void	vi_reset_dev(struct virtio_softc *);
void	vi_set_io_bar(struct virtio_softc *, int);

int	vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
	    struct vi_req *reqp);
void	vq_retchains(struct vqueue_info *vq, uint16_t n_chains);
void	vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx,
	    uint32_t iolen);
void	vq_relchain_publish(struct vqueue_info *vq);
void	vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);
void	vq_endchains(struct vqueue_info *vq, int used_all_avail);
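
/*
 * A sketch of the usual request loop built from the functions above,
 * for illustration only: "len" is however many bytes the device wrote
 * back into the writable iovecs, and error handling is elided.
 */
#if 0
	struct iovec iov[8];
	struct vi_req req;
	int n;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, iov, nitems(iov), &req);
		if (n <= 0)
			break;
		/*
		 * iov[0 .. req.readable) is device-readable; the rest,
		 * iov[req.readable .. n), is device-writable.
		 */
		vq_relchain(vq, req.idx, len);	/* publish <id> and <len> */
	}
	vq_endchains(vq, 1);	/* used_all_avail: interrupt if needed */
#endif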

uint64_t vi_pci_read(struct pci_devinst *pi, int baridx, uint64_t offset,
	    int size);
void	vi_pci_write(struct pci_devinst *pi, int baridx, uint64_t offset,
	    int size, uint64_t value);

#ifndef __FreeBSD__
void	vi_vq_init(struct virtio_softc *, uint32_t);
#endif

#endif	/* _BHYVE_VIRTIO_H_ */