/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>
#include <machine/vmm_snapshot.h>

#include <dev/virtio/pci/virtio_pci_legacy_var.h>

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"

/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by the virtio specification
 * <https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this to convert.
 */
#define	DEV_SOFTC(vs) ((void *)(vs))
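
/*
 * Illustrative sketch (not part of the emulation): the constraint
 * above means a virtio device softc is expected to be laid out with
 * the virtio softc first, e.g. (hypothetical "vtfoo" device):
 *
 *	struct vtfoo_softc {
 *		struct virtio_softc vsc_vs;	(must come first)
 *		pthread_mutex_t vsc_mtx;
 *		...device-specific state...
 *	};
 */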

/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
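
/*
 * A minimal usage sketch, assuming the hypothetical vtfoo_softc
 * above (vtfoo_vi_consts and vsc_queues are likewise made up):
 * called once from the device's init routine, after which the
 * virtio softc, the device softc, and the PCI instance all point
 * at one another.
 *
 *	sc = calloc(1, sizeof(struct vtfoo_softc));
 *	pthread_mutex_init(&sc->vsc_mtx, NULL);
 *	vi_softc_linkup(&sc->vsc_vs, &vtfoo_vi_consts, sc, pi,
 *	    sc->vsc_queues);
 *	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;
 */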

/*
 * Reset device (device-wide).  This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	if (vs->vs_mtx)
		assert(pthread_mutex_isowned_np(vs->vs_mtx));

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_next_used = 0;
		vq->vq_save_used = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	if (vs->vs_isr)
		pci_lintr_deassert(vs->vs_pi);
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}

/*
 * Set I/O BAR (usually 0) to map the virtio register window.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use VIRTIO_PCI_CONFIG_OFF(0) if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VIRTIO_PCI_CONFIG_OFF(1) + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}
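
/*
 * For example, a device with an 8-byte config region gets a 32-byte
 * I/O BAR here: VIRTIO_PCI_CONFIG_OFF(1) is 24 (the 20-byte legacy
 * register block plus the two 16-bit MSI-X vector registers).
 */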

/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		VS_LOCK(vs);
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		VS_UNLOCK(vs);
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;

	/* Only 1 MSI vector for bhyve */
	pci_emul_add_msicap(vs->vs_pi, 1);

	/* Legacy interrupts are mandatory for virtio devices */
	pci_lintr_request(vs->vs_pi);

	return (0);
}
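
/*
 * For example, a hypothetical device with three queues (vc_nvq == 3)
 * requests nvec == 4 MSI-X vectors: one per queue plus one for
 * configuration-change notifications.
 */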

/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
static void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size_aligned(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct vring_desc *)base;
	base += vq->vq_qsize * sizeof(struct vring_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
	vq->vq_next_used = 0;
	vq->vq_save_used = 0;
}
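
/*
 * Worked example for a queue with vq_qsize == 64: the descriptor
 * table takes 64 * 16 = 1024 bytes and the avail ring another
 * (2 + 64 + 1) * 2 = 134 bytes, so the used ring starts at the next
 * VRING_ALIGN (4096) boundary, i.e. offset 4096, and
 * vring_size_aligned(64) maps 8192 bytes in all.
 */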

/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, volatile struct vring_desc *vd,
	   struct vmctx *ctx, struct iovec *iov, int n_iov,
	   struct vi_req *reqp)
{
	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
	iov[i].iov_len = vd->len;
	if ((vd->flags & VRING_DESC_F_WRITE) == 0)
		reqp->readable++;
	else
		reqp->writable++;
}
#define	VQ_MAX_DESCRIPTORS	512	/* see below */

/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request.  If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request.  This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1.  Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more.  We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the "flags" and "next" field of each
 * descriptor and tells you how many are involved.  Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size).  If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
	    struct vi_req *reqp)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, next;
	struct vi_req req;
	volatile struct vring_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;
	memset(&req, 0, sizeof(req));

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * "next" fields and "flags").
	 *
	 * Compute (vq_avail->idx - last_avail) in integers mod 2**16.  This is
	 * the number of descriptors the guest has made available
	 * since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		EPRINTLN(
		    "%s: ndesc (%u) out of range, driver confused?",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	req.idx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
	vq->vq_last_avail++;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
		if (next >= vq->vq_qsize) {
			EPRINTLN(
			    "%s: descriptor index %u out of range, "
			    "driver confused?",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, niov, &req);
			i++;
		} else if ((vs->vs_vc->vc_hv_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			EPRINTLN(
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?",
			    name);
			return (-1);
		} else {
			n_indir = vdir->len / 16;
			if ((vdir->len & 0xf) || n_indir == 0) {
				EPRINTLN(
				    "%s: invalid indir len 0x%x, "
				    "driver confused?",
				    name, (u_int)vdir->len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->addr, vdir->len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out.  Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->flags & VRING_DESC_F_INDIRECT) {
					EPRINTLN(
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, niov, &req);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->next;
				if (next >= n_indir) {
					EPRINTLN(
					    "%s: invalid next %u > %u, "
					    "driver confused?",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
			goto done;
	}

loopy:
	EPRINTLN(
	    "%s: descriptor loop? count > %d - driver confused?",
	    name, i);
	return (-1);

done:
	*reqp = req;
	return (i);
}
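
/*
 * A sketch of the canonical consumer loop implied by the contract
 * above, as it might appear in a hypothetical device's queue-notify
 * handler (VTFOO_MAXSEGS, iolen and the I/O step are made up):
 *
 *	struct vi_req req;
 *	struct iovec iov[VTFOO_MAXSEGS];
 *	int n;
 *
 *	while (vq_has_descs(vq)) {
 *		n = vq_getchain(vq, iov, VTFOO_MAXSEGS, &req);
 *		if (n <= 0)
 *			break;
 *		...perform the I/O described by iov[0..n-1]...
 *		vq_relchain(vq, req.idx, iolen);
 *	}
 *	vq_endchains(vq, 1);	(1 == used_all_avail)
 */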

/*
 * Return the first n_chains request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

	vq->vq_last_avail -= n_chains;
}

void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	volatile struct vring_used *vuh;
	volatile struct vring_used_elem *vue;
	uint16_t mask;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;

	vue = &vuh->ring[vq->vq_next_used++ & mask];
	vue->id = idx;
	vue->len = iolen;
}

void
vq_relchain_publish(struct vqueue_info *vq)
{
	/*
	 * Ensure the used descriptor is visible before updating the index.
	 * This is necessary on ISAs with memory ordering less strict than x86
	 * (and even on x86 to act as a compiler barrier).
	 */
	atomic_thread_fence_rel();
	vq->vq_used->idx = vq->vq_next_used;
}

/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	vq_relchain_prepare(vq, idx, iolen);
	vq_relchain_publish(vq);
}
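
/*
 * The prepare/publish split above lets a device batch completions:
 * a sketch, assuming several chains were pulled with vq_getchain()
 * and their head indices saved in a hypothetical reqs[] array:
 *
 *	for (i = 0; i < n; i++)
 *		vq_relchain_prepare(vq, reqs[i].idx, iolen[i]);
 *	vq_relchain_publish(vq);	(one fence, one idx update)
 */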

/*
 * The device emulation has finished processing "available" chains
 * and calling vq_relchain on each one.  If it used all the available
 * chains, used_all_avail should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt.  Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state at the moment it decided to finish
 * processing -- it's possible that descriptors became available after
 * that point.  (It's also typically a constant 1/True as well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	old_idx = vq->vq_save_used;
	vq->vq_save_used = new_idx = vq->vq_used->idx;

	/*
	 * Use full memory barrier between "idx" store from preceding
	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
	 * "flags" field below.
	 */
	atomic_thread_fence_seq_cst();
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
		intr = (uint16_t)(new_idx - event_idx - 1) <
			(uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}
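
/*
 * Worked example of the EVENT_IDX test above: with old_idx == 5 and
 * new_idx == 8, a guest-supplied event_idx of 6 gives
 * (uint16_t)(8 - 6 - 1) == 1 < (uint16_t)(8 - 5) == 3, so the
 * interrupt fires; event_idx == 9 gives 65534 < 3, so it is
 * suppressed.  The unsigned truncation keeps the test correct across
 * 16-bit wraparound of the indices.
 */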

/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VIRTIO_PCI_HOST_FEATURES,	4, 1, "HOST_FEATURES" },
	{ VIRTIO_PCI_GUEST_FEATURES,	4, 0, "GUEST_FEATURES" },
	{ VIRTIO_PCI_QUEUE_PFN,		4, 0, "QUEUE_PFN" },
	{ VIRTIO_PCI_QUEUE_NUM,		2, 1, "QUEUE_NUM" },
	{ VIRTIO_PCI_QUEUE_SEL,		2, 0, "QUEUE_SEL" },
	{ VIRTIO_PCI_QUEUE_NOTIFY,	2, 0, "QUEUE_NOTIFY" },
	{ VIRTIO_PCI_STATUS,		1, 0, "STATUS" },
	{ VIRTIO_PCI_ISR,		1, 0, "ISR" },
	{ VIRTIO_MSI_CONFIG_VECTOR,	2, 0, "CONFIG_VECTOR" },
	{ VIRTIO_MSI_QUEUE_VECTOR,	2, 0, "QUEUE_VECTOR" },
};

static inline struct config_reg *
vi_find_cr(int offset)
{
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}

/*
 * Handle reads from the virtio register window (I/O BAR).
 * If it's to the MSI-X info, do that.
 * If it's one of the standard virtio registers, do that.
 * Otherwise dispatch to the actual device's config-read routine.
 */
uint64_t
vi_pci_read(struct vmctx *ctx __unused, int vcpu __unused,
    struct pci_devinst *pi, int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		if (vc->vc_cfgread != NULL)
			error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff, size, &value);
		else
			error = 0;
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			EPRINTLN(
			    "%s: read from %s: bad size %d",
			    name, cr->cr_name, size);
		} else {
			EPRINTLN(
			    "%s: read from bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		value = vc->vc_hv_caps;
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		value = vs->vs_negotiated_caps;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		value = vs->vs_curq;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		value = 0;	/* XXX */
		break;
	case VIRTIO_PCI_STATUS:
		value = vs->vs_status;
		break;
	case VIRTIO_PCI_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0;		/* a read clears this flag */
		if (value)
			pci_lintr_deassert(pi);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		value = vs->vs_msix_cfg_idx;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}

/*
 * Handle writes to the virtio register window (I/O BAR).
 * If it's to the MSI-X info, do that.
 * If it's one of the standard virtio registers, do that.
 * Otherwise dispatch to the actual device's config-write routine.
 */
void
vi_pci_write(struct vmctx *ctx __unused, int vcpu __unused,
    struct pci_devinst *pi, int baridx, uint64_t offset, int size,
    uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		if (vc->vc_cfgwrite != NULL)
			error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff, size, value);
		else
			error = 0;
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				EPRINTLN(
				    "%s: write to %s: bad size %d",
				    name, cr->cr_name, size);
			if (cr->cr_ro)
				EPRINTLN(
				    "%s: write to read-only reg %s",
				    name, cr->cr_name);
		} else {
			EPRINTLN(
			    "%s: write to bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		if (vc->vc_apply_features)
			(*vc->vc_apply_features)(DEV_SOFTC(vs),
			    vs->vs_negotiated_caps);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		if (value >= vc->vc_nvq) {
			EPRINTLN("%s: queue %d notify out of range",
				name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			EPRINTLN(
			    "%s: qnotify queue %d: missing vq/vc notify",
				name, (int)value);
		break;
	case VIRTIO_PCI_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		vs->vs_msix_cfg_idx = value;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	EPRINTLN(
	    "%s: write config reg %s: curq %d >= max %d",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}

#ifdef BHYVE_SNAPSHOT
int
vi_pci_pause(struct vmctx *ctx __unused, struct pci_devinst *pi)
{
	struct virtio_softc *vs;
	struct virtio_consts *vc;

	vs = pi->pi_arg;
	vc = vs->vs_vc;

	assert(vc->vc_pause != NULL);
	(*vc->vc_pause)(DEV_SOFTC(vs));

	return (0);
}

int
vi_pci_resume(struct vmctx *ctx __unused, struct pci_devinst *pi)
{
	struct virtio_softc *vs;
	struct virtio_consts *vc;

	vs = pi->pi_arg;
	vc = vs->vs_vc;

	assert(vc->vc_resume != NULL);
	(*vc->vc_resume)(DEV_SOFTC(vs));

	return (0);
}

static int
vi_pci_snapshot_softc(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
{
	int ret;

	SNAPSHOT_VAR_OR_LEAVE(vs->vs_flags, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_negotiated_caps, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_curq, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_status, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_isr, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_msix_cfg_idx, meta, ret, done);

done:
	return (ret);
}

static int
vi_pci_snapshot_consts(struct virtio_consts *vc, struct vm_snapshot_meta *meta)
{
	int ret;

	SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_nvq, meta, ret, done);
	SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_cfgsize, meta, ret, done);
	SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_hv_caps, meta, ret, done);

done:
	return (ret);
}

static int
vi_pci_snapshot_queues(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
{
	int i;
	int ret = 0;	/* stay initialized even if there are no queues */
	struct virtio_consts *vc;
	struct vqueue_info *vq;
	uint64_t addr_size;

	vc = vs->vs_vc;

	/* Save virtio queue info */
	for (i = 0; i < vc->vc_nvq; i++) {
		vq = &vs->vs_queues[i];

		SNAPSHOT_VAR_CMP_OR_LEAVE(vq->vq_qsize, meta, ret, done);
		SNAPSHOT_VAR_CMP_OR_LEAVE(vq->vq_num, meta, ret, done);

		SNAPSHOT_VAR_OR_LEAVE(vq->vq_flags, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_last_avail, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_next_used, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_save_used, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_msix_idx, meta, ret, done);

		SNAPSHOT_VAR_OR_LEAVE(vq->vq_pfn, meta, ret, done);

		if (!vq_ring_ready(vq))
			continue;

		addr_size = vq->vq_qsize * sizeof(struct vring_desc);
		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_desc, addr_size,
			false, meta, ret, done);

		addr_size = (2 + vq->vq_qsize + 1) * sizeof(uint16_t);
		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_avail, addr_size,
			false, meta, ret, done);

		/* flags, idx, avail_event plus 8-byte used elements */
		addr_size = (2 + 1) * sizeof(uint16_t) +
			vq->vq_qsize * sizeof(struct vring_used_elem);
		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_used, addr_size,
			false, meta, ret, done);

		SNAPSHOT_BUF_OR_LEAVE(vq->vq_desc,
			vring_size_aligned(vq->vq_qsize), meta, ret, done);
	}

done:
	return (ret);
}

int
vi_pci_snapshot(struct vm_snapshot_meta *meta)
{
	int ret;
	struct pci_devinst *pi;
	struct virtio_softc *vs;
	struct virtio_consts *vc;

	pi = meta->dev_data;
	vs = pi->pi_arg;
	vc = vs->vs_vc;

	/* Save virtio softc */
	ret = vi_pci_snapshot_softc(vs, meta);
	if (ret != 0)
		goto done;

	/* Save virtio consts */
	ret = vi_pci_snapshot_consts(vc, meta);
	if (ret != 0)
		goto done;

	/* Save virtio queue info */
	ret = vi_pci_snapshot_queues(vs, meta);
	if (ret != 0)
		goto done;

	/* Save device softc, if needed */
	if (vc->vc_snapshot != NULL) {
		ret = (*vc->vc_snapshot)(DEV_SOFTC(vs), meta);
		if (ret != 0)
			goto done;
	}

done:
	return (ret);
}
#endif