/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_bus_if.h"
#include "virtio_if.h"

struct vtmmio_virtqueue {
	struct virtqueue *vtv_vq;
	int vtv_no_intr;
};

static int vtmmio_detach(device_t);
static int vtmmio_suspend(device_t);
static int vtmmio_resume(device_t);
static int vtmmio_shutdown(device_t);
static void vtmmio_driver_added(device_t, driver_t *);
static void vtmmio_child_detached(device_t, device_t);
static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtmmio_negotiate_features(device_t, uint64_t);
static int vtmmio_finalize_features(device_t);
static bool vtmmio_with_feature(device_t, uint64_t);
static void vtmmio_set_virtqueue(struct vtmmio_softc *sc,
    struct virtqueue *vq, uint32_t size);
static int vtmmio_alloc_virtqueues(device_t, int,
    struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
static int vtmmio_config_generation(device_t);
static uint8_t vtmmio_get_status(device_t);
static void vtmmio_set_status(device_t, uint8_t);
static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static uint64_t vtmmio_read_dev_config_8(struct vtmmio_softc *, bus_size_t);
static void vtmmio_write_dev_config(device_t, bus_size_t, const void *, int);
static void vtmmio_describe_features(struct vtmmio_softc *, const char *,
    uint64_t);
static void vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_free_interrupts(struct vtmmio_softc *);
static void vtmmio_free_virtqueues(struct vtmmio_softc *);
static void vtmmio_release_child_resources(struct vtmmio_softc *);
static void vtmmio_reset(struct vtmmio_softc *);
static void vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_vq_intr(void *);

/*
 * MMIO register read/write wrappers.
 */
#define vtmmio_write_config_1(sc, o, v) \
	bus_write_1((sc)->res[0], (o), (v))
#define vtmmio_write_config_2(sc, o, v) \
	bus_write_2((sc)->res[0], (o), (v))
#define vtmmio_write_config_4(sc, o, v) \
	bus_write_4((sc)->res[0], (o), (v))

#define vtmmio_read_config_1(sc, o) \
	bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
	bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
	bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_attach, vtmmio_attach),
	DEVMETHOD(device_detach, vtmmio_detach),
	DEVMETHOD(device_suspend, vtmmio_suspend),
	DEVMETHOD(device_resume, vtmmio_resume),
	DEVMETHOD(device_shutdown, vtmmio_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added, vtmmio_driver_added),
	DEVMETHOD(bus_child_detached, vtmmio_child_detached),
	DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo),
	DEVMETHOD(bus_read_ivar, vtmmio_read_ivar),
	DEVMETHOD(bus_write_ivar, vtmmio_write_ivar),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
	DEVMETHOD(virtio_bus_finalize_features, vtmmio_finalize_features),
	DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
	DEVMETHOD(virtio_bus_stop, vtmmio_stop),
	DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
	DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
	DEVMETHOD(virtio_bus_config_generation, vtmmio_config_generation),
	DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);

int
vtmmio_probe(device_t dev)
{
	struct vtmmio_softc *sc;
	int rid;
	uint32_t magic, version;

	sc = device_get_softc(dev);

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != VIRTIO_MMIO_MAGIC_VIRT) {
		device_printf(dev, "Bad magic value %#x\n", magic);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
	if (version < 1 || version > 2) {
		device_printf(dev, "Unsupported version: %#x\n", version);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);

	device_set_desc(dev, "VirtIO MMIO adapter");
	return (BUS_PROBE_DEFAULT);
}

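/*
 * Allocate and wire up the single interrupt used by virtio-mmio.
 * Unlike PCI, there is no MSI-X here: one interrupt line carries
 * both virtqueue and configuration-change notifications.
 */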
static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
	struct vtmmio_softc *sc;
	int rid;

	sc = device_get_softc(dev);

	rid = 0;
	sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->res[1]) {
		device_printf(dev, "Can't allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
	    NULL, vtmmio_vq_intr, sc, &sc->ih)) {
		device_printf(dev, "Can't setup the interrupt\n");
		return (ENXIO);
	}

	return (0);
}

int
vtmmio_attach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);

	vtmmio_reset(sc);

	/* Tell the host we've noticed this device. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, DEVICE_UNIT_ANY)) == NULL) {
		device_printf(dev, "Cannot create child device.\n");
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_detach(dev);
		return (ENOMEM);
	}

	sc->vtmmio_child_dev = child;
	vtmmio_probe_and_attach_child(sc);

	return (0);
}

static int
vtmmio_detach(device_t dev)
{
	struct vtmmio_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error)
		return (error);

	vtmmio_reset(sc);

	if (sc->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
	}

	return (0);
}

static int
vtmmio_suspend(device_t dev)
{

	return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

	return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

	(void) bus_generic_shutdown(dev);

	/* Forcibly stop the host device. */
	vtmmio_stop(dev);

	return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_reset(sc);
	vtmmio_release_child_resources(sc);
}

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
	case VIRTIO_IVAR_SUBDEVICE:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
	case VIRTIO_IVAR_DEVICE:
		/*
		 * Dummy value for fields not present in this bus. Used by
		 * bus-agnostic virtio_child_pnpinfo.
		 */
		*result = 0;
		break;
	case VIRTIO_IVAR_MODERN:
		/*
		 * There are several modern (aka MMIO v2) spec compliance
		 * issues with this driver, but keep the status quo.
		 */
		*result = sc->vtmmio_version > 1;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_FEATURE_DESC:
		sc->vtmmio_child_feat_desc = (void *) value;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

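/*
 * Feature negotiation. The 64-bit feature words are exposed 32 bits
 * at a time: the *_FEATURES_SEL register selects which half of the
 * word the corresponding *_FEATURES register reads or writes.
 */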
static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtmmio_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		child_features |= VIRTIO_F_VERSION_1;
	}

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
	host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
	host_features <<= 32;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
	host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

	vtmmio_describe_features(sc, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtio_filter_transport_features(features);
	sc->vtmmio_features = features;

	vtmmio_describe_features(sc, "negotiated", features);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

	return (features);
}

static int
vtmmio_finalize_features(device_t dev)
{
	struct vtmmio_softc *sc;
	uint8_t status;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		/*
		 * Must re-read the status after setting it to verify the
		 * negotiated features were accepted by the device.
		 */
		vtmmio_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);

		status = vtmmio_get_status(dev);
		if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
			device_printf(dev, "desired features were not accepted\n");
			return (ENOTSUP);
		}
	}

	return (0);
}

static bool
vtmmio_with_feature(device_t dev, uint64_t feature)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return ((sc->vtmmio_features & feature) != 0);
}

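/*
 * Program the ring addresses of a virtqueue into the device. Legacy
 * (version 1) devices take a single guest page frame number through
 * QUEUE_PFN, while modern devices take separate 64-bit addresses for
 * the descriptor, available and used rings, split into LOW/HIGH
 * halves, and are then enabled by writing QUEUE_READY.
 */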
static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
	vm_paddr_t paddr;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
		    VIRTIO_MMIO_VRING_ALIGN);
		paddr = virtqueue_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
		    paddr >> PAGE_SHIFT);
	} else {
		paddr = virtqueue_desc_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_avail_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_used_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
		    ((uint64_t)paddr) >> 32);

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
	}
}

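/*
 * Allocate and program all virtqueues requested by the child device.
 * QUEUE_NUM_MAX reports the largest ring size the host supports for
 * the queue currently selected with QUEUE_SEL.
 */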
static int
vtmmio_alloc_virtqueues(device_t dev, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtmmio_virtqueue *vqx;
	struct vq_alloc_info *info;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t size;
	int idx, error;

	sc = device_get_softc(dev);

	if (sc->vtmmio_nvqs != 0)
		return (EALREADY);
	if (nvqs <= 0)
		return (EINVAL);

	sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtmmio_vqs == NULL)
		return (ENOMEM);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];
		info = &vq_info[idx];

		vtmmio_select_virtqueue(sc, idx);
		size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

		error = virtqueue_alloc(dev, idx, size,
		    VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN,
		    ~(vm_paddr_t)0, info, &vq);
		if (error) {
			device_printf(dev,
			    "cannot allocate virtqueue %d: %d\n",
			    idx, error);
			break;
		}

		vtmmio_set_virtqueue(sc, vq, size);

		vqx->vtv_vq = *info->vqai_vq = vq;
		vqx->vtv_no_intr = info->vqai_intr == NULL;

		sc->vtmmio_nvqs++;
	}

	if (error)
		vtmmio_free_virtqueues(sc);

	return (error);
}

static void
vtmmio_stop(device_t dev)
{

	vtmmio_reset(device_get_softc(dev));
}

static int
vtmmio_reinit(device_t dev, uint64_t features)
{
	struct vtmmio_softc *sc;
	int idx, error;

	sc = device_get_softc(dev);

	if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtmmio_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtmmio_reinit_complete().
	 */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	/*
	 * TODO: Check that no features are added beyond what was
	 * originally negotiated.
	 */
	vtmmio_negotiate_features(dev, features);
	error = vtmmio_finalize_features(dev);
	if (error) {
		device_printf(dev, "cannot finalize features during reinit\n");
		return (error);
	}

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		error = vtmmio_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);
	MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY);

	vtmmio_write_config_4(sc, offset, queue);
}

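/*
 * Only modern (version 2) devices provide a configuration generation
 * counter; report a constant generation on legacy devices.
 */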
static int
vtmmio_config_generation(device_t dev)
{
	struct vtmmio_softc *sc;
	uint32_t gen;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1)
		gen = vtmmio_read_config_4(sc, VIRTIO_MMIO_CONFIG_GENERATION);
	else
		gen = 0;

	return (gen);
}

static uint8_t
vtmmio_get_status(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

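/*
 * Device status bits are cumulative: new bits are OR'ed into the
 * current status, except that writing RESET (zero) clears them all.
 */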
static void
vtmmio_set_status(device_t dev, uint8_t status)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtmmio_get_status(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds the following restriction:
	 *
	 * 4.2.2.2: For the device-specific configuration space, the driver
	 * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
	 * and aligned accesses for 16 bit wide fields and 32 bit wide and
	 * aligned accesses for 32 and 64 bit wide fields.
	 *
	 * The endianness also varies between non-legacy and legacy:
	 *
	 * 2.4: Note: The device configuration space uses the little-endian
	 * format for multi-byte fields.
	 *
	 * 2.4.3: Note that for legacy interfaces, device configuration space
	 * is generally the guest’s native endian, rather than PCI’s
	 * little-endian. The correct endian-ness is documented for each
	 * device.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			*(uint8_t *)dst = vtmmio_read_config_1(sc, off);
			break;
		case 2:
			*(uint16_t *)dst =
			    le16toh(vtmmio_read_config_2(sc, off));
			break;
		case 4:
			*(uint32_t *)dst =
			    le32toh(vtmmio_read_config_4(sc, off));
			break;
		case 8:
			*(uint64_t *)dst = vtmmio_read_dev_config_8(sc, off);
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtmmio_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtmmio_read_config_2(sc, off);
		} else
#endif
		{
			size = 1;
			*d = vtmmio_read_config_1(sc, off);
		}
	}
}

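/*
 * Read a 64-bit config field as two 32-bit accesses, retrying until the
 * generation counter is stable so a concurrent device update cannot
 * produce a torn value.
 */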
static uint64_t
vtmmio_read_dev_config_8(struct vtmmio_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->dev;

	do {
		gen = vtmmio_config_generation(dev);
		val0 = le32toh(vtmmio_read_config_4(sc, off));
		val1 = le32toh(vtmmio_read_config_4(sc, off + 4));
	} while (gen != vtmmio_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    const void *src, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	const uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds size and alignment
	 * restrictions. It also changes the endianness from native-endian
	 * to little-endian. See vtmmio_read_dev_config.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			vtmmio_write_config_1(sc, off, *(const uint8_t *)src);
			break;
		case 2:
			vtmmio_write_config_2(sc, off,
			    htole16(*(const uint16_t *)src));
			break;
		case 4:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint32_t *)src));
			break;
		case 8:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint64_t *)src));
			vtmmio_write_config_4(sc, off + 4,
			    htole32((*(const uint64_t *)src) >> 32));
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			vtmmio_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtmmio_write_config_2(sc, off, *(uint16_t *)s);
		} else
#endif
		{
			size = 1;
			vtmmio_write_config_1(sc, off, *s);
		}
	}
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (child == NULL)
		return;

	if (device_get_state(child) != DS_NOTPRESENT) {
		return;
	}

	if (device_probe(child) != 0) {
		return;
	}

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (device_attach(child) != 0) {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_reset(sc);
		vtmmio_release_child_resources(sc);
		/* Reset status for future attempt. */
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	} else {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
	struct vtmmio_virtqueue *vqx;
	struct virtqueue *vq;
	int error;
	uint16_t size;

	vqx = &sc->vtmmio_vqs[idx];
	vq = vqx->vtv_vq;

	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

	vtmmio_select_virtqueue(sc, idx);
	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	vtmmio_set_virtqueue(sc, vq, size);

	return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

	if (sc->ih != NULL)
		bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

	if (sc->res[1] != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

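/*
 * Tear down all virtqueues, de-programming each one from the device
 * (QUEUE_READY on modern devices, QUEUE_PFN on legacy) before the
 * ring memory is freed.
 */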
static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
	struct vtmmio_virtqueue *vqx;
	int idx;

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];

		vtmmio_select_virtqueue(sc, idx);
		if (sc->vtmmio_version > 1) {
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);
			vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_READY);
		} else
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);

		virtqueue_free(vqx->vtv_vq);
		vqx->vtv_vq = NULL;
	}

	free(sc->vtmmio_vqs, M_DEVBUF);
	sc->vtmmio_vqs = NULL;
	sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

	vtmmio_free_interrupts(sc);
	vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

	/*
	 * Setting the status to RESET sets the host device to
	 * the original, uninitialized state.
	 */
	vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

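/*
 * Interrupt handler: read and acknowledge the pending causes, then
 * dispatch configuration-change and virtqueue notifications to the
 * child device.
 */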
static void
vtmmio_vq_intr(void *arg)
{
	struct vtmmio_virtqueue *vqx;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t status;
	int idx;

	sc = arg;

	status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

	/* The config changed */
	if (status & VIRTIO_MMIO_INT_CONFIG)
		if (sc->vtmmio_child_dev != NULL)
			VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

	/* Notify all virtqueues. */
	if (status & VIRTIO_MMIO_INT_VRING) {
		for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
			vqx = &sc->vtmmio_vqs[idx];
			if (vqx->vtv_no_intr == 0) {
				vq = vqx->vtv_vq;
				virtqueue_intr(vq);
			}
		}
	}
}