/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"

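/*
 * Per-virtqueue state tracked by the bus driver: the virtqueue itself and
 * whether the child device declined an interrupt handler for it.
 */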
struct vtmmio_virtqueue {
        struct virtqueue *vtv_vq;
        int vtv_no_intr;
};

static int vtmmio_detach(device_t);
static int vtmmio_suspend(device_t);
static int vtmmio_resume(device_t);
static int vtmmio_shutdown(device_t);
static void vtmmio_driver_added(device_t, driver_t *);
static void vtmmio_child_detached(device_t, device_t);
static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtmmio_negotiate_features(device_t, uint64_t);
static int vtmmio_finalize_features(device_t);
static bool vtmmio_with_feature(device_t, uint64_t);
static void vtmmio_set_virtqueue(struct vtmmio_softc *sc,
    struct virtqueue *vq, uint32_t size);
static int vtmmio_alloc_virtqueues(device_t, int,
    struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
static int vtmmio_config_generation(device_t);
static uint8_t vtmmio_get_status(device_t);
static void vtmmio_set_status(device_t, uint8_t);
static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static uint64_t vtmmio_read_dev_config_8(struct vtmmio_softc *, bus_size_t);
static void vtmmio_write_dev_config(device_t, bus_size_t, const void *, int);
static void vtmmio_describe_features(struct vtmmio_softc *, const char *,
    uint64_t);
static void vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_free_interrupts(struct vtmmio_softc *);
static void vtmmio_free_virtqueues(struct vtmmio_softc *);
static void vtmmio_release_child_resources(struct vtmmio_softc *);
static void vtmmio_reset(struct vtmmio_softc *);
static void vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_vq_intr(void *);

/*
 * Low-level MMIO register read/write wrappers. Writes give the optional
 * platform back-end a chance to act before and after each access.
 */
#define vtmmio_write_config_1(sc, o, v)                         \
do {                                                            \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));   \
        bus_write_1((sc)->res[0], (o), (v));                    \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_NOTE(sc->platform, (o), (v));       \
} while (0)
#define vtmmio_write_config_2(sc, o, v)                         \
do {                                                            \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));   \
        bus_write_2((sc)->res[0], (o), (v));                    \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_NOTE(sc->platform, (o), (v));       \
} while (0)
#define vtmmio_write_config_4(sc, o, v)                         \
do {                                                            \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));   \
        bus_write_4((sc)->res[0], (o), (v));                    \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_NOTE(sc->platform, (o), (v));       \
} while (0)

#define vtmmio_read_config_1(sc, o) \
        bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
        bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
        bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_attach, vtmmio_attach),
        DEVMETHOD(device_detach, vtmmio_detach),
        DEVMETHOD(device_suspend, vtmmio_suspend),
        DEVMETHOD(device_resume, vtmmio_resume),
        DEVMETHOD(device_shutdown, vtmmio_shutdown),

        /* Bus interface. */
        DEVMETHOD(bus_driver_added, vtmmio_driver_added),
        DEVMETHOD(bus_child_detached, vtmmio_child_detached),
        DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo),
        DEVMETHOD(bus_read_ivar, vtmmio_read_ivar),
        DEVMETHOD(bus_write_ivar, vtmmio_write_ivar),

        /* VirtIO bus interface. */
        DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
        DEVMETHOD(virtio_bus_finalize_features, vtmmio_finalize_features),
        DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature),
        DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
        DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
        DEVMETHOD(virtio_bus_stop, vtmmio_stop),
        DEVMETHOD(virtio_bus_poll, vtmmio_poll),
        DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
        DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
        DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
        DEVMETHOD(virtio_bus_config_generation, vtmmio_config_generation),
        DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
        DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

        DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);

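/*
 * Probe: map the registers long enough to validate the magic value, the
 * device version (legacy v1 or modern v2), and that a device is actually
 * present (a device ID of zero indicates an empty slot).
 */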
int
vtmmio_probe(device_t dev)
{
        struct vtmmio_softc *sc;
        int rid;
        uint32_t magic, version;

        sc = device_get_softc(dev);

        rid = 0;
        sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->res[0] == NULL) {
                device_printf(dev, "Cannot allocate memory window.\n");
                return (ENXIO);
        }

        magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE);
        if (magic != VIRTIO_MMIO_MAGIC_VIRT) {
                device_printf(dev, "Bad magic value %#x\n", magic);
                bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
                return (ENXIO);
        }

        version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
        if (version < 1 || version > 2) {
                device_printf(dev, "Unsupported version: %#x\n", version);
                bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
                return (ENXIO);
        }

        if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) {
                bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
                return (ENXIO);
        }

        bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);

        device_set_desc(dev, "VirtIO MMIO adapter");
        return (BUS_PROBE_DEFAULT);
}

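/*
 * Hook up the virtqueue interrupt. A platform back-end may provide its own
 * interrupt delivery; otherwise allocate and set up the IRQ resource here.
 */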
static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
        struct vtmmio_softc *sc;
        int rid;
        int err;

        sc = device_get_softc(dev);

        if (sc->platform != NULL) {
                err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
                    vtmmio_vq_intr, sc);
                if (err == 0) {
                        /* Okay we have backend-specific interrupts */
                        return (0);
                }
        }

        rid = 0;
        sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_ACTIVE);
        if (!sc->res[1]) {
                device_printf(dev, "Can't allocate interrupt\n");
                return (ENXIO);
        }

        if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
            NULL, vtmmio_vq_intr, sc, &sc->ih)) {
                device_printf(dev, "Can't setup the interrupt\n");
                return (ENXIO);
        }

        return (0);
}

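/*
 * Attach: map the registers, reset the device, acknowledge it, and create
 * the child device that carries the device-type specific driver.
 */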
int
vtmmio_attach(device_t dev)
{
        struct vtmmio_softc *sc;
        device_t child;
        int rid;

        sc = device_get_softc(dev);
        sc->dev = dev;

        rid = 0;
        sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->res[0] == NULL) {
                device_printf(dev, "Cannot allocate memory window.\n");
                return (ENXIO);
        }

        sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);

        vtmmio_reset(sc);

        /* Tell the host we've noticed this device. */
        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

        if ((child = device_add_child(dev, NULL, -1)) == NULL) {
                device_printf(dev, "Cannot create child device.\n");
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
                vtmmio_detach(dev);
                return (ENOMEM);
        }

        sc->vtmmio_child_dev = child;
        vtmmio_probe_and_attach_child(sc);

        return (0);
}

static int
vtmmio_detach(device_t dev)
{
        struct vtmmio_softc *sc;
        int error;

        sc = device_get_softc(dev);

        error = bus_generic_detach(dev);
        if (error)
                return (error);

        vtmmio_reset(sc);

        if (sc->res[0] != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, 0,
                    sc->res[0]);
                sc->res[0] = NULL;
        }

        return (0);
}

static int
vtmmio_suspend(device_t dev)
{

        return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

        return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

        (void) bus_generic_shutdown(dev);

        /* Forcibly stop the host device. */
        vtmmio_stop(dev);

        return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        vtmmio_reset(sc);
        vtmmio_release_child_resources(sc);
}

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (sc->vtmmio_child_dev != child)
                return (ENOENT);

        switch (index) {
        case VIRTIO_IVAR_DEVTYPE:
        case VIRTIO_IVAR_SUBDEVICE:
                *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
                break;
        case VIRTIO_IVAR_VENDOR:
                *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
                break;
        case VIRTIO_IVAR_SUBVENDOR:
        case VIRTIO_IVAR_DEVICE:
                /*
                 * Dummy value for fields not present in this bus. Used by
                 * bus-agnostic virtio_child_pnpinfo.
                 */
                *result = 0;
                break;
        case VIRTIO_IVAR_MODERN:
                /*
                 * There are several modern (aka MMIO v2) spec compliance
                 * issues with this driver, but keep the status quo.
                 */
                *result = sc->vtmmio_version > 1;
                break;
        default:
                return (ENOENT);
        }

        return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (sc->vtmmio_child_dev != child)
                return (ENOENT);

        switch (index) {
        case VIRTIO_IVAR_FEATURE_DESC:
                sc->vtmmio_child_feat_desc = (void *) value;
                break;
        default:
                return (ENOENT);
        }

        return (0);
}

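/*
 * Feature negotiation: read the 64-bit host feature set (selected 32 bits
 * at a time), intersect it with what the child driver supports, filter the
 * transport features, and write the result back as the guest features.
 */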
static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
        struct vtmmio_softc *sc;
        uint64_t host_features, features;

        sc = device_get_softc(dev);

        if (sc->vtmmio_version > 1) {
                child_features |= VIRTIO_F_VERSION_1;
        }

        vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
        host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
        host_features <<= 32;

        vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
        host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

        vtmmio_describe_features(sc, "host", host_features);

        /*
         * Limit negotiated features to what the driver, virtqueue, and
         * host all support.
         */
        features = host_features & child_features;
        features = virtio_filter_transport_features(features);
        sc->vtmmio_features = features;

        vtmmio_describe_features(sc, "negotiated", features);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

        return (features);
}

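/*
 * For modern (v2) devices, set FEATURES_OK and read the status back to
 * confirm the device accepted the negotiated feature set.
 */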
static int
vtmmio_finalize_features(device_t dev)
{
        struct vtmmio_softc *sc;
        uint8_t status;

        sc = device_get_softc(dev);

        if (sc->vtmmio_version > 1) {
                /*
                 * Must re-read the status after setting it to verify the
                 * negotiated features were accepted by the device.
                 */
                vtmmio_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);

                status = vtmmio_get_status(dev);
                if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
                        device_printf(dev, "desired features were not accepted\n");
                        return (ENOTSUP);
                }
        }

        return (0);
}

static bool
vtmmio_with_feature(device_t dev, uint64_t feature)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        return ((sc->vtmmio_features & feature) != 0);
}

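/*
 * Program the ring addresses for the currently selected queue. Legacy (v1)
 * devices take a single page frame number; modern (v2) devices take the
 * 64-bit physical addresses of the descriptor, available, and used rings
 * and are then marked ready.
 */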
static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
        vm_paddr_t paddr;

        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);

        if (sc->vtmmio_version == 1) {
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
                    VIRTIO_MMIO_VRING_ALIGN);
                paddr = virtqueue_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
                    paddr >> PAGE_SHIFT);
        } else {
                paddr = virtqueue_desc_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
                    paddr);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
                    ((uint64_t)paddr) >> 32);

                paddr = virtqueue_avail_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
                    paddr);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
                    ((uint64_t)paddr) >> 32);

                paddr = virtqueue_used_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
                    paddr);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
                    ((uint64_t)paddr) >> 32);

                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
        }
}

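/*
 * Allocate and program all virtqueues requested by the child driver. The
 * queue size comes from the device's QUEUE_NUM_MAX register; any failure
 * tears down whatever was allocated so far.
 */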
static int
vtmmio_alloc_virtqueues(device_t dev, int nvqs,
    struct vq_alloc_info *vq_info)
{
        struct vtmmio_virtqueue *vqx;
        struct vq_alloc_info *info;
        struct vtmmio_softc *sc;
        struct virtqueue *vq;
        uint32_t size;
        int idx, error;

        sc = device_get_softc(dev);

        if (sc->vtmmio_nvqs != 0)
                return (EALREADY);
        if (nvqs <= 0)
                return (EINVAL);

        sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->vtmmio_vqs == NULL)
                return (ENOMEM);

        if (sc->vtmmio_version == 1) {
                vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
                    (1 << PAGE_SHIFT));
        }

        for (idx = 0; idx < nvqs; idx++) {
                vqx = &sc->vtmmio_vqs[idx];
                info = &vq_info[idx];

                vtmmio_select_virtqueue(sc, idx);
                size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

                error = virtqueue_alloc(dev, idx, size,
                    VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN,
                    ~(vm_paddr_t)0, info, &vq);
                if (error) {
                        device_printf(dev,
                            "cannot allocate virtqueue %d: %d\n",
                            idx, error);
                        break;
                }

                vtmmio_set_virtqueue(sc, vq, size);

                vqx->vtv_vq = *info->vqai_vq = vq;
                vqx->vtv_no_intr = info->vqai_intr == NULL;

                sc->vtmmio_nvqs++;
        }

        if (error)
                vtmmio_free_virtqueues(sc);

        return (error);
}

static void
vtmmio_stop(device_t dev)
{

        vtmmio_reset(device_get_softc(dev));
}

static void
vtmmio_poll(device_t dev)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (sc->platform != NULL)
                VIRTIO_MMIO_POLL(sc->platform);
}

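/*
 * Reinitialize the device after a reset requested by the child driver:
 * walk the status back through ACK and DRIVER, renegotiate features, and
 * reprogram every previously allocated virtqueue.
 */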
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
        struct vtmmio_softc *sc;
        int idx, error;

        sc = device_get_softc(dev);

        if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
                vtmmio_stop(dev);

        /*
         * Quickly drive the status through ACK and DRIVER. The device
         * does not become usable again until vtmmio_reinit_complete().
         */
        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

        /*
         * TODO: Check that no features were added beyond those that were
         * originally negotiated.
         */
        vtmmio_negotiate_features(dev, features);
        error = vtmmio_finalize_features(dev);
        if (error) {
                device_printf(dev, "cannot finalize features during reinit\n");
                return (error);
        }

        if (sc->vtmmio_version == 1) {
                vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
                    (1 << PAGE_SHIFT));
        }

        for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
                error = vtmmio_reinit_virtqueue(sc, idx);
                if (error)
                        return (error);
        }

        return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

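/*
 * Kick the device by writing the queue index into the notify register;
 * MMIO has a single, fixed notification offset, as asserted below.
 */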
static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);
        MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY);

        vtmmio_write_config_4(sc, offset, queue);
}

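/*
 * Return the device configuration generation counter. Legacy (v1) devices
 * have no such register, so report a constant generation of zero.
 */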
static int
vtmmio_config_generation(device_t dev)
{
        struct vtmmio_softc *sc;
        uint32_t gen;

        sc = device_get_softc(dev);

        if (sc->vtmmio_version > 1)
                gen = vtmmio_read_config_4(sc, VIRTIO_MMIO_CONFIG_GENERATION);
        else
                gen = 0;

        return (gen);
}

static uint8_t
vtmmio_get_status(device_t dev)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

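/*
 * Status bits accumulate: OR the new bit into the current status, except
 * for RESET (0), which clears the register and resets the device.
 */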
static void
vtmmio_set_status(device_t dev, uint8_t status)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (status != VIRTIO_CONFIG_STATUS_RESET)
                status |= vtmmio_get_status(dev);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
        struct vtmmio_softc *sc;
        bus_size_t off;
        uint8_t *d;
        int size;

        sc = device_get_softc(dev);
        off = VIRTIO_MMIO_CONFIG + offset;

        /*
         * The non-legacy MMIO specification adds the following restriction:
         *
         * 4.2.2.2: For the device-specific configuration space, the driver
         * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
         * and aligned accesses for 16 bit wide fields and 32 bit wide and
         * aligned accesses for 32 and 64 bit wide fields.
         *
         * The endianness also varies between non-legacy and legacy:
         *
         * 2.4: Note: The device configuration space uses the little-endian
         * format for multi-byte fields.
         *
         * 2.4.3: Note that for legacy interfaces, device configuration space
         * is generally the guest's native endian, rather than PCI's
         * little-endian. The correct endian-ness is documented for each
         * device.
         */
        if (sc->vtmmio_version > 1) {
                switch (length) {
                case 1:
                        *(uint8_t *)dst = vtmmio_read_config_1(sc, off);
                        break;
                case 2:
                        *(uint16_t *)dst =
                            le16toh(vtmmio_read_config_2(sc, off));
                        break;
                case 4:
                        *(uint32_t *)dst =
                            le32toh(vtmmio_read_config_4(sc, off));
                        break;
                case 8:
                        *(uint64_t *)dst = vtmmio_read_dev_config_8(sc, off);
                        break;
                default:
                        panic("%s: invalid length %d\n", __func__, length);
                }

                return;
        }

        for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
                if (length >= 4) {
                        size = 4;
                        *(uint32_t *)d = vtmmio_read_config_4(sc, off);
                } else if (length >= 2) {
                        size = 2;
                        *(uint16_t *)d = vtmmio_read_config_2(sc, off);
                } else
#endif
                {
                        size = 1;
                        *d = vtmmio_read_config_1(sc, off);
                }
        }
}

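/*
 * 64-bit config fields cannot be read atomically, so read the two 32-bit
 * halves and retry if the config generation changed in between.
 */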
static uint64_t
vtmmio_read_dev_config_8(struct vtmmio_softc *sc, bus_size_t off)
{
        device_t dev;
        int gen;
        uint32_t val0, val1;

        dev = sc->dev;

        do {
                gen = vtmmio_config_generation(dev);
                val0 = le32toh(vtmmio_read_config_4(sc, off));
                val1 = le32toh(vtmmio_read_config_4(sc, off + 4));
        } while (gen != vtmmio_config_generation(dev));

        return (((uint64_t) val1 << 32) | val0);
}

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    const void *src, int length)
{
        struct vtmmio_softc *sc;
        bus_size_t off;
        const uint8_t *s;
        int size;

        sc = device_get_softc(dev);
        off = VIRTIO_MMIO_CONFIG + offset;

        /*
         * The non-legacy MMIO specification adds size and alignment
         * restrictions. It also changes the endianness from native-endian
         * to little-endian. See vtmmio_read_dev_config.
         */
        if (sc->vtmmio_version > 1) {
                switch (length) {
                case 1:
                        vtmmio_write_config_1(sc, off, *(const uint8_t *)src);
                        break;
                case 2:
                        vtmmio_write_config_2(sc, off,
                            htole16(*(const uint16_t *)src));
                        break;
                case 4:
                        vtmmio_write_config_4(sc, off,
                            htole32(*(const uint32_t *)src));
                        break;
                case 8:
                        vtmmio_write_config_4(sc, off,
                            htole32(*(const uint64_t *)src));
                        vtmmio_write_config_4(sc, off + 4,
                            htole32((*(const uint64_t *)src) >> 32));
                        break;
                default:
                        panic("%s: invalid length %d\n", __func__, length);
                }

                return;
        }

        for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
                if (length >= 4) {
                        size = 4;
                        vtmmio_write_config_4(sc, off, *(uint32_t *)s);
                } else if (length >= 2) {
                        size = 2;
                        vtmmio_write_config_2(sc, off, *(uint16_t *)s);
                } else
#endif
                {
                        size = 1;
                        vtmmio_write_config_1(sc, off, *s);
                }
        }
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
        device_t dev, child;

        dev = sc->dev;
        child = sc->vtmmio_child_dev;

        if (device_is_attached(child) || bootverbose == 0)
                return;

        virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
        device_t dev, child;

        dev = sc->dev;
        child = sc->vtmmio_child_dev;

        if (child == NULL)
                return;

        if (device_get_state(child) != DS_NOTPRESENT) {
                return;
        }

        if (device_probe(child) != 0) {
                return;
        }

        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
        if (device_attach(child) != 0) {
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
                vtmmio_reset(sc);
                vtmmio_release_child_resources(sc);
                /* Reset status for future attempt. */
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
        } else {
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
                VIRTIO_ATTACH_COMPLETED(child);
        }
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
        struct vtmmio_virtqueue *vqx;
        struct virtqueue *vq;
        int error;
        uint16_t size;

        vqx = &sc->vtmmio_vqs[idx];
        vq = vqx->vtv_vq;

        KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

        vtmmio_select_virtqueue(sc, idx);
        size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

        error = virtqueue_reinit(vq, size);
        if (error)
                return (error);

        vtmmio_set_virtqueue(sc, vq, size);

        return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

        if (sc->ih != NULL)
                bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

        if (sc->res[1] != NULL)
                bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

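/*
 * Disable each queue at the device (clear READY on modern devices, zero
 * the PFN on legacy ones) before freeing the virtqueue memory.
 */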
static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
        struct vtmmio_virtqueue *vqx;
        int idx;

        for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
                vqx = &sc->vtmmio_vqs[idx];

                vtmmio_select_virtqueue(sc, idx);
                if (sc->vtmmio_version > 1) {
                        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);
                        vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_READY);
                } else
                        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);

                virtqueue_free(vqx->vtv_vq);
                vqx->vtv_vq = NULL;
        }

        free(sc->vtmmio_vqs, M_DEVBUF);
        sc->vtmmio_vqs = NULL;
        sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

        vtmmio_free_interrupts(sc);
        vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

        /*
         * Setting the status to RESET sets the host device to
         * the original, uninitialized state.
         */
        vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

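/*
 * Shared interrupt handler: acknowledge whatever the device reported, then
 * forward a configuration-change notification to the child and run the
 * virtqueue handlers for queues that asked for interrupts.
 */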
static void
vtmmio_vq_intr(void *arg)
{
        struct vtmmio_virtqueue *vqx;
        struct vtmmio_softc *sc;
        struct virtqueue *vq;
        uint32_t status;
        int idx;

        sc = arg;

        status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
        vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

        /* The config changed */
        if (status & VIRTIO_MMIO_INT_CONFIG)
                if (sc->vtmmio_child_dev != NULL)
                        VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

        /* Notify all virtqueues. */
        if (status & VIRTIO_MMIO_INT_VRING) {
                for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
                        vqx = &sc->vtmmio_vqs[idx];
                        if (vqx->vtv_no_intr == 0) {
                                vq = vqx->vtv_vq;
                                virtqueue_intr(vq);
                        }
                }
        }
}
