/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
 * Copyright 2019 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>

#include <dev/vmd/vmd.h>

#include "pcib_if.h"

struct vmd_type {
	u_int16_t	vmd_vid;
	u_int16_t	vmd_did;
	char		*vmd_name;
	int		flags;
#define BUS_RESTRICT	1
#define VECTOR_OFFSET	2
#define CAN_BYPASS_MSI	4
};

#define VMD_CAP		0x40
#define VMD_BUS_RESTRICT	0x1

#define VMD_CONFIG	0x44
#define VMD_BYPASS_MSI	0x2
#define VMD_BUS_START(x)	(((x) >> 8) & 0x3)

#define VMD_LOCK	0x70

SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Intel Volume Management Device tuning parameters");

/*
 * By default all VMD devices remap their children's MSI/MSI-X interrupts
 * onto their own vectors.  This provides additional isolation, but also
 * complicates things due to vector sharing, etc.  Fortunately, some VMD
 * devices can bypass the remapping.
 */
static int vmd_bypass_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0,
    "Bypass MSI remapping on capable hardware");

/*
 * All MSIs within a group share one address, so VMD cannot distinguish
 * them.  Using more than one MSI per device makes no sense unless a
 * specific device driver requires it.
 */
static int vmd_max_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
    "Maximum number of MSI vectors per device");

/*
 * MSI-X can use different addresses, but we can route interrupts to only
 * a limited number of MSI-X vectors, so use a conservative default to try
 * to avoid sharing.
 */
static int vmd_max_msix = 3;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
    "Maximum number of MSI-X vectors per device");

static struct vmd_type vmd_devs[] = {
	{ 0x8086, 0x201d, "Intel Volume Management Device", 0 },
	{ 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI },
	{ 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0, 0, NULL, 0 }
};

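/* Probe: match the PCI vendor/device ID pair against vmd_devs[] above. */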
static int
vmd_probe(device_t dev)
{
	struct vmd_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did) {
			device_set_desc(dev, t->vmd_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

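/*
 * Tear down everything set up by vmd_attach(): the bus and memory rmans,
 * per-child interrupt bookkeeping, MSI-X interrupt handlers and their
 * resources, and finally the register BARs.
 */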
static void
vmd_free(struct vmd_softc *sc)
{
	struct vmd_irq *vi;
	struct vmd_irq_user *u;
	int i;

	if (sc->psc.bus.rman.rm_end != 0)
		rman_fini(&sc->psc.bus.rman);
	if (sc->psc.mem.rman.rm_end != 0)
		rman_fini(&sc->psc.mem.rman);
	while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
		LIST_REMOVE(u, viu_link);
		free(u, M_DEVBUF);
	}
	if (sc->vmd_irq != NULL) {
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			if (vi->vi_res == NULL)
				continue;
			bus_teardown_intr(sc->psc.dev, vi->vi_res,
			    vi->vi_handle);
			bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
			    vi->vi_rid, vi->vi_res);
		}
	}
	free(sc->vmd_irq, M_DEVBUF);
	sc->vmd_irq = NULL;
	pci_release_msi(sc->psc.dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_res[i] != NULL)
			bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
			    sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
	}
}

/* Config space of the hidden PCI roots is accessed through BAR(0). */

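/*
 * Config accesses are translated into MMIO accesses within BAR(0) using
 * the usual ECAM layout: bus << 20 | slot << 15 | function << 12, plus
 * the register offset, with the bus number taken relative to
 * vmd_bus_start.  For example, bus vmd_bus_start + 2, slot 1, function 0,
 * register 0x10 maps to offset 0x208010 within BAR(0).
 */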
static uint32_t
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return (0xffffffff);

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_read_4(sc->vmd_regs_res[0], offset));
	case 2:
		return (bus_read_2(sc->vmd_regs_res[0], offset));
	case 1:
		return (bus_read_1(sc->vmd_regs_res[0], offset));
	default:
		__assert_unreachable();
		return (0xffffffff);
	}
}

static void
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
    uint32_t val, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return;

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		bus_write_4(sc->vmd_regs_res[0], offset, val);
		break;
	case 2:
		bus_write_2(sc->vmd_regs_res[0], offset, val);
		break;
	case 1:
		bus_write_1(sc->vmd_regs_res[0], offset, val);
		break;
	default:
		__assert_unreachable();
	}
}

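/* Enable or disable MSI remapping bypass in the VMD CONFIG register. */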
static void
vmd_set_msi_bypass(device_t dev, bool enable)
{
	uint16_t val;

	val = pci_read_config(dev, VMD_CONFIG, 2);
	if (enable)
		val |= VMD_BYPASS_MSI;
	else
		val &= ~VMD_BYPASS_MSI;
	pci_write_config(dev, VMD_CONFIG, val, 2);
}

static int
vmd_intr(void *arg)
{
	/*
	 * We have nothing to do here, but we have to register some
	 * interrupt handler to make the PCI code set up and enable the
	 * MSI-X vector.
	 */
	return (FILTER_STRAY);
}

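/*
 * Attach: map the BARs (a 64-bit BAR occupies two config registers,
 * hence the separate j index), carve the bus number range and memory
 * windows out of them, and either enable MSI bypass or allocate MSI-X
 * vectors for interrupt remapping before adding the child "pci" bus.
 */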
static int
vmd_attach(device_t dev)
{
	struct vmd_softc *sc;
	struct pcib_secbus *bus;
	struct pcib_window *w;
	struct vmd_type *t;
	struct vmd_irq *vi;
	uint16_t vid, did;
	uint32_t bar;
	int i, j, error;
	char buf[64];

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->psc.dev = dev;
	sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);

	pci_enable_busmaster(dev);

	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
		sc->vmd_regs_rid[i] = PCIR_BAR(j);
		bar = pci_read_config(dev, PCIR_BAR(j), 4);
		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
		    PCIM_BAR_MEM_64)
			j++;
		if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
			device_printf(dev, "Cannot allocate resources\n");
			goto fail;
		}
	}

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did)
			break;
	}

	sc->vmd_bus_start = 0;
	if ((t->flags & BUS_RESTRICT) &&
	    (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
		switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
		case 0:
			sc->vmd_bus_start = 0;
			break;
		case 1:
			sc->vmd_bus_start = 128;
			break;
		case 2:
			sc->vmd_bus_start = 224;
			break;
		default:
			device_printf(dev, "Unknown bus offset\n");
			goto fail;
		}
	}
	sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
	    (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);

	bus = &sc->psc.bus;
	bus->sec = sc->vmd_bus_start;
	bus->sub = sc->vmd_bus_end;
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error) {
		device_printf(dev, "Failed to initialize bus rman\n");
		bus->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
	    sc->vmd_bus_end);
	if (error) {
		device_printf(dev, "Failed to add resource to bus rman\n");
		goto fail;
	}

	w = &sc->psc.mem;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error) {
		device_printf(dev, "Failed to initialize memory rman\n");
		w->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[1]),
	    rman_get_end(sc->vmd_regs_res[1]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
	    rman_get_end(sc->vmd_regs_res[2]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}

	LIST_INIT(&sc->vmd_users);
	sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
	sc->vmd_msix_count = pci_msix_count(dev);
	if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) {
		sc->vmd_msix_count = 0;
		vmd_set_msi_bypass(dev, true);
	} else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
		    sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			vi->vi_rid = i + 1;
			vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			    &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
			if (vi->vi_res == NULL) {
				device_printf(dev, "Failed to allocate irq\n");
				goto fail;
			}
			vi->vi_irq = rman_get_start(vi->vi_res);
			if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
			    INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
				device_printf(dev, "Can't set up interrupt\n");
				bus_release_resource(dev, SYS_RES_IRQ,
				    vi->vi_rid, vi->vi_res);
				vi->vi_res = NULL;
				goto fail;
			}
		}
		vmd_set_msi_bypass(dev, false);
	}

	sc->vmd_dma_tag = bus_get_dma_tag(dev);
	sc->psc.child = device_add_child(dev, "pci", DEVICE_UNIT_ANY);
	bus_attach_children(dev);
	return (0);

fail:
	vmd_free(sc);
	return (ENXIO);
}

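/*
 * Detach the child PCI bus, disable the MSI bypass if it was enabled,
 * and release everything else via vmd_free().
 */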
static int
vmd_detach(device_t dev)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int error;

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);
	if (sc->vmd_msix_count == 0)
		vmd_set_msi_bypass(dev, false);
	vmd_free(sc);
	return (0);
}

static bus_dma_tag_t
vmd_get_dma_tag(device_t dev, device_t child)
{
	struct vmd_softc *sc = device_get_softc(dev);

	return (sc->vmd_dma_tag);
}

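/*
 * Children allocate memory and bus numbers from the rmans populated in
 * vmd_attach(); VMD has no I/O port window at all.
 */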
static struct rman *
vmd_get_rman(device_t dev, int type, u_int flags)
{
	struct vmd_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_MEMORY:
		return (&sc->psc.mem.rman);
	case PCI_RES_BUS:
		return (&sc->psc.bus.rman);
	default:
		/* VMD hardware does not support I/O ports. */
		return (NULL);
	}
}

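/*
 * Interrupts are allocated from the parent and forced shareable, since
 * several children may end up on the same remapped vector; memory and
 * bus numbers come from our own rmans.
 */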
static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;

	if (type == SYS_RES_IRQ) {
		/* VMD hardware does not support legacy interrupts. */
		if (*rid == 0)
			return (NULL);
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags | RF_SHAREABLE));
	}
	res = bus_generic_rman_alloc_resource(dev, child, type, rid, start,
	    end, count, flags);
	if (bootverbose && res != NULL) {
		switch (type) {
		case SYS_RES_MEMORY:
			device_printf(dev,
			    "allocated memory range (%#jx-%#jx) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
			break;
		case PCI_RES_BUS:
			device_printf(dev,
			    "allocated bus range (%ju-%ju) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
			break;
		}
	}
	return (res);
}

static int
vmd_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{

	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_adjust_resource(dev, child, r, start, end));
	}
	return (bus_generic_rman_adjust_resource(dev, child, r, start, end));
}

static int
vmd_release_resource(device_t dev, device_t child, struct resource *r)
{

	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_release_resource(dev, child, r));
	}
	return (bus_generic_rman_release_resource(dev, child, r));
}

static int
vmd_activate_resource(device_t dev, device_t child, struct resource *r)
{
	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_activate_resource(dev, child, r));
	}
	return (bus_generic_rman_activate_resource(dev, child, r));
}

static int
vmd_deactivate_resource(device_t dev, device_t child, struct resource *r)
{
	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_deactivate_resource(dev, child, r));
	}
	return (bus_generic_rman_deactivate_resource(dev, child, r));
}

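/*
 * Find which of the memory BARs (vmd_regs_res[1] or [2]) fully contains
 * the given child resource.
 */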
static struct resource *
vmd_find_parent_resource(struct vmd_softc *sc, struct resource *r)
{
	for (int i = 1; i < 3; i++) {
		if (rman_get_start(sc->vmd_regs_res[i]) <= rman_get_start(r) &&
		    rman_get_end(sc->vmd_regs_res[i]) >= rman_get_end(r))
			return (sc->vmd_regs_res[i]);
	}
	return (NULL);
}

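/*
 * Translate a child's map request into an offset within the containing
 * BAR and hand the adjusted request up to our own parent bus to map.
 */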
static int
vmd_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct resource_map_request args;
	struct resource *pres;
	rman_res_t length, start;
	int error;

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start, &length);
	if (error)
		return (error);

	pres = vmd_find_parent_resource(sc, r);
	if (pres == NULL)
		return (ENOENT);

	args.offset = start - rman_get_start(pres);
	args.length = length;
	return (bus_map_resource(dev, pres, &args, map));
}

static int
vmd_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct resource *pres;

	pres = vmd_find_parent_resource(sc, r);
	if (pres == NULL)
		return (ENOENT);
	return (bus_unmap_resource(dev, pres, map));
}

static int
vmd_route_interrupt(device_t dev, device_t child, int pin)
{

	/* VMD hardware does not support legacy interrupts. */
	return (PCI_INVALID_IRQ);
}

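/*
 * With remapping enabled, place the child on the least used remapping
 * vector; all of its MSIs then share that vector's IRQ.  With the bypass
 * active (vmd_msix_count == 0), forward the request to the real PCI bus
 * above us.
 */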
static int
vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, maxcount, irqs));
	}

	if (count > vmd_max_msi)
		return (ENOSPC);
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			return (EBUSY);
	}

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers += count;

	for (i = 0; i < count; i++)
		irqs[i] = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, irqs));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child) {
			sc->vmd_irq[u->viu_vector].vi_nusers -= count;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

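/*
 * Like vmd_alloc_msi(), but each MSI-X vector gets its own user entry,
 * so a child may hold up to vmd_max_msix of them.
 */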
static int
vmd_alloc_msix(device_t dev, device_t child, int *irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	i = 0;
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			i++;
	}
	if (i >= vmd_max_msix)
		return (ENOSPC);

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers++;

	*irq = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msix(device_t dev, device_t child, int irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child &&
		    sc->vmd_irq[u->viu_vector].vi_irq == irq) {
			sc->vmd_irq[u->viu_vector].vi_nusers--;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

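/*
 * Compose the MSI address/data pair for a remapped IRQ.  The index of
 * the remapping vector is encoded in the address bits that normally
 * carry the APIC destination ID (bit 12 and up), which VMD appears to
 * use to pick its remapping entry; the data word is unused.
 */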
static int
vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int i;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)),
		    child, irq, addr, data));
	}

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (sc->vmd_irq[i].vi_irq == irq)
			break;
	}
	if (i >= sc->vmd_msix_count)
		return (EINVAL);
	*addr = MSI_INTEL_ADDR_BASE | (i << 12);
	*data = 0;
	return (0);
}

static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, vmd_probe),
	DEVMETHOD(device_attach, vmd_attach),
	DEVMETHOD(device_detach, vmd_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag, vmd_get_dma_tag),
	DEVMETHOD(bus_get_rman, vmd_get_rman),
	DEVMETHOD(bus_read_ivar, pcib_read_ivar),
	DEVMETHOD(bus_write_ivar, pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource, vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource, vmd_adjust_resource),
	DEVMETHOD(bus_release_resource, vmd_release_resource),
	DEVMETHOD(bus_activate_resource, vmd_activate_resource),
	DEVMETHOD(bus_deactivate_resource, vmd_deactivate_resource),
	DEVMETHOD(bus_map_resource, vmd_map_resource),
	DEVMETHOD(bus_unmap_resource, vmd_unmap_resource),
	DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots, pcib_maxslots),
	DEVMETHOD(pcib_read_config, vmd_read_config),
	DEVMETHOD(pcib_write_config, vmd_write_config),
	DEVMETHOD(pcib_route_interrupt, vmd_route_interrupt),
	DEVMETHOD(pcib_alloc_msi, vmd_alloc_msi),
	DEVMETHOD(pcib_release_msi, vmd_release_msi),
	DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix, vmd_release_msix),
	DEVMETHOD(pcib_map_msi, vmd_map_msi),
	DEVMETHOD(pcib_request_feature, pcib_request_feature_allow),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);