vmd.c @ 7af4475a6e31202a865b1dd3727018659b44470f (previous revision: 0c852bb9b9282b30fd047ac1de398358f33777f4)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
 * Copyright 2019 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.

--- 17 unchanged lines hidden (view full) ---


30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD$");
32
33#include <sys/types.h>
34#include <sys/param.h>
35#include <sys/bus.h>
36#include <sys/conf.h>
37#include <sys/kernel.h>
38#include <sys/limits.h>
37#include <sys/module.h>
39#include <sys/module.h>
40#include <sys/sysctl.h>
38#include <sys/systm.h>
39#include <sys/malloc.h>
40
41#include <machine/bus.h>
42#include <machine/resource.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>
43
44#include <machine/bus.h>
45#include <machine/resource.h>
46#include <machine/intr_machdep.h>
43#include <sys/rman.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
47#include <sys/rman.h>
48#include <sys/lock.h>
49#include <sys/mutex.h>
46#include <sys/taskqueue.h>
47
48#include <sys/pciio.h>
49#include <dev/pci/pcivar.h>
50#include <dev/pci/pcireg.h>
51#include <dev/pci/pci_private.h>
52#include <dev/pci/pcib_private.h>
53
50
51#include <sys/pciio.h>
52#include <dev/pci/pcivar.h>
53#include <dev/pci/pcireg.h>
54#include <dev/pci/pci_private.h>
55#include <dev/pci/pcib_private.h>
56
54#define TASK_QUEUE_INTR 1
55#include <dev/vmd/vmd.h>
56
57#include "pcib_if.h"
57#include <dev/vmd/vmd.h>
58
59#include "pcib_if.h"
58#include "pci_if.h"
59
struct vmd_type {
	u_int16_t	vmd_vid;
	u_int16_t	vmd_did;
	char		*vmd_name;
	int		flags;
#define BUS_RESTRICT	1
#define VECTOR_OFFSET	2
};

#define VMD_CAP		0x40
#define VMD_BUS_RESTRICT	0x1

#define VMD_CONFIG	0x44
#define VMD_BUS_START(x)	((x >> 8) & 0x3)

#define VMD_LOCK	0x70

SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Intel Volume Management Device tuning parameters");

/*
 * All MSIs within a group share the same address, so VMD can't distinguish
 * them.  It makes no sense to use more than one per device unless some
 * specific device driver requires it.
 */
static int vmd_max_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
    "Maximum number of MSI vectors per device");

/*
 * MSI-X can use different addresses, but we have only a limited number of
 * MSI-X vectors to route to, so use a conservative default to try to avoid
 * sharing.
 */
static int vmd_max_msix = 3;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
    "Maximum number of MSI-X vectors per device");

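/*
 * Note: both knobs are CTLFLAG_RWTUN, so they can be changed at runtime
 * with sysctl(8) or preset as tunables, e.g. hw.vmd.max_msix=2 in
 * loader.conf(5).
 */
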
static struct vmd_type vmd_devs[] = {
        { 0x8086, 0x201d, "Intel Volume Management Device", 0 },
        { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT },
        { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0, 0, NULL, 0 }
};

static int
vmd_probe(device_t dev)
{
	struct vmd_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did) {
			device_set_desc(dev, t->vmd_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static void
vmd_free(struct vmd_softc *sc)
{
	struct vmd_irq *vi;
	struct vmd_irq_user *u;
	int i;

	if (sc->psc.bus.rman.rm_end != 0)
		rman_fini(&sc->psc.bus.rman);
	if (sc->psc.mem.rman.rm_end != 0)
		rman_fini(&sc->psc.mem.rman);
	while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
		LIST_REMOVE(u, viu_link);
		free(u, M_DEVBUF);
	}
	if (sc->vmd_irq != NULL) {
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			if (vi->vi_res == NULL)
				continue;
			bus_teardown_intr(sc->psc.dev, vi->vi_res,
			    vi->vi_handle);
			bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
			    vi->vi_rid, vi->vi_res);
		}
	}
	free(sc->vmd_irq, M_DEVBUF);
	sc->vmd_irq = NULL;
	pci_release_msi(sc->psc.dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_res[i] != NULL)
			bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
			    sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
	}
}

/* Config space of the hidden PCI roots is accessed through BAR(0). */

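/*
 * The offset computed below follows the standard PCIe ECAM layout within
 * that BAR: 4 KB of config space per function (<< 12), 8 functions per
 * slot (<< 15), and 32 slots per bus (<< 20), relative to the first bus
 * number owned by this VMD.
 */
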
static uint32_t
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return (0xffffffff);

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
		    offset));
	case 2:
		return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
		    offset));
	case 1:
		return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
		    offset));
	default:
		__assert_unreachable();
		return (0xffffffff);
	}
}

static void
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
    uint32_t val, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return;

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
		    offset, val));
	case 2:
		return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
		    offset, val));
	case 1:
		return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
		    offset, val));
	default:
		__assert_unreachable();
	}
}

static int
vmd_intr(void *arg)
{
	/*
	 * We have nothing to do here, but we have to register some interrupt
	 * handler to make PCI code setup and enable the MSI-X vector.
	 */
	return (FILTER_STRAY);
}

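/*
 * Attach: map the VMD BARs, determine the hidden bus number range, publish
 * the bus numbers and the memory windows as rmans for child allocations,
 * set up the MSI-X vectors the children will share, and attach the child
 * "pci" bus.
 */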
static int
vmd_attach(device_t dev)
{
	struct vmd_softc *sc;
	struct pcib_secbus *bus;
	struct pcib_window *w;
	struct vmd_type *t;
	struct vmd_irq *vi;
	uint16_t vid, did;
	uint32_t bar;
	int i, j, error;
	char buf[64];

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->psc.dev = dev;
	sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);

	pci_enable_busmaster(dev);

	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
		sc->vmd_regs_rid[i] = PCIR_BAR(j);
		bar = pci_read_config(dev, PCIR_BAR(0), 4);
		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
		    PCIM_BAR_MEM_64)
			j++;
		if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
			device_printf(dev, "Cannot allocate resources\n");
			goto fail;
		}
	}

	sc->vmd_btag = rman_get_bustag(sc->vmd_regs_res[0]);
	sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_res[0]);

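	/*
	 * If the device restricts the usable bus numbers (BUS_RESTRICT in
	 * VMD_CAP), the two-bit VMD_BUS_START field of VMD_CONFIG selects
	 * where the hidden bus range begins: 0, 128 or 224.  The end of the
	 * range follows from the size of BAR(0), which carries 1 MB of
	 * config space per bus.
	 */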
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did)
			break;
	}

	sc->vmd_bus_start = 0;
	if ((t->flags & BUS_RESTRICT) &&
	    (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
		switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
		case 0:
			sc->vmd_bus_start = 0;
			break;
		case 1:
			sc->vmd_bus_start = 128;
			break;
		case 2:
			sc->vmd_bus_start = 224;
			break;
		default:
			device_printf(dev, "Unknown bus offset\n");
			goto fail;
		}
	}
	sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
	    (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);

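	/*
	 * Publish the usable bus range through an rman, analogous to how
	 * pcib(4) manages the secondary bus range of a regular bridge.
	 */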
	bus = &sc->psc.bus;
	bus->sec = sc->vmd_bus_start;
	bus->sub = sc->vmd_bus_end;
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error) {
		device_printf(dev, "Failed to initialize bus rman\n");
		bus->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
	    sc->vmd_bus_end);
	if (error) {
		device_printf(dev, "Failed to add resource to bus rman\n");
		goto fail;
	}

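	/*
	 * The two memory BARs form the windows from which child device BARs
	 * are allocated.  The first 0x2000 bytes of the second window are
	 * left out; the assumption here is that this area is reserved for
	 * the VMD itself, matching the offset other VMD drivers skip as
	 * well.
	 */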
	w = &sc->psc.mem;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error) {
		device_printf(dev, "Failed to initialize memory rman\n");
		w->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[1]),
	    rman_get_end(sc->vmd_regs_res[1]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
	    rman_get_end(sc->vmd_regs_res[2]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}

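	/*
	 * VMD remaps child MSI/MSI-X messages onto its own MSI-X vectors.
	 * Allocate all of them up front and attach a dummy filter so the
	 * PCI code programs and unmasks each vector; RF_SHAREABLE lets
	 * children later hook their handlers onto the same IRQs.  With
	 * VECTOR_OFFSET the first vector appears to be reserved for the
	 * VMD itself, so children start at vector 1.
	 */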
	LIST_INIT(&sc->vmd_users);
	sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
	sc->vmd_msix_count = pci_msix_count(dev);
	if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
		    sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			vi->vi_rid = i + 1;
			vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			    &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
			if (vi->vi_res == NULL) {
				device_printf(dev, "Failed to allocate irq\n");
				goto fail;
			}
			vi->vi_irq = rman_get_start(vi->vi_res);
			if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
			    INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
				device_printf(dev, "Can't set up interrupt\n");
				bus_release_resource(dev, SYS_RES_IRQ,
				    vi->vi_rid, vi->vi_res);
				vi->vi_res = NULL;
				goto fail;
			}
		}
	}

	sc->vmd_dma_tag = bus_get_dma_tag(dev);

	sc->psc.child = device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));

fail:
	vmd_free(sc);
	return (ENXIO);
}

static int
vmd_detach(device_t dev)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int error;

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);
	vmd_free(sc);
	return (0);
}

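/*
 * Hand children the VMD's own DMA tag.  DMA on their behalf is issued by
 * the VMD endpoint, so this is presumably what keeps busdma (and any
 * IOMMU) looking at the device the host actually sees.
 */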
static bus_dma_tag_t
vmd_get_dma_tag(device_t dev, device_t child)
{
	struct vmd_softc *sc = device_get_softc(dev);

	return (sc->vmd_dma_tag);
}

static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct resource *res;

	switch (type) {
	case SYS_RES_IRQ:
		/* VMD hardware does not support legacy interrupts. */
		if (*rid == 0)
			return (NULL);
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags | RF_SHAREABLE));
	case SYS_RES_MEMORY:
		res = rman_reserve_resource(&sc->psc.mem.rman, start, end,
		    count, flags, child);
		if (res == NULL)
			return (NULL);
		if (bootverbose)
			device_printf(dev,
			    "allocated memory range (%#jx-%#jx) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
		break;
	case PCI_RES_BUS:
		res = rman_reserve_resource(&sc->psc.bus.rman, start, end,
		    count, flags, child);
		if (res == NULL)
			return (NULL);
		if (bootverbose)
			device_printf(dev,
			    "allocated bus range (%ju-%ju) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
		break;
	default:
		/* VMD hardware does not support I/O ports. */
		return (NULL);
	}
	rman_set_rid(res, *rid);
	return (res);
}

static int
vmd_adjust_resource(device_t dev, device_t child, int type,
    struct resource *r, rman_res_t start, rman_res_t end)
{

	if (type == SYS_RES_IRQ) {
		return (bus_generic_adjust_resource(dev, child, type, r,
		    start, end));
	}
	return (rman_adjust_resource(r, start, end));
}

static int
vmd_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == SYS_RES_IRQ) {
		return (bus_generic_release_resource(dev, child, type, rid,
		    r));
	}
	return (rman_release_resource(r));
}

static int
vmd_route_interrupt(device_t dev, device_t child, int pin)
{

	/* VMD hardware does not support legacy interrupts. */
	return (PCI_INVALID_IRQ);
}

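/*
 * MSI allocation: point all of the child's messages at the least-used VMD
 * vector (starting above any reserved offset), since VMD cannot tell the
 * messages within one group apart anyway.
 */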
static int
vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (count > vmd_max_msi)
		return (ENOSPC);
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			return (EBUSY);
	}

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers += count;

	for (i = 0; i < count; i++)
		irqs[i] = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child) {
			sc->vmd_irq[u->viu_vector].vi_nusers -= count;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

static int
vmd_alloc_msix(device_t dev, device_t child, int *irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	i = 0;
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			i++;
	}
	if (i >= vmd_max_msix)
		return (ENOSPC);

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers++;

	*irq = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msix(device_t dev, device_t child, int irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child &&
		    sc->vmd_irq[u->viu_vector].vi_irq == irq) {
			sc->vmd_irq[u->viu_vector].vi_nusers--;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

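/*
 * Compose the address/data pair a child programs into its MSI(-X)
 * registers: the index of the backing VMD vector is encoded into bits
 * 19:12 of the usual Intel MSI address, which the VMD hardware appears to
 * key on when raising one of its own vectors; the data word is unused.
 */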
static int
vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int i;

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (sc->vmd_irq[i].vi_irq == irq)
			break;
	}
	if (i >= sc->vmd_msix_count)
		return (EINVAL);
	*addr = MSI_INTEL_ADDR_BASE | (i << 12);
	*data = 0;
	return (0);
}

static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmd_probe),
	DEVMETHOD(device_attach,		vmd_attach),
	DEVMETHOD(device_detach,		vmd_detach),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,		vmd_get_dma_tag),
	DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,		vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		vmd_adjust_resource),
	DEVMETHOD(bus_release_resource,		vmd_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		pcib_maxslots),
	DEVMETHOD(pcib_read_config,		vmd_read_config),
	DEVMETHOD(pcib_write_config,		vmd_write_config),
	DEVMETHOD(pcib_route_interrupt,		vmd_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,		vmd_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmd_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmd_release_msix),
	DEVMETHOD(pcib_map_msi,			vmd_map_msi),
	DEVMETHOD(pcib_request_feature,		pcib_request_feature_allow),

	DEVMETHOD_END
};

static devclass_t pcib_devclass;

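/*
 * The driver registers in the "pcib" devclass, presumably so that the rest
 * of the PCI code treats the VMD endpoint as an ordinary PCI-PCI bridge
 * parent for the child "pci" bus it attaches.
 */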
DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, pcib_devclass, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);