/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Management Complex (MC) bus driver.
 *
 * The MC is a hardware resource manager found in several NXP SoCs (LX2160A,
 * for example); it provides access to the specialized hardware objects used
 * in network-oriented packet processing applications.
 */

#include "opt_acpi.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <vm/vm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#endif

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_mc.h"

/* Macros to read/write MC registers */
#define	mcreg_read_4(_sc, _r)		bus_read_4(&(_sc)->map[1], (_r))
#define	mcreg_write_4(_sc, _r, _v)	bus_write_4(&(_sc)->map[1], (_r), (_v))
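/*
 * NOTE: map[1] is the optional control-register window (memory rid 1 in
 * dpaa2_mc_spec below); it is only mapped in dpaa2_mc_attach() when that
 * resource is present, so these macros must not be used otherwise.
 */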

#define	IORT_DEVICE_NAME	"MCE"

/* MC Registers */
#define	MC_REG_GCR1		0x0000u
#define	MC_REG_GCR2		0x0004u	/* TODO: Does it exist? */
#define	MC_REG_GSR		0x0008u
#define	MC_REG_FAPR		0x0028u

/* General Control Register 1 (GCR1) */
#define	GCR1_P1_STOP		0x80000000u
#define	GCR1_P2_STOP		0x40000000u

/* General Status Register (GSR) */
#define	GSR_HW_ERR(v)		(((v) & 0x80000000u) >> 31)
#define	GSR_CAT_ERR(v)		(((v) & 0x40000000u) >> 30)
#define	GSR_DPL_OFFSET(v)	(((v) & 0x3FFFFF00u) >> 8)
#define	GSR_MCS(v)		(((v) & 0xFFu) >> 0)

/* Delay (us) between MC status polls and the number of polling attempts. */
#define	MC_STAT_TIMEOUT		1000u	/* us */
#define	MC_STAT_ATTEMPTS	100u
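/* Worst-case wait: MC_STAT_ATTEMPTS * MC_STAT_TIMEOUT = 100 * 1000 us = 100 ms. */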

/**
 * @brief Structure to describe a DPAA2 device as a managed resource.
 */
struct dpaa2_mc_devinfo {
	STAILQ_ENTRY(dpaa2_mc_devinfo) link;
	device_t	dpaa2_dev;
	uint32_t	flags;
	uint32_t	owners;
};

MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex");

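/*
 * Two memory regions are expected: the MC command portal (rid 0) and the
 * optional control registers (rid 1). Both are allocated unmapped here and
 * mapped explicitly in dpaa2_mc_attach() with the device memory attribute.
 */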
static struct resource_spec dpaa2_mc_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
	RESOURCE_SPEC_END
};

static u_int dpaa2_mc_get_xref(device_t, device_t);
static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *);

static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *);
static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *);
static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *,
    uint32_t *);

/*
 * For device interface.
 */

int
dpaa2_mc_attach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct resource_map_request req;
	uint32_t val;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_allocated = false;
	sc->msi_owner = NULL;

	error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources\n",
		    __func__);
		return (ENXIO);
	}

	if (sc->res[1]) {
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE;
		error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1],
		    &req, &sc->map[1]);
		if (error) {
			device_printf(dev, "%s: failed to map control "
			    "registers\n", __func__);
			dpaa2_mc_detach(dev);
			return (ENXIO);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));

		/* Reset P1_STOP and P2_STOP bits to resume MC processor. */
		val = mcreg_read_4(sc, MC_REG_GCR1) &
		    ~(GCR1_P1_STOP | GCR1_P2_STOP);
		mcreg_write_4(sc, MC_REG_GCR1, val);

		/* Poll MC status. */
		if (bootverbose)
			device_printf(dev, "polling MC status...\n");
		for (int i = 0; i < MC_STAT_ATTEMPTS; i++) {
			val = mcreg_read_4(sc, MC_REG_GSR);
			if (GSR_MCS(val) != 0u)
				break;
			DELAY(MC_STAT_TIMEOUT);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));
	}

	/* At least 64 bytes of the command portal should be available. */
	if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) {
		device_printf(dev, "%s: MC portal memory region too small: "
		    "%jd\n", __func__, rman_get_size(sc->res[0]));
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Map MC portal memory resource. */
	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_DEVICE;
	error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0],
	    &req, &sc->map[0]);
	if (error) {
		device_printf(dev, "Failed to map MC portal memory\n");
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 I/O objects. */
	sc->dpio_rman.rm_type = RMAN_ARRAY;
	sc->dpio_rman.rm_descr = "DPAA2 DPIO objects";
	error = rman_init(&sc->dpio_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 I/O objects: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 buffer pools. */
	sc->dpbp_rman.rm_type = RMAN_ARRAY;
	sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects";
	error = rman_init(&sc->dpbp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 buffer pools: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 concentrators. */
	sc->dpcon_rman.rm_type = RMAN_ARRAY;
	sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects";
	error = rman_init(&sc->dpcon_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 concentrators: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 MC portals. */
	sc->dpmcp_rman.rm_type = RMAN_ARRAY;
	sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects";
	error = rman_init(&sc->dpmcp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 MC portals: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a list of non-allocatable DPAA2 devices. */
	mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF);
	STAILQ_INIT(&sc->mdev_list);

	mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF);

	/*
	 * Add a root resource container as the only child of the bus. All of
	 * the direct descendant containers will be attached to the root one
	 * instead of the MC device.
	 */
	sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
	if (sc->rcdev == NULL) {
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}
	bus_identify_children(dev);
	bus_attach_children(dev);

	return (0);
}

int
dpaa2_mc_detach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo = NULL;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);
	bus_release_resources(dev, dpaa2_mc_spec, sc->res);

	dinfo = device_get_ivars(dev);
	if (dinfo)
		free(dinfo, M_DPAA2_MC);

	return (0);
}

/*
 * For bus interface.
 */

struct resource *
dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	struct rman *rm;
	int error;

	rm = dpaa2_mc_rman(mcdev, type, flags);
	if (rm == NULL)
		return (bus_generic_alloc_resource(mcdev, child, type, rid,
		    start, end, count, flags));

	/*
	 * Skip managing DPAA2-specific resources; they must be provided to
	 * the MC by calling DPAA2_MC_MANAGE_DEV() beforehand.
	 */
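	/*
	 * NOTE: DPAA2 object types are assumed to be enumerated above
	 * DPAA2_DEV_MC in "enum dpaa2_dev_type", so the branch below only
	 * registers regions for the regular (non-DPAA2) resource types.
	 */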
	if (type <= DPAA2_DEV_MC) {
		error = rman_manage_region(rm, start, end);
		if (error) {
			device_printf(mcdev, "rman_manage_region() failed: "
			    "start=%#jx, end=%#jx, error=%d\n", start, end,
			    error);
			goto fail;
		}
	}

	res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start,
	    end, count, flags);
	if (res == NULL)
		goto fail;
	return (res);
fail:
	device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, "
	    "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end,
	    count, flags);
	return (NULL);
}

int
dpaa2_mc_adjust_resource(device_t mcdev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_adjust_resource(mcdev, child, r,
		    start, end));
	return (bus_generic_adjust_resource(mcdev, child, r, start, end));
}

int
dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_release_resource(mcdev, child, r));
	return (bus_generic_release_resource(mcdev, child, r));
}

int
dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_activate_resource(mcdev, child, r));
	return (bus_generic_activate_resource(mcdev, child, r));
}

int
dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_deactivate_resource(mcdev, child, r));
	return (bus_generic_deactivate_resource(mcdev, child, r));
}

/*
 * For pseudo-pcib interface.
 */

int
dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
#if defined(INTRNG)
	return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs)
{
#if defined(INTRNG)
	return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
#if defined(INTRNG)
	return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (ENXIO);

	if (type == PCI_ID_MSI)
		return (dpaa2_mc_map_id(mcdev, child, id));

	*id = dinfo->icid;
	return (0);
}

/*
 * For DPAA2 Management Complex bus driver interface.
 */

int
dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	struct rman *rm;
	int error;

	sc = device_get_softc(mcdev);
	dinfo = device_get_ivars(dpaa2_dev);

	if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
	di->dpaa2_dev = dpaa2_dev;
	di->flags = flags;
	di->owners = 0;

	/* Append a new managed DPAA2 device to the queue. */
	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);
	STAILQ_INSERT_TAIL(&sc->mdev_list, di, link);
	mtx_unlock(&sc->mdev_lock);

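	/*
	 * An allocatable device is registered in the per-type rman as a
	 * single-unit region whose start and end are the device_t pointer
	 * itself, so reserving from that rman effectively hands out devices.
	 */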
	if (flags & DPAA2_MC_DEV_ALLOCATABLE) {
		/* Select the rman based on the type of the DPAA2 device. */
		rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0);
		if (!rm)
			return (ENOENT);
		/* Manage the DPAA2 device as an allocatable resource. */
		error = rman_manage_region(rm, (rman_res_t) dpaa2_dev,
		    (rman_res_t) dpaa2_dev);
		if (error)
			return (error);
	}

	return (0);
}

int
dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct rman *rm;
	rman_res_t start, end;
	int error;

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	/* Select the resource manager based on the type of the DPAA2 device. */
	rm = dpaa2_mc_rman(mcdev, devtype, 0);
	if (!rm)
		return (ENOENT);
	/* Find the first free DPAA2 device of the given type. */
	error = rman_first_free_region(rm, &start, &end);
	if (error)
		return (error);

	KASSERT(start == end, ("start != end, but should be the same pointer "
	    "to the DPAA2 device: start=%jx, end=%jx", start, end));

	*dpaa2_dev = (device_t) start;

	return (0);
}

int
dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype, uint32_t obj_id)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		dinfo = device_get_ivars(di->dpaa2_dev);
		if (dinfo->dtype == devtype && dinfo->id == obj_id) {
			*dpaa2_dev = di->dpaa2_dev;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	device_t dev = NULL;
	uint32_t owners = UINT32_MAX;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		dinfo = device_get_ivars(di->dpaa2_dev);

		if ((dinfo->dtype == devtype) &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE) &&
		    (di->owners < owners)) {
			dev = di->dpaa2_dev;
			owners = di->owners;
		}
	}
	if (dev) {
		*dpaa2_dev = dev;
		error = 0;
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners++;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners -= di->owners > 0 ? 1 : 0;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_get_xref(device_t mcdev, device_t child)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
#ifdef DEV_ACPI
	u_int xref, devid;
#endif
#ifdef FDT
	phandle_t msi_parent;
#endif
	int error;

	if (sc && dinfo) {
#ifdef DEV_ACPI
		if (sc->acpi_based) {
			/*
			 * NOTE: The first named component from the IORT table
			 * with the given name (as a substring) will be used.
			 */
			error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
			    dinfo->icid, &xref, &devid);
			if (error)
				return (0);
			return (xref);
		}
#endif
#ifdef FDT
		if (!sc->acpi_based) {
			/* FDT-based driver. */
			error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
			    &msi_parent, NULL);
			if (error)
				return (0);
			return ((u_int) msi_parent);
		}
#endif
	}
	return (0);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;
#ifdef DEV_ACPI
	u_int xref, devid;
	int error;
#endif

	dinfo = device_get_ivars(child);
	if (dinfo) {
		/*
		 * The first named component from the IORT table with the
		 * given name (as a substring) will be used.
		 */
#ifdef DEV_ACPI
		error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid,
		    &xref, &devid);
		if (error == 0)
			*id = devid;
		else
#endif
			*id = dinfo->icid; /* RID not in IORT, likely FW bug */

		return (0);
	}
	return (ENXIO);
}

/**
 * @internal
 * @brief Obtain a resource manager based on the given type of the resource.
 */
struct rman *
dpaa2_mc_rman(device_t mcdev, int type, u_int flags)
{
	struct dpaa2_mc_softc *sc;

	sc = device_get_softc(mcdev);

	switch (type) {
	case DPAA2_DEV_IO:
		return (&sc->dpio_rman);
	case DPAA2_DEV_BP:
		return (&sc->dpbp_rman);
	case DPAA2_DEV_CON:
		return (&sc->dpcon_rman);
	case DPAA2_DEV_MCP:
		return (&sc->dpmcp_rman);
	default:
		break;
	}

	return (NULL);
}

#if defined(INTRNG) && !defined(IOMMU)

/**
 * @internal
 * @brief Allocates the requested number of MSIs.
 *
 * NOTE: This function is part of the fallback solution when IOMMU isn't
 *       available. The total number of IRQs is limited to 32.
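 *       All DPAA2_MC_MSI_COUNT MSIs are allocated at once on the first
 *       request and then handed out to children from this pre-allocated pool.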
 */
static int
dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int msi_irqs[DPAA2_MC_MSI_COUNT];
	int error;

	/* Pre-allocate MSIs for the MC, to be used by its children. */
	if (!sc->msi_allocated) {
		error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev,
		    child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs);
		if (error) {
			device_printf(mcdev, "failed to pre-allocate %d MSIs: "
			    "error=%d\n", DPAA2_MC_MSI_COUNT, error);
			return (error);
		}

		mtx_assert(&sc->msi_lock, MA_NOTOWNED);
		mtx_lock(&sc->msi_lock);
		for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
			sc->msi[i].child = NULL;
			sc->msi[i].irq = msi_irqs[i];
		}
		sc->msi_owner = child;
		sc->msi_allocated = true;
		mtx_unlock(&sc->msi_lock);
	}

	error = ENOENT;

	/* Find the first free MSIs from the pre-allocated pool. */
	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != NULL)
			continue;
		error = 0;
		for (int j = 0; j < count; j++) {
			if (i + j >= DPAA2_MC_MSI_COUNT) {
				device_printf(mcdev, "requested %d MSIs exceed "
				    "limit of %d available\n", count,
				    DPAA2_MC_MSI_COUNT);
				error = E2BIG;
				break;
			}
			sc->msi[i + j].child = child;
			irqs[j] = sc->msi[i + j].irq;
		}
		break;
	}
	mtx_unlock(&sc->msi_lock);

	return (error);
}

/**
 * @internal
 * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
 *
 * NOTE: This function is part of the fallback solution when IOMMU isn't
 *       available. The total number of IRQs is limited to 32.
 * NOTE: MSIs are kept allocated in the kernel as part of the pool.
 */
static int
dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);

	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != child)
			continue;
		for (int j = 0; j < count; j++) {
			if (sc->msi[i].irq == irqs[j]) {
				sc->msi[i].child = NULL;
				break;
			}
		}
	}
	mtx_unlock(&sc->msi_lock);

	return (0);
}

/**
 * @internal
 * @brief Provides the address to write to and the data according to the given
 * MSI from the pre-allocated pool.
 *
 * NOTE: This function is part of the fallback solution when IOMMU isn't
 *       available. The total number of IRQs is limited to 32.
 */
static int
dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int error = EINVAL;

	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child == child && sc->msi[i].irq == irq) {
			error = 0;
			break;
		}
	}
	mtx_unlock(&sc->msi_lock);
	if (error)
		return (error);

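	/*
	 * Map through sc->msi_owner (the child the pool was originally
	 * allocated for), not through the caller.
	 */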
	return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev,
	    sc->msi_owner), irq, addr, data));
}

#endif /* defined(INTRNG) && !defined(IOMMU) */

static device_method_t dpaa2_mc_methods[] = {
	DEVMETHOD_END
};

DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods,
    sizeof(struct dpaa2_mc_softc));