/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Management Complex (MC) bus driver.
 *
 * The MC is a hardware resource manager found in several NXP SoCs (LX2160A,
 * for example) which provides access to the specialized hardware objects
 * used in network-oriented packet processing applications.
 */
36
37 #include "opt_acpi.h"
38 #include "opt_platform.h"
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/bus.h>
43 #include <sys/rman.h>
44 #include <sys/lock.h>
45 #include <sys/module.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/queue.h>
49
50 #include <vm/vm.h>
51
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54
55 #ifdef DEV_ACPI
56 #include <contrib/dev/acpica/include/acpi.h>
57 #include <dev/acpica/acpivar.h>
58 #endif
59
60 #ifdef FDT
61 #include <dev/ofw/openfirm.h>
62 #include <dev/ofw/ofw_bus.h>
63 #include <dev/ofw/ofw_bus_subr.h>
64 #include <dev/ofw/ofw_pci.h>
65 #endif
66
67 #include "pcib_if.h"
68 #include "pci_if.h"
69
70 #include "dpaa2_mc.h"
71
72 /* Macros to read/write MC registers */
73 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r))
74 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v))
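
/*
 * NOTE: Both macros access sc->map[1], i.e. the optional control-register
 * window mapped in dpaa2_mc_attach().
 */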

#define	IORT_DEVICE_NAME	"MCE"

/* MC Registers */
#define	MC_REG_GCR1		0x0000u
#define	MC_REG_GCR2		0x0004u	/* TODO: Does it exist? */
#define	MC_REG_GSR		0x0008u
#define	MC_REG_FAPR		0x0028u

/* General Control Register 1 (GCR1) */
#define	GCR1_P1_STOP		0x80000000u
#define	GCR1_P2_STOP		0x40000000u

/* General Status Register (GSR) */
#define	GSR_HW_ERR(v)		(((v) & 0x80000000u) >> 31)
#define	GSR_CAT_ERR(v)		(((v) & 0x40000000u) >> 30)
#define	GSR_DPL_OFFSET(v)	(((v) & 0x3FFFFF00u) >> 8)
#define	GSR_MCS(v)		(((v) & 0xFFu) >> 0)

/* Timeouts to wait for the MC status. */
#define	MC_STAT_TIMEOUT		1000u	/* us */
#define	MC_STAT_ATTEMPTS	100u
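
/*
 * NOTE: In the worst case the MC status is polled for MC_STAT_ATTEMPTS *
 * MC_STAT_TIMEOUT us, i.e. up to 100 ms, before attach continues.
 */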

/**
 * @brief Structure to describe a DPAA2 device as a managed resource.
 */
struct dpaa2_mc_devinfo {
	STAILQ_ENTRY(dpaa2_mc_devinfo) link;
	device_t dpaa2_dev;
	uint32_t flags;
	uint32_t owners;
};

MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex");

static struct resource_spec dpaa2_mc_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
	RESOURCE_SPEC_END
};
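
/*
 * NOTE: res[0] is the mandatory MC command portal; res[1] optionally maps
 * the MC control registers used to resume the MC processor.
 */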

static u_int dpaa2_mc_get_xref(device_t, device_t);
static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *);

static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *);
static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *);
static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *,
    uint32_t *);

/*
 * For device interface.
 */

int
dpaa2_mc_attach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct resource_map_request req;
	uint32_t val;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_allocated = false;
	sc->msi_owner = NULL;

	error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources\n",
		    __func__);
		return (ENXIO);
	}

	if (sc->res[1]) {
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE;
		error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1],
		    &req, &sc->map[1]);
		if (error) {
			device_printf(dev, "%s: failed to map control "
			    "registers\n", __func__);
			dpaa2_mc_detach(dev);
			return (ENXIO);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));

		/* Reset P1_STOP and P2_STOP bits to resume MC processor. */
		val = mcreg_read_4(sc, MC_REG_GCR1) &
		    ~(GCR1_P1_STOP | GCR1_P2_STOP);
		mcreg_write_4(sc, MC_REG_GCR1, val);

		/* Poll MC status. */
		if (bootverbose)
			device_printf(dev, "polling MC status...\n");
		for (int i = 0; i < MC_STAT_ATTEMPTS; i++) {
			val = mcreg_read_4(sc, MC_REG_GSR);
			if (GSR_MCS(val) != 0u)
				break;
			DELAY(MC_STAT_TIMEOUT);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));
	}

	/* At least 64 bytes of the command portal should be available. */
	if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) {
		device_printf(dev, "%s: MC portal memory region too small: "
		    "%jd\n", __func__, rman_get_size(sc->res[0]));
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Map MC portal memory resource. */
	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_DEVICE;
	error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0],
	    &req, &sc->map[0]);
	if (error) {
		device_printf(dev, "Failed to map MC portal memory\n");
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 I/O objects. */
	sc->dpio_rman.rm_type = RMAN_ARRAY;
	sc->dpio_rman.rm_descr = "DPAA2 DPIO objects";
	error = rman_init(&sc->dpio_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 I/O objects: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 buffer pools. */
	sc->dpbp_rman.rm_type = RMAN_ARRAY;
	sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects";
	error = rman_init(&sc->dpbp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 buffer pools: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 concentrators. */
	sc->dpcon_rman.rm_type = RMAN_ARRAY;
	sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects";
	error = rman_init(&sc->dpcon_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 concentrators: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 MC portals. */
	sc->dpmcp_rman.rm_type = RMAN_ARRAY;
	sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects";
	error = rman_init(&sc->dpmcp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 MC portals: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}
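
	/*
	 * NOTE: The rmans above track allocatable DPAA2 objects (device_t
	 * values registered via dpaa2_mc_manage_dev()), not memory ranges.
	 */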

	/* Initialize a list of non-allocatable DPAA2 devices. */
	mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF);
	STAILQ_INIT(&sc->mdev_list);

	mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF);

	/*
	 * Add a root resource container as the only child of the bus. All of
	 * the direct descendant containers will be attached to the root one
	 * instead of the MC device.
	 */
	sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
	if (sc->rcdev == NULL) {
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}

int
dpaa2_mc_detach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo = NULL;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);
	if (sc->rcdev)
		device_delete_child(dev, sc->rcdev);
	bus_release_resources(dev, dpaa2_mc_spec, sc->res);

	dinfo = device_get_ivars(dev);
	if (dinfo)
		free(dinfo, M_DPAA2_MC);

	return (device_delete_children(dev));
}

/*
 * For bus interface.
 */

struct resource *
dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	struct rman *rm;
	int error;

	rm = dpaa2_mc_rman(mcdev, type, flags);
	if (rm == NULL)
		return (bus_generic_alloc_resource(mcdev, child, type, rid,
		    start, end, count, flags));

	/*
	 * Skip managing DPAA2-specific resource. It must be provided to MC by
	 * calling DPAA2_MC_MANAGE_DEV() beforehand.
	 */
	if (type <= DPAA2_DEV_MC) {
		error = rman_manage_region(rm, start, end);
		if (error) {
			device_printf(mcdev, "rman_manage_region() failed: "
			    "start=%#jx, end=%#jx, error=%d\n", start, end,
			    error);
			goto fail;
		}
	}

	res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start,
	    end, count, flags);
	if (res == NULL)
		goto fail;
	return (res);
fail:
	device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, "
	    "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start,
	    end, count, flags);
	return (NULL);
}

int
dpaa2_mc_adjust_resource(device_t mcdev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_adjust_resource(mcdev, child, r,
		    start, end));
	return (bus_generic_adjust_resource(mcdev, child, r, start, end));
}

int
dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_release_resource(mcdev, child, r));
	return (bus_generic_release_resource(mcdev, child, r));
}

int
dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_activate_resource(mcdev, child, r));
	return (bus_generic_activate_resource(mcdev, child, r));
}

int
dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r));
	if (rm)
		return (bus_generic_rman_deactivate_resource(mcdev, child, r));
	return (bus_generic_deactivate_resource(mcdev, child, r));
}

/*
 * For pseudo-pcib interface.
 */

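/*
 * NOTE: Just enough of the pcib interface is implemented for child DPAA2
 * devices to allocate, release and map MSIs via the standard pcib methods.
 */
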
int
dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
#if defined(INTRNG)
	return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs)
{
#if defined(INTRNG)
	return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
#if defined(INTRNG)
	return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (ENXIO);

	if (type == PCI_ID_MSI)
		return (dpaa2_mc_map_id(mcdev, child, id));

	*id = dinfo->icid;
	return (0);
}

/*
 * For DPAA2 Management Complex bus driver interface.
 */

int
dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	struct rman *rm;
	int error;

	sc = device_get_softc(mcdev);
	dinfo = device_get_ivars(dpaa2_dev);

	if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
	di->dpaa2_dev = dpaa2_dev;
	di->flags = flags;
	di->owners = 0;

	/* Append a new managed DPAA2 device to the queue. */
	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);
	STAILQ_INSERT_TAIL(&sc->mdev_list, di, link);
	mtx_unlock(&sc->mdev_lock);

	if (flags & DPAA2_MC_DEV_ALLOCATABLE) {
		/* Select the rman based on the type of the DPAA2 device. */
		rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0);
		if (!rm)
			return (ENOENT);
		/* Manage DPAA2 device as an allocatable resource. */
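		/*
		 * NOTE: The device_t pointer itself is registered as a
		 * one-entry region, so the rman can hand it back later via
		 * rman_first_free_region() in dpaa2_mc_get_free_dev().
		 */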
		error = rman_manage_region(rm, (rman_res_t) dpaa2_dev,
		    (rman_res_t) dpaa2_dev);
		if (error)
			return (error);
	}

	return (0);
}

int
dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct rman *rm;
	rman_res_t start, end;
	int error;

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	/* Select the resource manager based on the type of the device. */
	rm = dpaa2_mc_rman(mcdev, devtype, 0);
	if (!rm)
		return (ENOENT);
	/* Find the first free DPAA2 device of the given type. */
	error = rman_first_free_region(rm, &start, &end);
	if (error)
		return (error);

	KASSERT(start == end, ("start != end, but should be the same pointer "
	    "to the DPAA2 device: start=%jx, end=%jx", start, end));

	*dpaa2_dev = (device_t) start;

	return (0);
}

int
dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype, uint32_t obj_id)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		dinfo = device_get_ivars(di->dpaa2_dev);
		if (dinfo->dtype == devtype && dinfo->id == obj_id) {
			*dpaa2_dev = di->dpaa2_dev;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	device_t dev = NULL;
	uint32_t owners = UINT32_MAX;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

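	/*
	 * NOTE: Pick the shareable device of the given type with the fewest
	 * owners, i.e. spread the load across shareable DPAA2 objects.
	 */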
	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		dinfo = device_get_ivars(di->dpaa2_dev);

		if ((dinfo->dtype == devtype) &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE) &&
		    (di->owners < owners)) {
			dev = di->dpaa2_dev;
			owners = di->owners;
		}
	}
	if (dev) {
		*dpaa2_dev = dev;
		error = 0;
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners++;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners -= di->owners > 0 ? 1 : 0;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

/**
 * @internal
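 * @brief Resolve the xref of the MSI controller behind the child's ICID,
 * either via the ACPI IORT table or the FDT "msi-map" property.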
 */
static u_int
dpaa2_mc_get_xref(device_t mcdev, device_t child)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
#ifdef DEV_ACPI
	u_int xref, devid;
#endif
#ifdef FDT
	phandle_t msi_parent;
#endif
	int error;

	if (sc && dinfo) {
#ifdef DEV_ACPI
		if (sc->acpi_based) {
			/*
			 * NOTE: The first named component from the IORT table
			 * with the given name (as a substring) will be used.
			 */
			error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
			    dinfo->icid, &xref, &devid);
			if (error)
				return (0);
			return (xref);
		}
#endif
#ifdef FDT
		if (!sc->acpi_based) {
			/* FDT-based driver. */
			error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
			    &msi_parent, NULL);
			if (error)
				return (0);
			return ((u_int) msi_parent);
		}
#endif
	}
	return (0);
}

/**
 * @internal
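 * @brief Translate the child's ICID into a device ID usable for MSIs,
 * preferably via the ACPI IORT table.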
 */
static u_int
dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;
#ifdef DEV_ACPI
	u_int xref, devid;
	int error;
#endif

	dinfo = device_get_ivars(child);
	if (dinfo) {
		/*
		 * NOTE: The first named component from the IORT table with
		 * the given name (as a substring) will be used.
		 */
#ifdef DEV_ACPI
		error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid,
		    &xref, &devid);
		if (error == 0)
			*id = devid;
		else
#endif
			*id = dinfo->icid; /* RID not in IORT, likely FW bug */

		return (0);
	}
	return (ENXIO);
}

/**
 * @internal
 * @brief Obtain a resource manager based on the given type of the resource.
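 *
 * Returns NULL for resource types not managed by the MC, so that callers
 * can fall back to the generic bus_*() methods.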
 */
struct rman *
dpaa2_mc_rman(device_t mcdev, int type, u_int flags)
{
	struct dpaa2_mc_softc *sc;

	sc = device_get_softc(mcdev);

	switch (type) {
	case DPAA2_DEV_IO:
		return (&sc->dpio_rman);
	case DPAA2_DEV_BP:
		return (&sc->dpbp_rman);
	case DPAA2_DEV_CON:
		return (&sc->dpcon_rman);
	case DPAA2_DEV_MCP:
		return (&sc->dpmcp_rman);
	default:
		break;
	}

	return (NULL);
}

#if defined(INTRNG) && !defined(IOMMU)

/**
 * @internal
 * @brief Allocates the requested number of MSIs.
 *
 * NOTE: This function is part of a fallback solution used when an IOMMU
 * isn't available. The total number of IRQs is limited to 32.
 */
static int
dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int msi_irqs[DPAA2_MC_MSI_COUNT];
	int error;

	/* Pre-allocate a bunch of MSIs for MC to be used by its children. */
	if (!sc->msi_allocated) {
		error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev,
		    child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs);
		if (error) {
			device_printf(mcdev, "failed to pre-allocate %d MSIs: "
			    "error=%d\n", DPAA2_MC_MSI_COUNT, error);
			return (error);
		}

		mtx_assert(&sc->msi_lock, MA_NOTOWNED);
		mtx_lock(&sc->msi_lock);
		for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
			sc->msi[i].child = NULL;
			sc->msi[i].irq = msi_irqs[i];
		}
		sc->msi_owner = child;
		sc->msi_allocated = true;
		mtx_unlock(&sc->msi_lock);
	}

	error = ENOENT;

	/* Find the first free MSIs from the pre-allocated pool. */
	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != NULL)
			continue;
		error = 0;
		for (int j = 0; j < count; j++) {
			if (i + j >= DPAA2_MC_MSI_COUNT) {
				device_printf(mcdev, "requested %d MSIs exceed "
				    "limit of %d available\n", count,
				    DPAA2_MC_MSI_COUNT);
				error = E2BIG;
				break;
			}
			sc->msi[i + j].child = child;
			irqs[j] = sc->msi[i + j].irq;
		}
		break;
	}
	mtx_unlock(&sc->msi_lock);

	return (error);
}

/**
 * @internal
 * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
 *
 * NOTE: This function is part of a fallback solution used when an IOMMU
 * isn't available. The total number of IRQs is limited to 32.
 * NOTE: MSIs are kept allocated in the kernel as a part of the pool.
 */
static int
dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);

	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != child)
			continue;
		for (int j = 0; j < count; j++) {
			if (sc->msi[i].irq == irqs[j]) {
				sc->msi[i].child = NULL;
				break;
			}
		}
	}
	mtx_unlock(&sc->msi_lock);

	return (0);
}

/**
 * @internal
 * @brief Provides the address to write to and the data according to the
 * given MSI from the pre-allocated pool.
 *
 * NOTE: This function is part of a fallback solution used when an IOMMU
 * isn't available. The total number of IRQs is limited to 32.
 */
static int
dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int error = EINVAL;

	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child == child && sc->msi[i].irq == irq) {
			error = 0;
			break;
		}
	}
	mtx_unlock(&sc->msi_lock);
	if (error)
		return (error);

	return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev,
	    sc->msi_owner), irq, addr, data));
}

#endif /* defined(INTRNG) && !defined(IOMMU) */

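/*
 * NOTE: No methods are provided here; probe/attach and the bus methods are
 * supplied by the ACPI- and FDT-specific front-ends which subclass this
 * driver.
 */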
static device_method_t dpaa2_mc_methods[] = {
	DEVMETHOD_END
};

DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods,
    sizeof(struct dpaa2_mc_softc));