/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * QBMan command interface and the DPAA2 I/O (DPIO) driver.
 *
 * The DPIO object allows configuration of the QBMan software portal with
 * optional notification capabilities.
 *
 * Software portals are used by the driver to communicate with the QBMan. The
 * DPIO object’s main purpose is to enable the driver to perform I/O – enqueue
 * and dequeue operations, as well as buffer release and acquire operations –
 * using QBMan.
 */
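
/*
 * Illustrative sketch (not part of the driver): a consumer that holds the
 * device_t of an attached DPIO can drive the QBMan software portal through
 * the dpaa2_swp_if methods implemented at the end of this file.  The names
 * iodev, fqid, fds, nframes, bpid, paddrs, nbufs and rc below are
 * hypothetical caller-side variables used only for this example.
 */
#if 0
	/* Enqueue a batch of frame descriptors to a single frame queue. */
	rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(iodev, fqid, fds, nframes);

	/* Release consumed buffers back to a QBMan buffer pool. */
	rc = DPAA2_SWP_RELEASE_BUFS(iodev, bpid, paddrs, nbufs);
#endif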

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/_cpuset.h>
#include <sys/cpuset.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_mc.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_io.h"
#include "dpaa2_ni.h"

#define DPIO_IRQ_INDEX		0 /* index of the only DPIO IRQ */
#define DPIO_POLL_MAX		32

/*
 * Memory:
 *	0: cache-enabled part of the QBMan software portal.
 *	1: cache-inhibited part of the QBMan software portal.
 *	2: control registers of the QBMan software portal?
 *
 * Note that MSI should be allocated separately using pseudo-PCI interface.
 */
struct resource_spec dpaa2_io_spec[] = {
	/*
	 * System Memory resources.
	 */
#define MEM_RES_NUM	(3u)
#define MEM_RID_OFF	(0u)
#define MEM_RID(rid)	((rid) + MEM_RID_OFF)
	{ SYS_RES_MEMORY, MEM_RID(0),   RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, MEM_RID(1),   RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, MEM_RID(2),   RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPIO.
	 */
#define MCP_RES_NUM	(1u)
#define MCP_RID_OFF	(MEM_RID_OFF + MEM_RES_NUM)
#define MCP_RID(rid)	((rid) + MCP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_MCP,  MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/* --- */
	RESOURCE_SPEC_END
};

/* Configuration routines. */
static int dpaa2_io_setup_irqs(device_t dev);
static int dpaa2_io_release_irqs(device_t dev);
static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc);
static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc);

/* Interrupt handlers */
static void dpaa2_io_intr(void *arg);

static int
dpaa2_io_probe(device_t dev)
{
	/* DPIO device will be added by a parent resource container itself. */
	device_set_desc(dev, "DPAA2 I/O");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_io_detach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_io_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, io_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	/* Tear down interrupt handler and release IRQ resources. */
	dpaa2_io_release_irqs(dev);

	/* Free software portal helper object. */
	dpaa2_swp_free_portal(sc->swp);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: error=%d\n",
		    __func__, error);
		goto err_exit;
	}
	error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n",
		    __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_IO_DISABLE(dev, child, &cmd);
	if (error && bootverbose) {
		device_printf(dev, "%s: failed to disable DPIO: id=%d, "
		    "error=%d\n", __func__, dinfo->id, error);
	}

	(void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));

	/* Unmap memory resources of the portal. */
	for (int i = 0; i < MEM_RES_NUM; i++) {
		if (sc->res[MEM_RID(i)] == NULL) {
			continue;
		}
		error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY,
		    sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]);
		if (error && bootverbose) {
			device_printf(dev, "%s: failed to unmap memory "
			    "resource: rid=%d, error=%d\n", __func__, MEM_RID(i),
			    error);
		}
	}

	/* Release allocated resources. */
	bus_release_resources(dev, dpaa2_io_spec, sc->res);

	return (0);

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_io_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_io_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	struct resource_map_request req;
	struct {
		vm_memattr_t memattr;
		char *label;
	} map_args[MEM_RES_NUM] = {
		{ VM_MEMATTR_WRITE_BACK, "cache-enabled part" },
		{ VM_MEMATTR_DEVICE, "cache-inhibited part" },
		{ VM_MEMATTR_DEVICE, "control registers" }
	};
	uint16_t rc_token, io_token;
	int error;

	sc->dev = dev;
	sc->swp = NULL;
	sc->intr = NULL;
	sc->irq_resource = NULL;

	/* Allocate resources. */
	error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		return (ENXIO);
	}

	/* Set allocated MC portal up. */
	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	/* Map memory resources of the portal. */
	for (int i = 0; i < MEM_RES_NUM; i++) {
		if (sc->res[MEM_RID(i)] == NULL) {
			continue;
		}

		resource_init_map_request(&req);
		req.memattr = map_args[i].memattr;
		error = bus_map_resource(sc->dev, SYS_RES_MEMORY,
		    sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]);
		if (error) {
			device_printf(dev, "%s: failed to map %s: error=%d\n",
			    __func__, map_args[i].label, error);
			goto err_exit;
		}
	}

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: error=%d\n",
		    __func__, error);
		goto err_exit;
	}
	error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n",
		    __func__, dinfo->id, error);
		goto close_rc;
	}
	error = DPAA2_CMD_IO_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n",
		    __func__, dinfo->id, error);
		goto close_io;
	}
	error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to get DPIO attributes: id=%d, "
		    "error=%d\n", __func__, dinfo->id, error);
		goto close_io;
	}
	error = DPAA2_CMD_IO_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable DPIO: id=%d, "
		    "error=%d\n", __func__, dinfo->id, error);
		goto close_io;
	}

	/* Prepare descriptor of the QBMan software portal. */
	sc->swp_desc.dpio_dev = dev;
	sc->swp_desc.swp_version = sc->attr.swp_version;
	sc->swp_desc.swp_clk = sc->attr.swp_clk;
	sc->swp_desc.swp_id = sc->attr.swp_id;
	sc->swp_desc.has_notif = sc->attr.priors_num ? true : false;
	sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false;

	sc->swp_desc.cena_res = sc->res[0];
	sc->swp_desc.cena_map = &sc->map[0];
	sc->swp_desc.cinh_res = sc->res[1];
	sc->swp_desc.cinh_map = &sc->map[1];

	/*
	 * Compute how many nanoseconds it takes for a block of 256 QBMan
	 * cycles to elapse. This is needed because the interrupt timeout
	 * period register is specified in QBMan clock cycles in increments
	 * of 256.
	 */
	sc->swp_desc.swp_cycles_ratio = 256000 /
	    (sc->swp_desc.swp_clk / 1000000);
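	/*
	 * Worked example (illustration only, assuming a hypothetical QBMan
	 * clock of 500 MHz): swp_clk / 1000000 = 500 cycles per microsecond,
	 * so swp_cycles_ratio = 256000 / 500 = 512, i.e. one block of 256
	 * cycles spans 512 ns. A timeout of T ns then corresponds to roughly
	 * T / swp_cycles_ratio blocks of 256 cycles.
	 */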

	/* Initialize QBMan software portal. */
	error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF);
	if (error) {
		device_printf(dev, "%s: failed to initialize dpaa2_swp: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	error = dpaa2_io_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto err_exit;
	}

	if (bootverbose) {
		device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, "
		    "notif_priors=%d, swp_version=0x%x\n",
		    sc->attr.id, sc->attr.swp_id,
		    sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL
		    ? "local_channel" : "no_channel", sc->attr.priors_num,
		    sc->attr.swp_version);
	}

	(void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_io:
	(void)DPAA2_CMD_IO_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, io_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	dpaa2_io_detach(dev);
	return (ENXIO);
}

/**
 * @brief Enqueue multiple frames to a frame queue using one FQID.
 */
static int
dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid,
    struct dpaa2_fd *fd, int frames_n)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);
	struct dpaa2_swp *swp = sc->swp;
	struct dpaa2_eq_desc ed;
	uint32_t flags = 0;

	memset(&ed, 0, sizeof(ed));

	/* Setup enqueue descriptor. */
	dpaa2_swp_set_ed_norp(&ed, false);
	dpaa2_swp_set_ed_fq(&ed, fqid);

	return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n));
}

/**
 * @brief Configure the channel data availability notification (CDAN)
 * in a particular WQ channel paired with DPIO.
 */
static int
dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);

	/* Enable generation of the CDAN notifications. */
	if (ctx->cdan_en) {
		return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id,
		    DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en,
		    ctx->qman_ctx));
	}

	return (0);
}
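
/*
 * Illustrative sketch (not part of the driver): a channel owner would fill
 * a notification context and hand it to this DPIO through the
 * dpaa2_swp_conf_wq_channel method; when a CDAN arrives, dpaa2_io_intr()
 * below invokes ctx->poll(ctx->channel). The names chan_id, my_chan and
 * my_chan_poll are hypothetical caller-side identifiers.
 */
#if 0
	ctx->cdan_en = true;		/* generate CDAN notifications */
	ctx->fq_chan_id = chan_id;	/* WQ channel paired with this DPIO */
	ctx->qman_ctx = (uint64_t)ctx;	/* handed back in dq.scn.ctx */
	ctx->poll = my_chan_poll;	/* hypothetical poll callback */
	ctx->channel = my_chan;		/* hypothetical channel object */
	error = DPAA2_SWP_CONF_WQ_CHANNEL(iodev, ctx);
#endif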

/**
 * @brief Query current configuration/state of the buffer pool.
 */
static int
dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);

	return (dpaa2_swp_query_bp(sc->swp, bpid, conf));
}

/**
 * @brief Release one or more buffer pointers to the QBMan buffer pool.
 */
static int
dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf,
    uint32_t buf_num)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);

	return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num));
}

/**
 * @brief Set up interrupts generated by the QBMan software portal of this
 * DPIO object.
 */
static int
dpaa2_io_setup_irqs(device_t dev)
{
	struct dpaa2_io_softc *sc = device_get_softc(dev);
	int error;

	/*
	 * Setup interrupts generated by the software portal.
	 */
	dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI);
	dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu);

	/* Configure IRQs. */
	error = dpaa2_io_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI: error=%d\n",
		    __func__, error);
		return (error);
	}
	if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		return (ENXIO);
	}

	/* Wrap DPIO ID around number of CPUs. */
	bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus);

	/*
	 * Setup and enable Static Dequeue Command to receive CDANs from
	 * channel 0.
	 */
	if (sc->swp_desc.has_notif)
		dpaa2_swp_set_push_dequeue(sc->swp, 0, true);

	return (0);
}

static int
dpaa2_io_release_irqs(device_t dev)
{
	struct dpaa2_io_softc *sc = device_get_softc(dev);

	/* Disable receiving CDANs from channel 0. */
	if (sc->swp_desc.has_notif)
		dpaa2_swp_set_push_dequeue(sc->swp, 0, false);

	/* Release IRQ resources. */
	if (sc->intr != NULL)
		bus_teardown_intr(dev, sc->irq_resource, sc->intr);
	if (sc->irq_resource != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0],
		    sc->irq_resource);

	(void)dpaa2_io_release_msi(device_get_softc(dev));

	/* Configure software portal to stop generating interrupts. */
	dpaa2_swp_set_intr_trigger(sc->swp, 0);
	dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu);

	return (0);
}

/**
 * @brief Allocate MSI interrupts for this DPAA2 I/O object.
 */
static int
dpaa2_io_setup_msi(struct dpaa2_io_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_IO_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_IO_MSI_COUNT);
	val = MIN(val, DPAA2_IO_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}

static int
dpaa2_io_release_msi(struct dpaa2_io_softc *sc)
{
	int error;

	error = pci_release_msi(sc->dev);
	if (error) {
		device_printf(sc->dev, "%s: failed to release MSI: error=%d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief DPAA2 I/O interrupt handler.
 */
static void
dpaa2_io_intr(void *arg)
{
	struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg;
	struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX];
	struct dpaa2_dq dq;
	uint32_t idx, status;
	uint16_t flags;
	int rc, cdan_n = 0;

	status = dpaa2_swp_read_intr_status(sc->swp);
	if (status == 0) {
		return;
	}

	DPAA2_SWP_LOCK(sc->swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(sc->swp);
		return;
	}

	for (int i = 0; i < DPIO_POLL_MAX; i++) {
		rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx);
		if (rc) {
			break;
		}

		if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) ==
		    DPAA2_DQRR_RESULT_CDAN) {
			ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx;
		} else {
			/* TODO: Report unknown DQRR entry. */
		}
		dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx);
	}
	DPAA2_SWP_UNLOCK(sc->swp);

	for (int i = 0; i < cdan_n; i++) {
		ctx[i]->poll(ctx[i]->channel);
	}

	/* Re-enable software portal interrupts. */
	dpaa2_swp_clear_intr_status(sc->swp, status);
	dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0);
}

static device_method_t dpaa2_io_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_io_probe),
	DEVMETHOD(device_attach,	dpaa2_io_attach),
	DEVMETHOD(device_detach,	dpaa2_io_detach),

	/* QBMan software portal interface */
	DEVMETHOD(dpaa2_swp_enq_multiple_fq,	dpaa2_io_enq_multiple_fq),
	DEVMETHOD(dpaa2_swp_conf_wq_channel,	dpaa2_io_conf_wq_channel),
	DEVMETHOD(dpaa2_swp_query_bp,		dpaa2_io_query_bp),
	DEVMETHOD(dpaa2_swp_release_bufs,	dpaa2_io_release_bufs),

	DEVMETHOD_END
};

static driver_t dpaa2_io_driver = {
	"dpaa2_io",
	dpaa2_io_methods,
	sizeof(struct dpaa2_io_softc),
};

DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0);