1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2021-2022 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 /* 30 * QBMan command interface and the DPAA2 I/O (DPIO) driver. 31 * 32 * The DPIO object allows configuration of the QBMan software portal with 33 * optional notification capabilities. 34 * 35 * Software portals are used by the driver to communicate with the QBMan. The 36 * DPIO object’s main purpose is to enable the driver to perform I/O – enqueue 37 * and dequeue operations, as well as buffer release and acquire operations – 38 * using QBMan. 39 */ 40 41 #include "opt_rss.h" 42 43 #include <sys/param.h> 44 #include <sys/kernel.h> 45 #include <sys/bus.h> 46 #include <sys/rman.h> 47 #include <sys/module.h> 48 #include <sys/malloc.h> 49 #include <sys/lock.h> 50 #include <sys/mutex.h> 51 #include <sys/_cpuset.h> 52 #include <sys/cpuset.h> 53 #include <sys/taskqueue.h> 54 #include <sys/smp.h> 55 56 #include <vm/vm.h> 57 58 #include <machine/bus.h> 59 #include <machine/resource.h> 60 61 #include <dev/pci/pcivar.h> 62 63 #ifdef RSS 64 #include <net/rss_config.h> 65 #endif 66 67 #include "pcib_if.h" 68 #include "pci_if.h" 69 70 #include "dpaa2_mc.h" 71 #include "dpaa2_mcp.h" 72 #include "dpaa2_swp.h" 73 #include "dpaa2_swp_if.h" 74 #include "dpaa2_cmd_if.h" 75 #include "dpaa2_io.h" 76 #include "dpaa2_ni.h" 77 #include "dpaa2_channel.h" 78 79 #define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */ 80 #define DPIO_POLL_MAX 32 81 82 /* 83 * Memory: 84 * 0: cache-enabled part of the QBMan software portal. 85 * 1: cache-inhibited part of the QBMan software portal. 86 * 2: control registers of the QBMan software portal? 87 * 88 * Note that MSI should be allocated separately using pseudo-PCI interface. 89 */ 90 struct resource_spec dpaa2_io_spec[] = { 91 /* 92 * System Memory resources. 93 */ 94 #define MEM_RES_NUM (3u) 95 #define MEM_RID_OFF (0u) 96 #define MEM_RID(rid) ((rid) + MEM_RID_OFF) 97 { SYS_RES_MEMORY, MEM_RID(0), RF_ACTIVE | RF_UNMAPPED }, 98 { SYS_RES_MEMORY, MEM_RID(1), RF_ACTIVE | RF_UNMAPPED }, 99 { SYS_RES_MEMORY, MEM_RID(2), RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 100 /* 101 * DPMCP resources. 102 * 103 * NOTE: MC command portals (MCPs) are used to send commands to, and 104 * receive responses from, the MC firmware. One portal per DPIO. 105 */ 106 #define MCP_RES_NUM (1u) 107 #define MCP_RID_OFF (MEM_RID_OFF + MEM_RES_NUM) 108 #define MCP_RID(rid) ((rid) + MCP_RID_OFF) 109 /* --- */ 110 { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, 111 /* --- */ 112 RESOURCE_SPEC_END 113 }; 114 115 /* Configuration routines. */ 116 static int dpaa2_io_setup_irqs(device_t dev); 117 static int dpaa2_io_release_irqs(device_t dev); 118 static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc); 119 static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc); 120 121 /* Interrupt handlers */ 122 static void dpaa2_io_intr(void *arg); 123 124 static int 125 dpaa2_io_probe(device_t dev) 126 { 127 /* DPIO device will be added by a parent resource container itself. */ 128 device_set_desc(dev, "DPAA2 I/O"); 129 return (BUS_PROBE_DEFAULT); 130 } 131 132 static int 133 dpaa2_io_detach(device_t dev) 134 { 135 device_t pdev = device_get_parent(dev); 136 device_t child = dev; 137 struct dpaa2_io_softc *sc = device_get_softc(dev); 138 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 139 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 140 struct dpaa2_cmd cmd; 141 uint16_t rc_token, io_token; 142 int error; 143 144 DPAA2_CMD_INIT(&cmd); 145 146 /* Tear down interrupt handler and release IRQ resources. */ 147 dpaa2_io_release_irqs(dev); 148 149 /* Free software portal helper object. */ 150 dpaa2_swp_free_portal(sc->swp); 151 152 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 153 if (error) { 154 device_printf(dev, "%s: failed to open DPRC: error=%d\n", 155 __func__, error); 156 goto err_exit; 157 } 158 error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token); 159 if (error) { 160 device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", 161 __func__, dinfo->id, error); 162 goto close_rc; 163 } 164 165 error = DPAA2_CMD_IO_DISABLE(dev, child, &cmd); 166 if (error && bootverbose) { 167 device_printf(dev, "%s: failed to disable DPIO: id=%d, " 168 "error=%d\n", __func__, dinfo->id, error); 169 } 170 171 (void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd); 172 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 173 174 /* Unmap memory resources of the portal. */ 175 for (int i = 0; i < MEM_RES_NUM; i++) { 176 if (sc->res[MEM_RID(i)] == NULL) { 177 continue; 178 } 179 error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY, 180 sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]); 181 if (error && bootverbose) { 182 device_printf(dev, "%s: failed to unmap memory " 183 "resource: rid=%d, error=%d\n", __func__, MEM_RID(i), 184 error); 185 } 186 } 187 188 /* Release allocated resources. */ 189 bus_release_resources(dev, dpaa2_io_spec, sc->res); 190 191 return (0); 192 193 close_rc: 194 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 195 err_exit: 196 return (error); 197 } 198 199 static int 200 dpaa2_io_attach(device_t dev) 201 { 202 device_t pdev = device_get_parent(dev); 203 device_t child = dev; 204 device_t mcp_dev; 205 struct dpaa2_io_softc *sc = device_get_softc(dev); 206 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 207 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 208 struct dpaa2_devinfo *mcp_dinfo; 209 struct dpaa2_cmd cmd; 210 struct resource_map_request req; 211 struct { 212 vm_memattr_t memattr; 213 char *label; 214 } map_args[MEM_RES_NUM] = { 215 { VM_MEMATTR_WRITE_BACK, "cache-enabled part" }, 216 { VM_MEMATTR_DEVICE, "cache-inhibited part" }, 217 { VM_MEMATTR_DEVICE, "control registers" } 218 }; 219 uint16_t rc_token, io_token; 220 int error; 221 222 sc->dev = dev; 223 sc->swp = NULL; 224 sc->intr = NULL; 225 sc->irq_resource = NULL; 226 227 /* Allocate resources. */ 228 error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res); 229 if (error) { 230 device_printf(dev, "%s: failed to allocate resources: " 231 "error=%d\n", __func__, error); 232 return (ENXIO); 233 } 234 235 /* Set allocated MC portal up. */ 236 mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); 237 mcp_dinfo = device_get_ivars(mcp_dev); 238 dinfo->portal = mcp_dinfo->portal; 239 240 /* Map memory resources of the portal. */ 241 for (int i = 0; i < MEM_RES_NUM; i++) { 242 if (sc->res[MEM_RID(i)] == NULL) { 243 continue; 244 } 245 246 resource_init_map_request(&req); 247 req.memattr = map_args[i].memattr; 248 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, 249 sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]); 250 if (error) { 251 device_printf(dev, "%s: failed to map %s: error=%d\n", 252 __func__, map_args[i].label, error); 253 goto err_exit; 254 } 255 } 256 257 DPAA2_CMD_INIT(&cmd); 258 259 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 260 if (error) { 261 device_printf(dev, "%s: failed to open DPRC: error=%d\n", 262 __func__, error); 263 goto err_exit; 264 } 265 error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token); 266 if (error) { 267 device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", 268 __func__, dinfo->id, error); 269 goto close_rc; 270 } 271 error = DPAA2_CMD_IO_RESET(dev, child, &cmd); 272 if (error) { 273 device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n", 274 __func__, dinfo->id, error); 275 goto close_io; 276 } 277 error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); 278 if (error) { 279 device_printf(dev, "%s: failed to get DPIO attributes: id=%d, " 280 "error=%d\n", __func__, dinfo->id, error); 281 goto close_io; 282 } 283 error = DPAA2_CMD_IO_ENABLE(dev, child, &cmd); 284 if (error) { 285 device_printf(dev, "%s: failed to enable DPIO: id=%d, " 286 "error=%d\n", __func__, dinfo->id, error); 287 goto close_io; 288 } 289 290 /* Prepare descriptor of the QBMan software portal. */ 291 sc->swp_desc.dpio_dev = dev; 292 sc->swp_desc.swp_version = sc->attr.swp_version; 293 sc->swp_desc.swp_clk = sc->attr.swp_clk; 294 sc->swp_desc.swp_id = sc->attr.swp_id; 295 sc->swp_desc.has_notif = sc->attr.priors_num ? true : false; 296 sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false; 297 298 sc->swp_desc.cena_res = sc->res[0]; 299 sc->swp_desc.cena_map = &sc->map[0]; 300 sc->swp_desc.cinh_res = sc->res[1]; 301 sc->swp_desc.cinh_map = &sc->map[1]; 302 303 /* 304 * Compute how many 256 QBMAN cycles fit into one ns. This is because 305 * the interrupt timeout period register needs to be specified in QBMAN 306 * clock cycles in increments of 256. 307 */ 308 sc->swp_desc.swp_cycles_ratio = 256000 / 309 (sc->swp_desc.swp_clk / 1000000); 310 311 /* Initialize QBMan software portal. */ 312 error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF); 313 if (error) { 314 device_printf(dev, "%s: failed to initialize dpaa2_swp: " 315 "error=%d\n", __func__, error); 316 goto err_exit; 317 } 318 319 error = dpaa2_io_setup_irqs(dev); 320 if (error) { 321 device_printf(dev, "%s: failed to setup IRQs: error=%d\n", 322 __func__, error); 323 goto err_exit; 324 } 325 326 if (bootverbose) { 327 device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, " 328 "notif_priors=%d, swp_version=0x%x\n", 329 sc->attr.id, sc->attr.swp_id, 330 sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL 331 ? "local_channel" : "no_channel", sc->attr.priors_num, 332 sc->attr.swp_version); 333 } 334 335 (void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd); 336 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 337 return (0); 338 339 close_io: 340 (void)DPAA2_CMD_IO_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, io_token)); 341 close_rc: 342 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 343 err_exit: 344 dpaa2_io_detach(dev); 345 return (ENXIO); 346 } 347 348 /** 349 * @brief Enqueue multiple frames to a frame queue using one FQID. 350 */ 351 static int 352 dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid, 353 struct dpaa2_fd *fd, int frames_n) 354 { 355 struct dpaa2_io_softc *sc = device_get_softc(iodev); 356 struct dpaa2_swp *swp = sc->swp; 357 struct dpaa2_eq_desc ed; 358 uint32_t flags = 0; 359 360 memset(&ed, 0, sizeof(ed)); 361 362 /* Setup enqueue descriptor. */ 363 dpaa2_swp_set_ed_norp(&ed, false); 364 dpaa2_swp_set_ed_fq(&ed, fqid); 365 366 return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n)); 367 } 368 369 /** 370 * @brief Configure the channel data availability notification (CDAN) 371 * in a particular WQ channel paired with DPIO. 372 */ 373 static int 374 dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx) 375 { 376 struct dpaa2_io_softc *sc = device_get_softc(iodev); 377 378 /* Enable generation of the CDAN notifications. */ 379 if (ctx->cdan_en) { 380 return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id, 381 DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en, 382 ctx->qman_ctx)); 383 } 384 385 return (0); 386 } 387 388 /** 389 * @brief Query current configuration/state of the buffer pool. 390 */ 391 static int 392 dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf) 393 { 394 struct dpaa2_io_softc *sc = device_get_softc(iodev); 395 396 return (dpaa2_swp_query_bp(sc->swp, bpid, conf)); 397 } 398 399 /** 400 * @brief Release one or more buffer pointers to the QBMan buffer pool. 401 */ 402 static int 403 dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf, 404 uint32_t buf_num) 405 { 406 struct dpaa2_io_softc *sc = device_get_softc(iodev); 407 408 return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num)); 409 } 410 411 /** 412 * @brief Configure DPNI object to generate interrupts. 413 */ 414 static int 415 dpaa2_io_setup_irqs(device_t dev) 416 { 417 struct dpaa2_io_softc *sc = device_get_softc(dev); 418 int error; 419 420 /* 421 * Setup interrupts generated by the software portal. 422 */ 423 dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI); 424 dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); 425 426 /* Configure IRQs. */ 427 error = dpaa2_io_setup_msi(sc); 428 if (error) { 429 device_printf(dev, "%s: failed to allocate MSI: error=%d\n", 430 __func__, error); 431 return (error); 432 } 433 if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, 434 &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { 435 device_printf(dev, "%s: failed to allocate IRQ resource\n", 436 __func__); 437 return (ENXIO); 438 } 439 if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE | 440 INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) { 441 device_printf(dev, "%s: failed to setup IRQ resource\n", 442 __func__); 443 return (ENXIO); 444 } 445 446 /* Wrap DPIO ID around number of CPUs/RSS buckets */ 447 #ifdef RSS 448 sc->cpu = rss_getcpu(sc->attr.id % rss_getnumbuckets()); 449 #else 450 sc->cpu = sc->attr.id % mp_ncpus; 451 #endif 452 CPU_SETOF(sc->cpu, &sc->cpu_mask); 453 bus_bind_intr(dev, sc->irq_resource, sc->cpu); 454 455 /* 456 * Setup and enable Static Dequeue Command to receive CDANs from 457 * channel 0. 458 */ 459 if (sc->swp_desc.has_notif) 460 dpaa2_swp_set_push_dequeue(sc->swp, 0, true); 461 462 return (0); 463 } 464 465 static int 466 dpaa2_io_release_irqs(device_t dev) 467 { 468 struct dpaa2_io_softc *sc = device_get_softc(dev); 469 470 /* Disable receiving CDANs from channel 0. */ 471 if (sc->swp_desc.has_notif) 472 dpaa2_swp_set_push_dequeue(sc->swp, 0, false); 473 474 /* Release IRQ resources. */ 475 if (sc->intr != NULL) 476 bus_teardown_intr(dev, sc->irq_resource, &sc->intr); 477 if (sc->irq_resource != NULL) 478 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0], 479 sc->irq_resource); 480 481 (void)dpaa2_io_release_msi(device_get_softc(dev)); 482 483 /* Configure software portal to stop generating interrupts. */ 484 dpaa2_swp_set_intr_trigger(sc->swp, 0); 485 dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); 486 487 return (0); 488 } 489 490 /** 491 * @brief Allocate MSI interrupts for this DPAA2 I/O object. 492 */ 493 static int 494 dpaa2_io_setup_msi(struct dpaa2_io_softc *sc) 495 { 496 int val; 497 498 val = pci_msi_count(sc->dev); 499 if (val < DPAA2_IO_MSI_COUNT) 500 device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, 501 DPAA2_IO_MSI_COUNT); 502 val = MIN(val, DPAA2_IO_MSI_COUNT); 503 504 if (pci_alloc_msi(sc->dev, &val) != 0) 505 return (EINVAL); 506 507 for (int i = 0; i < val; i++) 508 sc->irq_rid[i] = i + 1; 509 510 return (0); 511 } 512 513 static int 514 dpaa2_io_release_msi(struct dpaa2_io_softc *sc) 515 { 516 int error; 517 518 error = pci_release_msi(sc->dev); 519 if (error) { 520 device_printf(sc->dev, "%s: failed to release MSI: error=%d/n", 521 __func__, error); 522 return (error); 523 } 524 525 return (0); 526 } 527 528 /** 529 * @brief DPAA2 I/O interrupt handler. 530 */ 531 static void 532 dpaa2_io_intr(void *arg) 533 { 534 struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg; 535 /* struct dpaa2_ni_softc *nisc = NULL; */ 536 struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX]; 537 struct dpaa2_channel *chan; 538 struct dpaa2_dq dq; 539 uint32_t idx, status; 540 uint16_t flags; 541 int rc, cdan_n = 0; 542 543 status = dpaa2_swp_read_intr_status(sc->swp); 544 if (status == 0) { 545 return; 546 } 547 548 DPAA2_SWP_LOCK(sc->swp, &flags); 549 if (flags & DPAA2_SWP_DESTROYED) { 550 /* Terminate operation if portal is destroyed. */ 551 DPAA2_SWP_UNLOCK(sc->swp); 552 return; 553 } 554 555 for (int i = 0; i < DPIO_POLL_MAX; i++) { 556 rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx); 557 if (rc) { 558 break; 559 } 560 561 if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) == 562 DPAA2_DQRR_RESULT_CDAN) { 563 ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx; 564 } else { 565 /* TODO: Report unknown DQRR entry. */ 566 } 567 dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx); 568 } 569 DPAA2_SWP_UNLOCK(sc->swp); 570 571 for (int i = 0; i < cdan_n; i++) { 572 chan = (struct dpaa2_channel *)ctx[i]->channel; 573 /* nisc = device_get_softc(chan->ni_dev); */ 574 taskqueue_enqueue(chan->cleanup_tq, &chan->cleanup_task); 575 } 576 577 /* Enable software portal interrupts back */ 578 dpaa2_swp_clear_intr_status(sc->swp, status); 579 dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0); 580 } 581 582 static device_method_t dpaa2_io_methods[] = { 583 /* Device interface */ 584 DEVMETHOD(device_probe, dpaa2_io_probe), 585 DEVMETHOD(device_attach, dpaa2_io_attach), 586 DEVMETHOD(device_detach, dpaa2_io_detach), 587 588 /* QBMan software portal interface */ 589 DEVMETHOD(dpaa2_swp_enq_multiple_fq, dpaa2_io_enq_multiple_fq), 590 DEVMETHOD(dpaa2_swp_conf_wq_channel, dpaa2_io_conf_wq_channel), 591 DEVMETHOD(dpaa2_swp_query_bp, dpaa2_io_query_bp), 592 DEVMETHOD(dpaa2_swp_release_bufs, dpaa2_io_release_bufs), 593 594 DEVMETHOD_END 595 }; 596 597 static driver_t dpaa2_io_driver = { 598 "dpaa2_io", 599 dpaa2_io_methods, 600 sizeof(struct dpaa2_io_softc), 601 }; 602 603 DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0); 604