1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2021-2022 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * QBMan command interface and the DPAA2 I/O (DPIO) driver. 33 * 34 * The DPIO object allows configuration of the QBMan software portal with 35 * optional notification capabilities. 36 * 37 * Software portals are used by the driver to communicate with the QBMan. The 38 * DPIO object’s main purpose is to enable the driver to perform I/O – enqueue 39 * and dequeue operations, as well as buffer release and acquire operations – 40 * using QBMan. 41 */ 42 43 #include <sys/param.h> 44 #include <sys/kernel.h> 45 #include <sys/bus.h> 46 #include <sys/rman.h> 47 #include <sys/module.h> 48 #include <sys/malloc.h> 49 #include <sys/lock.h> 50 #include <sys/mutex.h> 51 #include <sys/_cpuset.h> 52 #include <sys/cpuset.h> 53 #include <sys/taskqueue.h> 54 #include <sys/smp.h> 55 56 #include <vm/vm.h> 57 58 #include <machine/bus.h> 59 #include <machine/resource.h> 60 61 #include <dev/pci/pcivar.h> 62 63 #include "pcib_if.h" 64 #include "pci_if.h" 65 66 #include "dpaa2_mc.h" 67 #include "dpaa2_mcp.h" 68 #include "dpaa2_swp.h" 69 #include "dpaa2_swp_if.h" 70 #include "dpaa2_cmd_if.h" 71 #include "dpaa2_io.h" 72 #include "dpaa2_ni.h" 73 74 #define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */ 75 #define DPIO_POLL_MAX 32 76 77 /* 78 * Memory: 79 * 0: cache-enabled part of the QBMan software portal. 80 * 1: cache-inhibited part of the QBMan software portal. 81 * 2: control registers of the QBMan software portal? 82 * 83 * Note that MSI should be allocated separately using pseudo-PCI interface. 84 */ 85 struct resource_spec dpaa2_io_spec[] = { 86 /* 87 * System Memory resources. 88 */ 89 #define MEM_RES_NUM (3u) 90 #define MEM_RID_OFF (0u) 91 #define MEM_RID(rid) ((rid) + MEM_RID_OFF) 92 { SYS_RES_MEMORY, MEM_RID(0), RF_ACTIVE | RF_UNMAPPED }, 93 { SYS_RES_MEMORY, MEM_RID(1), RF_ACTIVE | RF_UNMAPPED }, 94 { SYS_RES_MEMORY, MEM_RID(2), RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 95 /* 96 * DPMCP resources. 97 * 98 * NOTE: MC command portals (MCPs) are used to send commands to, and 99 * receive responses from, the MC firmware. One portal per DPIO. 100 */ 101 #define MCP_RES_NUM (1u) 102 #define MCP_RID_OFF (MEM_RID_OFF + MEM_RES_NUM) 103 #define MCP_RID(rid) ((rid) + MCP_RID_OFF) 104 /* --- */ 105 { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, 106 /* --- */ 107 RESOURCE_SPEC_END 108 }; 109 110 /* Configuration routines. */ 111 static int dpaa2_io_setup_irqs(device_t dev); 112 static int dpaa2_io_release_irqs(device_t dev); 113 static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc); 114 static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc); 115 116 /* Interrupt handlers */ 117 static void dpaa2_io_intr(void *arg); 118 119 static int 120 dpaa2_io_probe(device_t dev) 121 { 122 /* DPIO device will be added by a parent resource container itself. */ 123 device_set_desc(dev, "DPAA2 I/O"); 124 return (BUS_PROBE_DEFAULT); 125 } 126 127 static int 128 dpaa2_io_detach(device_t dev) 129 { 130 device_t child = dev; 131 struct dpaa2_io_softc *sc = device_get_softc(dev); 132 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 133 int error; 134 135 /* Tear down interrupt handler and release IRQ resources. */ 136 dpaa2_io_release_irqs(dev); 137 138 /* Free software portal helper object. */ 139 dpaa2_swp_free_portal(sc->swp); 140 141 /* Disable DPIO object. */ 142 error = DPAA2_CMD_IO_DISABLE(dev, child, dpaa2_mcp_tk(sc->cmd, 143 sc->io_token)); 144 if (error && bootverbose) 145 device_printf(dev, "%s: failed to disable DPIO: id=%d, " 146 "error=%d\n", __func__, dinfo->id, error); 147 148 /* Close control sessions with the DPAA2 objects. */ 149 DPAA2_CMD_IO_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->io_token)); 150 DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); 151 152 /* Free pre-allocated MC command. */ 153 dpaa2_mcp_free_command(sc->cmd); 154 sc->cmd = NULL; 155 sc->io_token = 0; 156 sc->rc_token = 0; 157 158 /* Unmap memory resources of the portal. */ 159 for (int i = 0; i < MEM_RES_NUM; i++) { 160 if (sc->res[MEM_RID(i)] == NULL) 161 continue; 162 error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY, 163 sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]); 164 if (error && bootverbose) 165 device_printf(dev, "%s: failed to unmap memory " 166 "resource: rid=%d, error=%d\n", __func__, MEM_RID(i), 167 error); 168 } 169 170 /* Release allocated resources. */ 171 bus_release_resources(dev, dpaa2_io_spec, sc->res); 172 173 return (0); 174 } 175 176 static int 177 dpaa2_io_attach(device_t dev) 178 { 179 device_t pdev = device_get_parent(dev); 180 device_t child = dev; 181 device_t mcp_dev; 182 struct dpaa2_io_softc *sc = device_get_softc(dev); 183 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 184 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 185 struct dpaa2_devinfo *mcp_dinfo; 186 struct resource_map_request req; 187 struct { 188 vm_memattr_t memattr; 189 char *label; 190 } map_args[MEM_RES_NUM] = { 191 { VM_MEMATTR_WRITE_BACK, "cache-enabled part" }, 192 { VM_MEMATTR_DEVICE, "cache-inhibited part" }, 193 { VM_MEMATTR_DEVICE, "control registers" } 194 }; 195 int error; 196 197 sc->dev = dev; 198 sc->swp = NULL; 199 sc->cmd = NULL; 200 sc->intr = NULL; 201 sc->irq_resource = NULL; 202 203 /* Allocate resources. */ 204 error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res); 205 if (error) { 206 device_printf(dev, "%s: failed to allocate resources: " 207 "error=%d\n", __func__, error); 208 return (ENXIO); 209 } 210 211 /* Set allocated MC portal up. */ 212 mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); 213 mcp_dinfo = device_get_ivars(mcp_dev); 214 dinfo->portal = mcp_dinfo->portal; 215 216 /* Map memory resources of the portal. */ 217 for (int i = 0; i < MEM_RES_NUM; i++) { 218 if (sc->res[MEM_RID(i)] == NULL) 219 continue; 220 221 resource_init_map_request(&req); 222 req.memattr = map_args[i].memattr; 223 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, 224 sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]); 225 if (error) { 226 device_printf(dev, "%s: failed to map %s: error=%d\n", 227 __func__, map_args[i].label, error); 228 goto err_exit; 229 } 230 } 231 232 /* Allocate a command to send to the MC hardware. */ 233 error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF); 234 if (error) { 235 device_printf(dev, "%s: failed to allocate dpaa2_cmd: " 236 "error=%d\n", __func__, error); 237 goto err_exit; 238 } 239 240 /* Prepare DPIO object. */ 241 error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, 242 &sc->rc_token); 243 if (error) { 244 device_printf(dev, "%s: failed to open DPRC: error=%d\n", 245 __func__, error); 246 goto err_exit; 247 } 248 error = DPAA2_CMD_IO_OPEN(dev, child, sc->cmd, dinfo->id, &sc->io_token); 249 if (error) { 250 device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", 251 __func__, dinfo->id, error); 252 goto err_exit; 253 } 254 error = DPAA2_CMD_IO_RESET(dev, child, sc->cmd); 255 if (error) { 256 device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n", 257 __func__, dinfo->id, error); 258 goto err_exit; 259 } 260 error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr); 261 if (error) { 262 device_printf(dev, "%s: failed to get DPIO attributes: id=%d, " 263 "error=%d\n", __func__, dinfo->id, error); 264 goto err_exit; 265 } 266 error = DPAA2_CMD_IO_ENABLE(dev, child, sc->cmd); 267 if (error) { 268 device_printf(dev, "%s: failed to enable DPIO: id=%d, " 269 "error=%d\n", __func__, dinfo->id, error); 270 goto err_exit; 271 } 272 273 /* Prepare descriptor of the QBMan software portal. */ 274 sc->swp_desc.dpio_dev = dev; 275 sc->swp_desc.swp_version = sc->attr.swp_version; 276 sc->swp_desc.swp_clk = sc->attr.swp_clk; 277 sc->swp_desc.swp_id = sc->attr.swp_id; 278 sc->swp_desc.has_notif = sc->attr.priors_num ? true : false; 279 sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false; 280 281 sc->swp_desc.cena_res = sc->res[0]; 282 sc->swp_desc.cena_map = &sc->map[0]; 283 sc->swp_desc.cinh_res = sc->res[1]; 284 sc->swp_desc.cinh_map = &sc->map[1]; 285 286 /* 287 * Compute how many 256 QBMAN cycles fit into one ns. This is because 288 * the interrupt timeout period register needs to be specified in QBMAN 289 * clock cycles in increments of 256. 290 */ 291 sc->swp_desc.swp_cycles_ratio = 256000 / 292 (sc->swp_desc.swp_clk / 1000000); 293 294 /* Initialize QBMan software portal. */ 295 error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF); 296 if (error) { 297 device_printf(dev, "%s: failed to initialize dpaa2_swp: " 298 "error=%d\n", __func__, error); 299 goto err_exit; 300 } 301 302 error = dpaa2_io_setup_irqs(dev); 303 if (error) { 304 device_printf(dev, "%s: failed to setup IRQs: error=%d\n", 305 __func__, error); 306 goto err_exit; 307 } 308 309 #if 0 310 /* TODO: Enable debug output via sysctl (to reduce output). */ 311 if (bootverbose) 312 device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, " 313 "notif_priors=%d, swp_version=0x%x\n", 314 sc->attr.id, sc->attr.swp_id, 315 sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL 316 ? "local_channel" : "no_channel", sc->attr.priors_num, 317 sc->attr.swp_version); 318 #endif 319 return (0); 320 321 err_exit: 322 dpaa2_io_detach(dev); 323 return (ENXIO); 324 } 325 326 /** 327 * @brief Enqueue multiple frames to a frame queue using one FQID. 328 */ 329 static int 330 dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid, 331 struct dpaa2_fd *fd, int frames_n) 332 { 333 struct dpaa2_io_softc *sc = device_get_softc(iodev); 334 struct dpaa2_swp *swp = sc->swp; 335 struct dpaa2_eq_desc ed; 336 uint32_t flags = 0; 337 338 memset(&ed, 0, sizeof(ed)); 339 340 /* Setup enqueue descriptor. */ 341 dpaa2_swp_set_ed_norp(&ed, false); 342 dpaa2_swp_set_ed_fq(&ed, fqid); 343 344 return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n)); 345 } 346 347 /** 348 * @brief Configure the channel data availability notification (CDAN) 349 * in a particular WQ channel paired with DPIO. 350 */ 351 static int 352 dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx) 353 { 354 struct dpaa2_io_softc *sc = device_get_softc(iodev); 355 356 /* Enable generation of the CDAN notifications. */ 357 if (ctx->cdan_en) 358 return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id, 359 DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en, 360 ctx->qman_ctx)); 361 362 return (0); 363 } 364 365 /** 366 * @brief Query current configuration/state of the buffer pool. 367 */ 368 static int 369 dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf) 370 { 371 struct dpaa2_io_softc *sc = device_get_softc(iodev); 372 373 return (dpaa2_swp_query_bp(sc->swp, bpid, conf)); 374 } 375 376 /** 377 * @brief Release one or more buffer pointers to the QBMan buffer pool. 378 */ 379 static int 380 dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf, 381 uint32_t buf_num) 382 { 383 struct dpaa2_io_softc *sc = device_get_softc(iodev); 384 385 return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num)); 386 } 387 388 /** 389 * @brief Configure DPNI object to generate interrupts. 390 */ 391 static int 392 dpaa2_io_setup_irqs(device_t dev) 393 { 394 struct dpaa2_io_softc *sc = device_get_softc(dev); 395 int error; 396 397 /* 398 * Setup interrupts generated by the software portal. 399 */ 400 dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI); 401 dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); 402 403 /* Configure IRQs. */ 404 error = dpaa2_io_setup_msi(sc); 405 if (error) { 406 device_printf(dev, "%s: failed to allocate MSI: error=%d\n", 407 __func__, error); 408 return (error); 409 } 410 if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, 411 &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { 412 device_printf(dev, "%s: failed to allocate IRQ resource\n", 413 __func__); 414 return (ENXIO); 415 } 416 if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE | 417 INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) { 418 device_printf(dev, "%s: failed to setup IRQ resource\n", 419 __func__); 420 return (ENXIO); 421 } 422 423 /* Wrap DPIO ID around number of CPUs. */ 424 bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus); 425 426 /* 427 * Setup and enable Static Dequeue Command to receive CDANs from 428 * channel 0. 429 */ 430 if (sc->swp_desc.has_notif) 431 dpaa2_swp_set_push_dequeue(sc->swp, 0, true); 432 433 return (0); 434 } 435 436 static int 437 dpaa2_io_release_irqs(device_t dev) 438 { 439 struct dpaa2_io_softc *sc = device_get_softc(dev); 440 441 /* Disable receiving CDANs from channel 0. */ 442 if (sc->swp_desc.has_notif) 443 dpaa2_swp_set_push_dequeue(sc->swp, 0, false); 444 445 /* Release IRQ resources. */ 446 if (sc->intr != NULL) 447 bus_teardown_intr(dev, sc->irq_resource, &sc->intr); 448 if (sc->irq_resource != NULL) 449 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0], 450 sc->irq_resource); 451 452 (void)dpaa2_io_release_msi(device_get_softc(dev)); 453 454 /* Configure software portal to stop generating interrupts. */ 455 dpaa2_swp_set_intr_trigger(sc->swp, 0); 456 dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); 457 458 return (0); 459 } 460 461 /** 462 * @brief Allocate MSI interrupts for this DPAA2 I/O object. 463 */ 464 static int 465 dpaa2_io_setup_msi(struct dpaa2_io_softc *sc) 466 { 467 int val; 468 469 val = pci_msi_count(sc->dev); 470 if (val < DPAA2_IO_MSI_COUNT) 471 device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, 472 DPAA2_IO_MSI_COUNT); 473 val = MIN(val, DPAA2_IO_MSI_COUNT); 474 475 if (pci_alloc_msi(sc->dev, &val) != 0) 476 return (EINVAL); 477 478 for (int i = 0; i < val; i++) 479 sc->irq_rid[i] = i + 1; 480 481 return (0); 482 } 483 484 static int 485 dpaa2_io_release_msi(struct dpaa2_io_softc *sc) 486 { 487 int error; 488 489 error = pci_release_msi(sc->dev); 490 if (error) { 491 device_printf(sc->dev, "%s: failed to release MSI: error=%d/n", 492 __func__, error); 493 return (error); 494 } 495 496 return (0); 497 } 498 499 /** 500 * @brief DPAA2 I/O interrupt handler. 501 */ 502 static void 503 dpaa2_io_intr(void *arg) 504 { 505 struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg; 506 struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX]; 507 struct dpaa2_dq dq; 508 uint32_t idx, status; 509 uint16_t flags; 510 int rc, cdan_n = 0; 511 512 status = dpaa2_swp_read_intr_status(sc->swp); 513 if (status == 0) { 514 return; 515 } 516 517 DPAA2_SWP_LOCK(sc->swp, &flags); 518 if (flags & DPAA2_SWP_DESTROYED) { 519 /* Terminate operation if portal is destroyed. */ 520 DPAA2_SWP_UNLOCK(sc->swp); 521 return; 522 } 523 524 for (int i = 0; i < DPIO_POLL_MAX; i++) { 525 rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx); 526 if (rc) { 527 break; 528 } 529 530 if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) == 531 DPAA2_DQRR_RESULT_CDAN) { 532 ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx; 533 } else { 534 /* TODO: Report unknown DQRR entry. */ 535 } 536 dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx); 537 } 538 DPAA2_SWP_UNLOCK(sc->swp); 539 540 for (int i = 0; i < cdan_n; i++) { 541 ctx[i]->poll(ctx[i]->channel); 542 } 543 544 /* Enable software portal interrupts back */ 545 dpaa2_swp_clear_intr_status(sc->swp, status); 546 dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0); 547 } 548 549 static device_method_t dpaa2_io_methods[] = { 550 /* Device interface */ 551 DEVMETHOD(device_probe, dpaa2_io_probe), 552 DEVMETHOD(device_attach, dpaa2_io_attach), 553 DEVMETHOD(device_detach, dpaa2_io_detach), 554 555 /* QBMan software portal interface */ 556 DEVMETHOD(dpaa2_swp_enq_multiple_fq, dpaa2_io_enq_multiple_fq), 557 DEVMETHOD(dpaa2_swp_conf_wq_channel, dpaa2_io_conf_wq_channel), 558 DEVMETHOD(dpaa2_swp_query_bp, dpaa2_io_query_bp), 559 DEVMETHOD(dpaa2_swp_release_bufs, dpaa2_io_release_bufs), 560 561 DEVMETHOD_END 562 }; 563 564 static driver_t dpaa2_io_driver = { 565 "dpaa2_io", 566 dpaa2_io_methods, 567 sizeof(struct dpaa2_io_softc), 568 }; 569 570 DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0); 571