/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2023 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
 *
 * NOTE: Several WQs are organized into a single channel.
 */
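
/*
 * Rough usage sketch (illustrative only, not an excerpt from dpaa2_ni; the
 * NI-side cleanup callback name below is hypothetical):
 *
 *	struct dpaa2_channel *ch;
 *	int error;
 *
 *	error = dpaa2_chan_setup(ni_dev, io_dev, con_dev, bp_dev, &ch,
 *	    flowid, ni_cleanup_task_fn);
 *	if (error == 0)
 *		error = dpaa2_chan_setup_fq(ni_dev, ch, DPAA2_NI_QUEUE_RX_ERR);
 *
 * The network interface driver keeps one such channel per (DPIO, DPCON)
 * pair and drains it with dpaa2_chan_next_frame() from the cleanup task.
 */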

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include "dpaa2_types.h"
#include "dpaa2_channel.h"
#include "dpaa2_ni.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_io.h"
#include "dpaa2_con.h"
#include "dpaa2_buf.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_bp.h"
#include "dpaa2_cmd_if.h"

MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");

#define RX_SEG_N	(1u)
#define RX_SEG_SZ	(((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
#define RX_SEG_MAXSZ	(((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);

#define TX_SEG_N	(16u) /* XXX-DSL: does a DPAA2 limit exist? */
#define TX_SEG_SZ	(PAGE_SIZE)
#define TX_SEG_MAXSZ	(TX_SEG_N * TX_SEG_SZ)
CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);

#define SGT_SEG_N	(1u)
#define SGT_SEG_SZ	(PAGE_SIZE)
#define SGT_SEG_MAXSZ	(PAGE_SIZE)
CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);

static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *,
    bus_size_t, int, bus_size_t);
static void dpaa2_chan_bp_task(void *, int);

/**
 * @brief Configures a QBMan channel and registers data availability
 * notifications.
 */
int
dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
    struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_io_softc *iosc = device_get_softc(iodev);
	struct dpaa2_con_softc *consc = device_get_softc(condev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
	struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_channel *ch = NULL;
	struct dpaa2_cmd cmd;
	uint16_t rctk, contk;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
		    __func__, rcinfo->id, error);
		goto fail_rc_open;
	}
	error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
	if (error) {
		device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
		    __func__, coninfo->id, error);
		goto fail_con_open;
	}

	error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
		    "chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
		goto fail_con_enable;
	}

	ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
	if (ch == NULL) {
		device_printf(dev, "%s: malloc() failed\n", __func__);
		error = ENOMEM;
		goto fail_malloc;
	}

	ch->ni_dev = dev;
	ch->io_dev = iodev;
	ch->con_dev = condev;
	ch->id = consc->attr.chan_id;
	ch->flowid = flowid;
	ch->tx_frames = 0;	/* for debug purposes */
	ch->tx_dropped = 0;	/* for debug purposes */
	ch->store_sz = 0;
	ch->store_idx = 0;
	ch->recycled_n = 0;
	ch->rxq_n = 0;

	NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
	NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);

	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &ch->cleanup_tq);
	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
	    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);

	error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto fail_dma_setup;
	}

	mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);

	ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
	    &ch->xmit_mtx);
	if (ch->xmit_br == NULL) {
		device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
		error = ENOMEM;
		goto fail_buf_ring;
	}

	DPAA2_BUF_INIT(&ch->store);

	/* Register the new notification context */
	ctx = &ch->ctx;
	ctx->qman_ctx = (uint64_t)ctx;
	ctx->cdan_en = true;
	ctx->fq_chan_id = ch->id;
	ctx->io_dev = ch->io_dev;
	ctx->channel = ch;
	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
	if (error) {
		device_printf(dev, "%s: failed to register CDAN context\n",
		    __func__);
		goto fail_dpcon_notif;
	}

	/* Register the DPCON notification with the Management Complex */
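	/*
	 * In other words, the MC is told which DPIO (and at what priority)
	 * should receive data availability notifications (CDANs) for this
	 * channel; qman_ctx is echoed back in each CDAN so the notification
	 * handler can recover the dpaa2_io_notif_ctx and, through it, the
	 * channel itself.
	 */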
	notif_cfg.dpio_id = ioinfo->id;
	notif_cfg.prior = 0;
	notif_cfg.qman_ctx = ctx->qman_ctx;
	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
	if (error) {
		device_printf(dev, "%s: failed to register DPCON "
		    "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
		    coninfo->id, consc->attr.chan_id);
		goto fail_dpcon_notif;
	}

	/* Allocate an initial number of Rx buffers and the channel storage */
	error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
	    DPAA2_RX_BUF_SIZE, NULL);
	if (error) {
		device_printf(dev, "%s: failed to seed buffer pool\n",
		    __func__);
		goto fail_dpcon_notif;
	}
	error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
	    BUS_DMA_NOWAIT, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate channel storage\n",
		    __func__);
		goto fail_dpcon_notif;
	} else {
		ch->store_sz = DPAA2_ETH_STORE_FRAMES;
	}

	/* Prepare queues for the channel */
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
	if (error) {
		device_printf(dev, "%s: failed to prepare TxConf queue: "
		    "error=%d\n", __func__, error);
		goto fail_fq_setup;
	}
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
	if (error) {
		device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
		    __func__, error);
		goto fail_fq_setup;
	}

	if (bootverbose) {
		device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
		    "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
		    consc->attr.prior_num);
	}

	*channel = ch;

	(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));

	return (0);

fail_fq_setup:
	if (ch->store.vaddr != NULL) {
		bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
	}
	if (ch->store.dmat != NULL) {
		bus_dma_tag_destroy(ch->store.dmat);
	}
	ch->store.dmat = NULL;
	ch->store.vaddr = NULL;
	ch->store.paddr = 0;
	ch->store.nseg = 0;
fail_dpcon_notif:
	buf_ring_free(ch->xmit_br, M_DEVBUF);
fail_buf_ring:
	mtx_destroy(&ch->xmit_mtx);
fail_dma_setup:
	/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
	/* 	taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
	/* } */
	/* taskqueue_free(ch->cleanup_tq); */
fail_malloc:
	(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
	(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_open:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
fail_rc_open:
	return (error);
}

/**
 * @brief Performs an initial configuration of the frame queue.
 */
int
dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel */
		fq = &ch->txc_queue;
		fq->chan = ch;
		fq->flowid = ch->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d\n",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &ch->rx_queues[i];
			fq->chan = ch;
			fq->flowid = ch->flowid;
			fq->tc = (uint8_t)i;
			fq->type = queue_type;

			ch->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface */
		fq = &sc->rxe_queue;
		fq->chan = ch;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}

/**
 * @brief Obtain the next dequeue response from the channel storage.
 */
int
dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
{
	struct dpaa2_buf *buf = &ch->store;
	struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
	struct dpaa2_dq *msg = &msgs[ch->store_idx];
	int rc = EINPROGRESS;

	ch->store_idx++;

	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
		rc = EALREADY; /* VDQ command is expired */
		ch->store_idx = 0;
		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
			msg = NULL; /* Null response, FD is invalid */
		}
	}
	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
		rc = ENOENT; /* FQ is empty */
		ch->store_idx = 0;
	}

	if (dq != NULL) {
		*dq = msg;
	}

	return (rc);
}

static int
dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
    bus_size_t alignment)
{
	int error;

	mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_SEG_MAXSZ,		/* maxsize */
	    RX_SEG_N,			/* nsegments */
	    RX_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->rx_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
		goto fail_rx_tag;
	}
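
	/*
	 * Unlike Rx buffers (a single RX_SEG_SZ segment each), Tx payloads
	 * may be scattered over up to TX_SEG_N segments; the scatter/gather
	 * table describing such a frame is kept in its own single-segment
	 * buffer mapped through sgt_dmat below.
	 */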
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_SEG_MAXSZ,		/* maxsize */
	    TX_SEG_N,			/* nsegments */
	    TX_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->tx_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
		goto fail_tx_tag;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    SGT_SEG_MAXSZ,		/* maxsize */
	    SGT_SEG_N,			/* nsegments */
	    SGT_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->sgt_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
		goto fail_sgt_tag;
	}

	return (0);

fail_sgt_tag:
	bus_dma_tag_destroy(ch->tx_dmat);
fail_tx_tag:
	bus_dma_tag_destroy(ch->rx_dmat);
fail_rx_tag:
	mtx_destroy(&ch->dma_mtx);
	ch->rx_dmat = NULL;
	ch->tx_dmat = NULL;
	ch->sgt_dmat = NULL;

	return (error);
}

/**
 * @brief Allocate DMA-mapped storage to keep responses from the VDQ command.
 */
static int
dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch,
    bus_size_t size, int mapflags, bus_size_t alignment)
{
	struct dpaa2_buf *buf = &ch->store;
	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    maxsize,			/* maxsize */
	    1,				/* nsegments */
	    maxsize,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &buf->dmat);
	if (error != 0) {
		device_printf(dev, "%s: failed to create DMA tag\n", __func__);
		goto fail_tag;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate storage memory\n",
		    __func__);
		goto fail_map_create;
	}

	buf->paddr = 0;
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
	    dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags);
	if (error != 0) {
		device_printf(dev, "%s: failed to map storage memory\n",
		    __func__);
		goto fail_map_load;
	}

	bus_dmamap_sync(buf->dmat, buf->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	buf->nseg = 1;

	return (0);

fail_map_load:
	bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
fail_map_create:
	bus_dma_tag_destroy(buf->dmat);
fail_tag:
	buf->dmat = NULL;
	buf->vaddr = NULL;
	buf->paddr = 0;
	buf->nseg = 0;

	return (error);
}

/**
 * @brief Release new buffers to the buffer pool if necessary.
 */
static void
dpaa2_chan_bp_task(void *arg, int count)
{
	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
	struct dpaa2_bp_softc *bpsc;
	struct dpaa2_bp_conf bpconf;
	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
	device_t bpdev;
	int error;

	/* There's only one buffer pool for now */
	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bpsc = device_get_softc(bpdev);

	/* Get the state of the buffer pool */
	error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
	if (error) {
		device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: "
		    "error=%d\n", __func__, error);
		return;
	}

	/* Double the allocated Rx buffers if fewer than 25% of them are free */
	if (bpconf.free_bufn < (buf_num >> 2)) {
		mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
		mtx_lock(&ch->dma_mtx);
		(void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
		    DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
		mtx_unlock(&ch->dma_mtx);

		DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn);
	}
}