1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2023 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28 #include <sys/types.h> 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/mbuf.h> 32 #include <sys/malloc.h> 33 #include <sys/lock.h> 34 #include <sys/mutex.h> 35 36 #include <machine/bus.h> 37 38 #include "dpaa2_types.h" 39 #include "dpaa2_buf.h" 40 #include "dpaa2_bp.h" 41 #include "dpaa2_channel.h" 42 #include "dpaa2_swp.h" 43 #include "dpaa2_swp_if.h" 44 #include "dpaa2_ni.h" 45 #include "dpaa2_frame.h" 46 47 MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)"); 48 49 /** 50 * @brief Allocate Rx buffers visible to QBMan and release them to the 51 * buffer pool. 52 */ 53 int 54 dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count, 55 int size, struct mtx *dma_mtx) 56 { 57 struct dpaa2_ni_softc *sc = device_get_softc(dev); 58 struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev); 59 struct dpaa2_channel *ch = (struct dpaa2_channel *)arg; 60 struct dpaa2_buf *buf; 61 const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num); 62 const uint16_t bpid = bpsc->attr.bpid; 63 bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD]; 64 int error, bufn = 0; 65 66 #if defined(INVARIANTS) 67 KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__)); 68 if (dma_mtx != NULL) { 69 mtx_assert(dma_mtx, MA_OWNED); 70 } 71 #endif /* INVARIANTS */ 72 73 #ifdef _notyet_ 74 /* Limit amount of buffers released to the pool */ 75 count = (alloc + count > DPAA2_NI_BUFS_MAX) 76 ? 
DPAA2_NI_BUFS_MAX - alloc : count; 77 #endif 78 79 /* Release "count" buffers to the pool */ 80 for (int i = alloc; i < alloc + count; i++) { 81 /* Enough buffers were allocated for a single command */ 82 if (bufn == DPAA2_SWP_BUFS_PER_CMD) { 83 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, 84 bufn); 85 if (error) { 86 device_printf(sc->dev, "%s: failed to release " 87 "buffers to the pool (1)\n", __func__); 88 return (error); 89 } 90 DPAA2_ATOMIC_ADD(&sc->buf_num, bufn); 91 bufn = 0; 92 } 93 94 buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT); 95 if (buf == NULL) { 96 device_printf(dev, "%s: malloc() failed\n", __func__); 97 return (ENOMEM); 98 } 99 DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch); 100 101 error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx); 102 if (error != 0) { 103 device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: " 104 "error=%d/n", __func__, error); 105 break; 106 } 107 paddr[bufn] = buf->paddr; 108 bufn++; 109 } 110 111 /* Release reminder of the buffers to the pool */ 112 if (bufn > 0) { 113 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn); 114 if (error) { 115 device_printf(sc->dev, "%s: failed to release " 116 "buffers to the pool (2)\n", __func__); 117 return (error); 118 } 119 DPAA2_ATOMIC_ADD(&sc->buf_num, bufn); 120 } 121 122 return (0); 123 } 124 125 /** 126 * @brief Prepare Rx buffer to be released to the buffer pool. 
 */
int
dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
    struct mtx *dma_mtx)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_swa *swa;
	bool map_created = false;
	bool mbuf_alloc = false;
	int error;

#if defined(INVARIANTS)
	DPAA2_BUF_ASSERT_RXPREP(buf);
	if (dma_mtx != NULL) {
		mtx_assert(dma_mtx, MA_OWNED);
	}
#endif /* INVARIANTS */

	/*
	 * Create the DMA map lazily; on re-seeding of an already-used buffer
	 * the existing map is reused.
	 */
	if (__predict_false(buf->dmap == NULL)) {
		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
		if (error != 0) {
			device_printf(dev, "%s: failed to create DMA map: "
			    "error=%d\n", __func__, error);
			goto fail_map_create;
		}
		map_created = true;
	}

	/* Allocate an mbuf cluster only if the buffer does not hold one */
	if (__predict_true(buf->m == NULL)) {
		buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (__predict_false(buf->m == NULL)) {
			device_printf(dev, "%s: m_getjcl(%d) failed\n",
			    __func__, size);
			error = ENOMEM;
			goto fail_mbuf_alloc;
		}
		/* Expose the whole external cluster to the hardware */
		buf->m->m_len = buf->m->m_ext.ext_size;
		buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
		mbuf_alloc = true;
	}

	/* The cluster must map to exactly one physically contiguous segment */
	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
	    &buf->nseg, BUS_DMA_NOWAIT);
	KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
	    buf->nseg));
	KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
	    __func__, error));
	if (__predict_false(error != 0 || buf->nseg != 1)) {
		device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
		    "error=%d, nsegs=%d\n", __func__, error, buf->nseg);
		goto fail_mbuf_map;
	}
	buf->paddr = buf->seg.ds_addr;
	buf->vaddr = buf->m->m_data;

	/*
	 * Populate frame annotation for future use: the software annotation
	 * at the start of the buffer lets the Rx path recover the dpaa2_buf
	 * descriptor from a frame's address later (validated via the magic).
	 */
	swa = (struct dpaa2_swa *)buf->vaddr;
	swa->magic = DPAA2_MAGIC;
	swa->buf = buf;

	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);

	DPAA2_BUF_ASSERT_RXREADY(buf);

	return (0);

	/*
	 * Unwind in reverse acquisition order; only resources acquired in
	 * this call are torn down (pre-existing mbuf/map are left intact).
	 */
fail_mbuf_map:
	if (mbuf_alloc) {
		m_freem(buf->m);
		buf->m = NULL;
	}
fail_mbuf_alloc:
	if (map_created) {
		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
	}
fail_map_create:
	return (error);
}

/**
 * @brief Prepare Tx buffer to be added to the Tx ring.
 *
 * Lazily creates the Tx DMA map and allocates the scatter/gather table
 * memory (buf->sgt) if not yet present.  Safe to call repeatedly.
 */
int
dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
{
	struct dpaa2_buf *sgt = buf->sgt;
	bool map_created = false;
	int error;

	DPAA2_BUF_ASSERT_TXPREP(buf);

	/* Create the DMA map for the Tx payload on first use */
	if (buf->dmap == NULL) {
		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
		if (error != 0) {
			device_printf(dev, "%s: bus_dmamap_create() failed: "
			    "error=%d\n", __func__, error);
			goto fail_map_create;
		}
		map_created = true;
	}

	/* Allocate zeroed, coherent memory for the S/G table on first use */
	if (sgt->vaddr == NULL) {
		error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
		if (error != 0) {
			device_printf(dev, "%s: bus_dmamem_alloc() failed: "
			    "error=%d\n", __func__, error);
			goto fail_mem_alloc;
		}
	}

	DPAA2_BUF_ASSERT_TXREADY(buf);

	return (0);

fail_mem_alloc:
	/* Destroy the map only if it was created by this call */
	if (map_created) {
		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
	}
fail_map_create:
	return (error);
}