/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2023 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus.h>

#include "dpaa2_types.h"
#include "dpaa2_buf.h"
#include "dpaa2_bp.h"
#include "dpaa2_channel.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_ni.h"

MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");

/**
 * @brief Allocate Rx buffers visible to QBMan and release them to the
 * buffer pool.
 */
int
dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count,
    int size, struct mtx *dma_mtx)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev);
	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
	struct dpaa2_buf *buf;
	const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num);
	const uint16_t bpid = bpsc->attr.bpid;
	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
	int error, bufn = 0;

#if defined(INVARIANTS)
	KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__));
	if (dma_mtx != NULL) {
		mtx_assert(dma_mtx, MA_OWNED);
	}
#endif /* INVARIANTS */

#ifdef _notyet_
	/* Limit the number of buffers released to the pool */
	count = (alloc + count > DPAA2_NI_BUFS_MAX)
	    ? DPAA2_NI_BUFS_MAX - alloc : count;
#endif

	/* Release "count" buffers to the pool */
	for (int i = alloc; i < alloc + count; i++) {
		/* Enough buffers were allocated for a single command */
		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
			error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr,
			    bufn);
			if (error) {
				device_printf(sc->dev, "%s: failed to release "
				    "buffers to the pool (1)\n", __func__);
				return (error);
			}
			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
			bufn = 0;
		}

		buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
		if (buf == NULL) {
			device_printf(dev, "%s: malloc() failed\n", __func__);
			return (ENOMEM);
		}
		DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);

		error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: "
			    "error=%d\n", __func__, error);
			/* The buffer was never released to the pool: free it */
			free(buf, M_DPAA2_RXB);
			break;
		}
		paddr[bufn] = buf->paddr;
		bufn++;
	}

	/* Release the remainder of the buffers to the pool */
	if (bufn > 0) {
		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn);
		if (error) {
			device_printf(sc->dev, "%s: failed to release "
			    "buffers to the pool (2)\n", __func__);
			return (error);
		}
		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
	}

	return (0);
}
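
/*
 * Example (hypothetical sketch, not part of the driver): how a caller might
 * seed the pool while bringing up a channel.  "nisc", "bpdev" and "ch" are
 * placeholders for the caller's network interface softc, buffer pool device
 * and channel, and DPAA2_RX_BUF_SIZE stands in for the negotiated Rx buffer
 * size.
 *
 *	struct dpaa2_channel *ch = nisc->channels[0];
 *	int error;
 *
 *	// Hold the channel's DMA mutex, as the INVARIANTS assertions in
 *	// dpaa2_buf_seed_pool() expect when a mutex is passed in.
 *	mtx_lock(&ch->dma_mtx);
 *	error = dpaa2_buf_seed_pool(nisc->dev, bpdev, ch, 64,
 *	    DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
 *	mtx_unlock(&ch->dma_mtx);
 *	if (error != 0)
 *		device_printf(nisc->dev, "failed to seed buffer pool\n");
 */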

/**
 * @brief Prepare Rx buffer to be released to the buffer pool.
 */
int
dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
    struct mtx *dma_mtx)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_fa *fa;
	bool map_created = false;
	bool mbuf_alloc = false;
	int error;

#if defined(INVARIANTS)
	DPAA2_BUF_ASSERT_RXPREP(buf);
	if (dma_mtx != NULL) {
		mtx_assert(dma_mtx, MA_OWNED);
	}
#endif /* INVARIANTS */

	if (__predict_false(buf->dmap == NULL)) {
		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
		if (error != 0) {
			device_printf(dev, "%s: failed to create DMA map: "
			    "error=%d\n", __func__, error);
			goto fail_map_create;
		}
		map_created = true;
	}

	if (__predict_true(buf->m == NULL)) {
		buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (__predict_false(buf->m == NULL)) {
			device_printf(dev, "%s: m_getjcl() failed\n", __func__);
			error = ENOMEM;
			goto fail_mbuf_alloc;
		}
		buf->m->m_len = buf->m->m_ext.ext_size;
		buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
		mbuf_alloc = true;
	}

	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
	    &buf->nseg, BUS_DMA_NOWAIT);
	KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
	    buf->nseg));
	KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
	    __func__, error));
	if (__predict_false(error != 0 || buf->nseg != 1)) {
		device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
		    "error=%d, nsegs=%d\n", __func__, error, buf->nseg);
		goto fail_mbuf_map;
	}
	buf->paddr = buf->seg.ds_addr;
	buf->vaddr = buf->m->m_data;

	/* Populate frame annotation for future use */
	fa = (struct dpaa2_fa *)buf->vaddr;
	fa->magic = DPAA2_MAGIC;
	fa->buf = buf;

	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);

	DPAA2_BUF_ASSERT_RXREADY(buf);

	return (0);

fail_mbuf_map:
	if (mbuf_alloc) {
		m_freem(buf->m);
		buf->m = NULL;
	}
fail_mbuf_alloc:
	if (map_created) {
		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
	}
fail_map_create:
	return (error);
}
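
/*
 * Example (hypothetical sketch, not part of the driver): re-arming a buffer
 * descriptor after its mbuf has been consumed by the Rx path.  Once the
 * received mbuf is handed up the stack, clearing buf->m lets this routine
 * attach a fresh cluster and reload the already existing DMA map.  "dev",
 * "ch", "bpid" and "BUF_SIZE" are placeholders for the network interface
 * device, the owning channel, the buffer pool id and the configured Rx
 * buffer size; the channel's DMA mutex is assumed to be held.
 *
 *	buf->m = NULL;	// the old mbuf now belongs to the network stack
 *	error = dpaa2_buf_seed_rxb(dev, buf, BUF_SIZE, &ch->dma_mtx);
 *	if (error == 0)
 *		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid,
 *		    &buf->paddr, 1);
 */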

/**
 * @brief Prepare Tx buffer to be added to the Tx ring.
 */
int
dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
{
	struct dpaa2_buf *sgt = buf->sgt;
	bool map_created = false;
	int error;

	DPAA2_BUF_ASSERT_TXPREP(buf);

	if (buf->dmap == NULL) {
		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
		if (error != 0) {
			device_printf(dev, "%s: bus_dmamap_create() failed: "
			    "error=%d\n", __func__, error);
			goto fail_map_create;
		}
		map_created = true;
	}

	if (sgt->vaddr == NULL) {
		error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
		if (error != 0) {
			device_printf(dev, "%s: bus_dmamem_alloc() failed: "
			    "error=%d\n", __func__, error);
			goto fail_mem_alloc;
		}
	}

	DPAA2_BUF_ASSERT_TXREADY(buf);

	return (0);

fail_mem_alloc:
	if (map_created) {
		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
	}
fail_map_create:
	return (error);
}
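
/*
 * Example (hypothetical sketch, not part of the driver): preparing the
 * descriptors of a Tx ring at attach time.  Every Tx buffer carries a
 * companion scatter/gather table buffer in buf->sgt, and both must have
 * their DMA tags assigned (e.g. via DPAA2_BUF_INIT_TAGOPT()) before this
 * routine is called.  "tx" and "DPAA2_TX_BUFS" are placeholders for the
 * caller's Tx ring and its size.
 *
 *	for (int i = 0; i < DPAA2_TX_BUFS; i++) {
 *		struct dpaa2_buf *txb = &tx->buf[i];
 *
 *		// Creates txb->dmap and allocates the SGT memory once;
 *		// calling it again on a prepared buffer is a no-op.
 *		error = dpaa2_buf_seed_txb(dev, txb);
 *		if (error != 0)
 *			break;
 *	}
 */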