/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2023 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus.h>

#include "dpaa2_types.h"
#include "dpaa2_buf.h"
#include "dpaa2_bp.h"
#include "dpaa2_channel.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_ni.h"

MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");

/**
 * @brief Allocate Rx buffers visible to QBMan and release them to the
 * buffer pool.
 */
int
dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count,
    int size, struct mtx *dma_mtx)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev);
	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
	struct dpaa2_buf *buf;
	const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num);
	const uint16_t bpid = bpsc->attr.bpid;
	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
	int error, bufn = 0;

#if defined(INVARIANTS)
	KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__));
	if (dma_mtx != NULL) {
		mtx_assert(dma_mtx, MA_OWNED);
	}
#endif /* INVARIANTS */

#ifdef _notyet_
	/* Limit the number of buffers released to the pool */
	count = (alloc + count > DPAA2_NI_BUFS_MAX)
	    ? DPAA2_NI_BUFS_MAX - alloc : count;
#endif

	/* Release "count" buffers to the pool */
	for (int i = alloc; i < alloc + count; i++) {
		/* Enough buffers were allocated for a single command */
		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
			error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr,
			    bufn);
			if (error) {
				device_printf(sc->dev, "%s: failed to release "
				    "buffers to the pool (1)\n", __func__);
				return (error);
			}
			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
			bufn = 0;
		}

		buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
		if (buf == NULL) {
			device_printf(dev, "%s: malloc() failed\n", __func__);
			return (ENOMEM);
		}
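		/*
		 * Tie the buffer to the channel's Rx DMA tag; the channel
		 * itself is attached to the buffer as an opaque option.
		 */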
		DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);

		error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: "
			    "error=%d\n", __func__, error);
			/* seed_rxb() cleaned up after itself on failure */
			free(buf, M_DPAA2_RXB);
			break;
		}
		paddr[bufn] = buf->paddr;
		bufn++;
	}

	/* Release the remainder of the buffers to the pool */
	if (bufn > 0) {
		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn);
		if (error) {
			device_printf(sc->dev, "%s: failed to release "
			    "buffers to the pool (2)\n", __func__);
			return (error);
		}
		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
	}

	return (0);
}
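
/*
 * Example usage (a hypothetical sketch rather than driver code: "nidev",
 * "bpdev" and "ch" stand for the network interface device, the buffer pool
 * device and an already initialized channel; MJUM9BYTES is one of the
 * cluster sizes m_getjcl() accepts):
 *
 *	error = dpaa2_buf_seed_pool(nidev, bpdev, ch, 1024, MJUM9BYTES, NULL);
 *	if (error != 0)
 *		device_printf(nidev, "failed to seed the buffer pool\n");
 */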

/**
 * @brief Prepare an Rx buffer to be released to the buffer pool.
 */
int
dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
    struct mtx *dma_mtx)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_fa *fa;
	bool map_created = false;
	bool mbuf_alloc = false;
	int error;

#if defined(INVARIANTS)
	DPAA2_BUF_ASSERT_RXPREP(buf);
	if (dma_mtx != NULL) {
		mtx_assert(dma_mtx, MA_OWNED);
	}
#endif /* INVARIANTS */

	if (__predict_false(buf->dmap == NULL)) {
		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
		if (error != 0) {
			device_printf(dev, "%s: failed to create DMA map: "
			    "error=%d\n", __func__, error);
			goto fail_map_create;
		}
		map_created = true;
	}

	if (__predict_true(buf->m == NULL)) {
		buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (__predict_false(buf->m == NULL)) {
			device_printf(dev, "%s: m_getjcl() failed\n", __func__);
			error = ENOMEM;
			goto fail_mbuf_alloc;
		}
		buf->m->m_len = buf->m->m_ext.ext_size;
		buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
		mbuf_alloc = true;
	}

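	/*
	 * Load the cluster for DMA. The Rx DMA tag is expected to map the
	 * whole buffer as a single contiguous segment.
	 */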
	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
	    &buf->nseg, BUS_DMA_NOWAIT);
	KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
	    buf->nseg));
	KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
	    __func__, error));
	if (__predict_false(error != 0 || buf->nseg != 1)) {
		device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
		    "error=%d, nsegs=%d\n", __func__, error, buf->nseg);
		goto fail_mbuf_map;
	}
	buf->paddr = buf->seg.ds_addr;
	buf->vaddr = buf->m->m_data;

	/* Populate frame annotation for future use */
	fa = (struct dpaa2_fa *)buf->vaddr;
	fa->magic = DPAA2_MAGIC;
	fa->buf = buf;

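	/* Prepare the mapping for the upcoming device-to-CPU (Rx) transfer */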
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);

	DPAA2_BUF_ASSERT_RXREADY(buf);

	return (0);

fail_mbuf_map:
	if (mbuf_alloc) {
		m_freem(buf->m);
		buf->m = NULL;
	}
fail_mbuf_alloc:
	if (map_created) {
		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
	}
fail_map_create:
	return (error);
}
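
/*
 * A minimal sketch of how the frame annotation written above may be used on
 * the Rx path to get from a received frame back to its dpaa2_buf (hypothetical
 * code; the actual lookup is done by the Rx processing path):
 *
 *	struct dpaa2_fa *fa = (struct dpaa2_fa *)vaddr;
 *
 *	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
 *	buf = fa->buf;
 */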

/**
 * @brief Prepare a Tx buffer to be added to the Tx ring.
 */
int
dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
{
	struct dpaa2_buf *sgt = buf->sgt;
	bool map_created = false;
	int error;

	DPAA2_BUF_ASSERT_TXPREP(buf);

	if (buf->dmap == NULL) {
		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
		if (error != 0) {
			device_printf(dev, "%s: bus_dmamap_create() failed: "
			    "error=%d\n", __func__, error);
			goto fail_map_create;
		}
		map_created = true;
	}

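	/*
	 * Allocate DMA-able memory for the scatter/gather table, but only
	 * once: a buffer that already has it keeps its existing allocation.
	 */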
	if (sgt->vaddr == NULL) {
		error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
		if (error != 0) {
			device_printf(dev, "%s: bus_dmamem_alloc() failed: "
			    "error=%d\n", __func__, error);
			goto fail_mem_alloc;
		}
	}

	DPAA2_BUF_ASSERT_TXREADY(buf);

	return (0);

fail_mem_alloc:
	if (map_created) {
		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
	}
fail_map_create:
	return (error);
}
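
/*
 * Example usage (a hypothetical sketch rather than driver code: "dev" and
 * "buf" come from the caller, and buf->sgt is assumed to point at a separate
 * dpaa2_buf reserved for the scatter/gather table):
 *
 *	error = dpaa2_buf_seed_txb(dev, buf);
 *	if (error == 0) {
 *		... load buf->m and build the Tx frame descriptor ...
 *	}
 */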