xref: /freebsd/sys/dev/dpaa/bman.c (revision b2e4da0b53ad082768b8f6f83766e030fd00d02a)
1 /*
2  * Copyright (c) 2026 Justin Hibbits
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  */
6 
7 #include <sys/param.h>
8 #include <sys/systm.h>
9 #include <sys/kernel.h>
10 #include <sys/bus.h>
11 #include <sys/lock.h>
12 #include <sys/malloc.h>
13 #include <sys/module.h>
14 #include <sys/mutex.h>
15 #include <sys/proc.h>
16 #include <sys/pcpu.h>
17 #include <sys/rman.h>
18 #include <sys/sched.h>
19 
20 #include <machine/bus.h>
21 #include <machine/tlb.h>
22 
23 #include "bman.h"
24 #include "dpaa_common.h"
25 #include "bman_var.h"
26 
27 #define	BMAN_POOL_SWDET(n)	(0x000 + 4 * (n))
28 #define	BMAN_POOL_HWDET(n)	(0x100 + 4 * (n))
29 #define	BMAN_POOL_SWDXT(n)	(0x200 + 4 * (n))
30 #define	BMAN_POOL_HWDXT(n)	(0x300 + 4 * (n))
31 #define	FBPR_FP_LWIT	0x804
32 #define	BMAN_IP_REV_1	0x0bf8
33 #define	  IP_MAJ_S	  8
34 #define	  IP_MAJ_M	  0x0000ff00
35 #define	  IP_MIN_M	  0x000000ff
36 #define	BMAN_IP_REV_2	0x0bfc
37 #define	BMAN_FBPR_BARE	0x0c00
38 #define	BMAN_FBPR_BAR	0x0c04
39 #define	BMAN_FBPR_AR	0x0c10
40 #define	BMAN_LIODNR	0x0d08
41 
42 #define	BMAN_POOL_CONTENT(n)	(0x0600 + 4 * (n))
43 #define	BMAN_ECSR	0x0a00
44 #define	BMAN_ECIR	0x0a04
45 #define	  ECIR_PORTAL(r)  (((r) >> 24) & 0x0f)
46 #define	  ECIR_VERB(r)	  (((r) >> 16) & 0x07)
47 #define	  ECIR_R	  0x00080000
48 #define	  ECIR_POOL(r)	  ((r) & 0x3f)
49 #define	BMAN_CECR	0x0a34	/* Corruption Error Capture Register */
50 #define	BMAN_CEAR	0x0a38	/* Corruption Error Address Register */
#define	BMAN_AECR	0x0a34	/* Access Error Capture Register */
#define	BMAN_AEAR	0x0a38	/* Access Error Address Register */
53 #define	BMAN_ERR_ISR	0x0e00
54 #define	BMAN_ERR_IER	0x0e04
55 #define	BMAN_ERR_ISDR	0x0e08
56 #define	  ERR_EMAI	  0x00000040
57 #define	  ERR_EMCI	  0x00000020
58 #define	  ERR_IVCI	  0x00000010
59 #define	  ERR_FLWI	  0x00000008
60 #define	  ERR_MBEI	  0x00000004
61 #define	  ERR_SBEI	  0x00000002
62 #define	  ERR_BSCN	  0x00000001
63 
64 static MALLOC_DEFINE(M_BMAN, "bman", "DPAA Buffer Manager structures");
65 
66 static struct bman_softc *bman_sc;
67 
68 static void
69 bman_isr(void *arg)
70 {
71 	struct bman_softc *sc = arg;
72 	uint32_t ier, isr, isr_bit;
73 	uint32_t reg;
74 
75 	ier = bus_read_4(sc->sc_rres, BMAN_ERR_IER);
76 	isr = bus_read_4(sc->sc_rres, BMAN_ERR_ISR);
77 
78 	isr_bit = (isr & ier);
79 	if (isr_bit == 0)
80 		goto end;
81 
82 	if (isr_bit & ERR_EMAI) {
83 		device_printf(sc->sc_dev, "External memory access error\n");
84 		reg = bus_read_4(sc->sc_rres, BMAN_AECR);
85 		if (reg <= 63)
86 			device_printf(sc->sc_dev, "  pool %d\n", reg);
87 		else
88 			device_printf(sc->sc_dev, "  FBPR free list\n");
89 		reg = bus_read_4(sc->sc_rres, BMAN_AEAR);
90 		device_printf(sc->sc_dev, "  offset: %#x\n", reg);
91 	}
92 
93 	if (isr_bit & ERR_EMCI) {
94 		device_printf(sc->sc_dev, "External memory corruption error\n");
95 		reg = bus_read_4(sc->sc_rres, BMAN_CECR);
96 		if (reg <= 63)
97 			device_printf(sc->sc_dev, "  pool %d\n", reg);
98 		else
99 			device_printf(sc->sc_dev, "  FBPR free list\n");
100 		reg = bus_read_4(sc->sc_rres, BMAN_CEAR);
101 		device_printf(sc->sc_dev, "  offset: %#x\n", reg);
102 	}
103 	if (isr_bit & ERR_IVCI) {
104 		reg = bus_read_4(sc->sc_rres, BMAN_ECIR);
105 		device_printf(sc->sc_dev, "Invalid verb command\n");
106 		device_printf(sc->sc_dev, "Portal: %d, ring: %s\n",
107 		    ECIR_POOL(reg), (reg & ECIR_R) ? "RCR" : "Command");
108 		device_printf(sc->sc_dev, "verb: 0x%02x, pool: %d\n",
109 		    ECIR_VERB(reg), ECIR_POOL(reg));
110 	}
111 	if (isr_bit & (ERR_MBEI | ERR_SBEI)) {
112 		if (isr_bit & ERR_MBEI)
113 			device_printf(sc->sc_dev, "Multi-bit ECC error\n");
114 		if (isr_bit & ERR_MBEI)
115 			device_printf(sc->sc_dev, "Single-bit ECC error\n");
116 		/* TODO: Add more error details for ECC errors. */
117 	}
118 
119 end:
120 	bus_write_4(sc->sc_rres, BMAN_ERR_ISR, isr);
121 }
122 
123 static void
124 bman_get_version(struct bman_softc *sc)
125 {
126 	uint32_t reg = bus_read_4(sc->sc_rres, BMAN_IP_REV_1);
127 
128 	sc->sc_major = (reg & IP_MAJ_M) >> IP_MAJ_S;
129 	sc->sc_minor = (reg & IP_MIN_M);
130 }
131 
132 static int
133 bman_set_memory(struct bman_softc *sc, vm_paddr_t pa, vm_size_t size)
134 {
135 	vm_paddr_t bar_pa;
136 	if ((pa & (size - 1)) != 0 || (size & (size - 1)) != 0) {
137 		device_printf(sc->sc_dev,
138 		    "invalid memory configuration: pa: %#jx, size: %#jx\n",
139 		    (uintmax_t)pa, (uintmax_t)size);
140 		return (ENXIO);
141 	}
142 	bar_pa = bus_read_4(sc->sc_rres, BMAN_FBPR_BARE);
143 	bar_pa <<= 32;
144 	bar_pa |= bus_read_4(sc->sc_rres, BMAN_FBPR_BAR);
145 	if (bar_pa != 0 && bar_pa != pa) {
146 		device_printf(sc->sc_dev,
147 		    "attempted to reinitialize BMan with different BAR\n");
148 		return (ENOMEM);
149 	} else if (bar_pa == pa)
150 		return (0);
151 
152 	bus_write_4(sc->sc_rres, BMAN_FBPR_BARE, pa >> 32);
153 	bus_write_4(sc->sc_rres, BMAN_FBPR_BAR, pa & 0xffffffff);
154 	bus_write_4(sc->sc_rres, BMAN_FBPR_AR, ilog2(size) - 1);
155 
156 	return (0);
157 }
158 
159 int
160 bman_attach(device_t dev)
161 {
162 	struct bman_softc *sc;
163 	vm_paddr_t bp_pa;
164 	size_t bp_size;
165 	int bp_count;
166 
167 	sc = device_get_softc(dev);
168 	sc->sc_dev = dev;
169 	bman_sc = sc;
170 
171 	/* Allocate resources */
172 	sc->sc_rrid = 0;
173 	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
174 	    sc->sc_rrid, RF_ACTIVE);
175 	if (sc->sc_rres == NULL)
176 		return (ENXIO);
177 
178 	sc->sc_irid = 0;
179 	sc->sc_ires = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ,
180 	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
181 	if (sc->sc_ires == NULL)
182 		goto err;
183 
184 	bman_get_version(sc);
185 	if (sc->sc_major == 2 && sc->sc_minor == 0)
186 		bp_count = BMAN_MAX_POOLS_1023;
187 	else
188 		bp_count = BMAN_MAX_POOLS;
189 
190 	/* TODO: LIODN */
191 	bus_write_4(sc->sc_rres, BMAN_LIODNR, 0);
192 
193 	sc->sc_vmem = vmem_create("BMan Pools", 0, bp_count, 1, 0, M_WAITOK);
194 
195 	/* Pool is reserved memory, so no need to track it ourselves. */
196 	dpaa_map_private_memory(dev, 0, "fsl,bman-fbpr", &bp_pa, &bp_size);
197 	bman_set_memory(sc, bp_pa, bp_size);
198 
199 	/* Warn if FBPR drops below 5% total. */
200 	bus_write_4(sc->sc_rres, FBPR_FP_LWIT, (bp_size / 8) / 20);
201 
202 	/* Clear interrupt status, and enable all interrupts. */
203 	bus_write_4(sc->sc_rres, BMAN_ERR_ISR, 0xffffffff);
204 	bus_write_4(sc->sc_rres, BMAN_ERR_IER, 0xffffffff);
205 	bus_write_4(sc->sc_rres, BMAN_ERR_ISDR, 0);
206 
207 	/* Enable the IRQ line now. */
208 	if (bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_NET, NULL, bman_isr,
209 	    sc, &sc->sc_icookie) != 0)
210 		goto err;
211 
212 	return (0);
213 
214 err:
215 	bman_detach(dev);
216 	return (ENXIO);
217 }
218 
219 int
220 bman_detach(device_t dev)
221 {
222 	struct bman_softc *sc;
223 
224 	sc = device_get_softc(dev);
225 
226 	if (sc->sc_vmem != NULL)
227 		vmem_destroy(sc->sc_vmem);
228 	if (sc->sc_icookie != NULL)
229 		bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie);
230 	if (sc->sc_ires != NULL)
231 		bus_release_resource(dev, SYS_RES_IRQ,
232 		    sc->sc_irid, sc->sc_ires);
233 
234 	if (sc->sc_rres != NULL)
235 		bus_release_resource(dev, SYS_RES_MEMORY,
236 		    sc->sc_rrid, sc->sc_rres);
237 
238 	return (0);
239 }
240 
241 int
242 bman_suspend(device_t dev)
243 {
244 
245 	return (0);
246 }
247 
248 int
249 bman_resume(device_t dev)
250 {
251 
252 	return (0);
253 }
254 
255 int
256 bman_shutdown(device_t dev)
257 {
258 
259 	return (0);
260 }
261 
262 /*
263  * BMAN API
264  */
265 
266 struct bman_pool *
267 bman_new_pool(void)
268 {
269 	struct bman_softc *sc;
270 	vmem_addr_t bpid;
271 	struct bman_pool *pool;
272 
273 	sc = bman_sc;
274 	pool = NULL;
275 
276 	if (vmem_alloc(sc->sc_vmem, 1, M_FIRSTFIT | M_NOWAIT, &bpid) != 0)
277 		return (NULL);
278 
279 	pool = malloc(sizeof(*pool), M_BMAN, M_WAITOK | M_ZERO);
280 
281 	pool->bpid = bpid;
282 
283 	return (pool);
284 }
285 
286 struct bman_pool *
287 bman_pool_create(uint8_t *bpid, uint16_t buffer_size, uint16_t max_buffers,
288     uint32_t dep_sw_entry, uint32_t dep_sw_exit,
289     uint32_t dep_hw_entry, uint32_t dep_hw_exit,
290     bm_depletion_handler dep_cb, void *arg)
291 {
292 	struct bman_softc *sc;
293 	struct bman_pool *bp;
294 
295 	sc = bman_sc;
296 	bp = bman_new_pool();
297 	if (bpid != NULL)
298 		*bpid = bp->bpid;
299 
300 	if (dep_cb) {
301 		bp->dep_cb = dep_cb;
302 		bus_write_4(sc->sc_rres, BMAN_POOL_SWDET(bp->bpid),
303 		    dep_sw_entry);
304 		bus_write_4(sc->sc_rres, BMAN_POOL_SWDXT(bp->bpid),
305 		    dep_sw_exit);
306 		bus_write_4(sc->sc_rres, BMAN_POOL_HWDET(bp->bpid),
307 		    dep_hw_entry);
308 		bus_write_4(sc->sc_rres, BMAN_POOL_HWDXT(bp->bpid),
309 		    dep_hw_exit);
310 		bp->arg = arg;
311 		bman_portal_enable_scn(DPCPU_GET(bman_affine_portal), bp);
312 	}
313 
314 	return (bp);
315 }
316 
317 int
318 bman_pool_destroy(struct bman_pool *pool)
319 {
320 	/* Need to error, or print a warning, if the pool isn't empty */
321 	if (bman_count(pool) != 0)
322 		return (EBUSY);
323 	vmem_free(bman_sc->sc_vmem, pool->bpid, 1);
324 	free(pool, M_BMAN);
325 
326 	return (0);
327 }
328 
329 int
330 bman_put_buffers(struct bman_pool *pool, struct bman_buffer *buffers, int count)
331 {
332 	struct bman_portal_softc *portal;
333 	int error;
334 
335 	critical_enter();
336 
337 	portal = DPCPU_GET(bman_affine_portal);
338 	if (portal == NULL) {
339 		critical_exit();
340 		return (EIO);
341 	}
342 
343 	while (count > 0) {
344 		int c = min(count, 8);
345 		error = bman_release(pool, buffers, c);
346 		buffers += c;
347 		count -= c;
348 	}
349 
350 	critical_exit();
351 
352 	return (error);
353 }
354 
355 uint32_t
356 bman_get_bpid(struct bman_pool *pool)
357 {
358 	return (pool->bpid);
359 }
360 
361 uint32_t
362 bman_count(struct bman_pool *pool)
363 {
364 
365 	return (bus_read_4(bman_sc->sc_rres, BMAN_POOL_CONTENT(pool->bpid)));
366 }
367 
368