/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2023 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
 *
 * NOTE: Several work queues (WQs) are organized into a single channel.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include "dpaa2_types.h"
#include "dpaa2_channel.h"
#include "dpaa2_ni.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_io.h"
#include "dpaa2_con.h"
#include "dpaa2_buf.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_bp.h"
#include "dpaa2_cmd_if.h"

MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");

#define RX_SEG_N		 (1u)
#define RX_SEG_SZ		 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
#define RX_SEG_MAXSZ		 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);

#define TX_SEG_N		 (16u) /* XXX-DSL: does a DPAA2 limit exist? */
#define TX_SEG_SZ		 (PAGE_SIZE)
#define TX_SEG_MAXSZ		 (TX_SEG_N * TX_SEG_SZ)
CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);

#define SGT_SEG_N		 (1u)
#define SGT_SEG_SZ		 (PAGE_SIZE)
#define SGT_SEG_MAXSZ		 (PAGE_SIZE)
CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);
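
/*
 * Note on the rounding idiom above: ((x - 1) / PAGE_SIZE + 1) * PAGE_SIZE
 * rounds x up to the next multiple of PAGE_SIZE. For example, with the
 * common 4 KiB page size and MJUM9BYTES = 9216, RX_SEG_SZ becomes
 * ((9216 - 1) / 4096 + 1) * 4096 = 3 * 4096 = 12288 bytes.
 */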

static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *,
    bus_size_t, int, bus_size_t);
static void dpaa2_chan_bp_task(void *, int);

/**
 * @brief Configures a QBMan channel and registers data availability
 * notifications.
 */
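/*
 * A minimal usage sketch, assuming a caller in the network interface attach
 * path; the cleanup handler name below is hypothetical:
 *
 *	struct dpaa2_channel *ch;
 *	int error;
 *
 *	error = dpaa2_chan_setup(dev, iodev, condev, bpdev, &ch, flowid,
 *	    my_cleanup_task_fn);
 *	if (error != 0)
 *		return (error);
 */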
int
dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
    struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_io_softc *iosc = device_get_softc(iodev);
	struct dpaa2_con_softc *consc = device_get_softc(condev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
	struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_channel *ch = NULL;
	struct dpaa2_cmd cmd;
	uint16_t rctk, contk;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
		    __func__, rcinfo->id, error);
		goto fail_rc_open;
	}
	error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
	if (error) {
		device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
		    __func__, coninfo->id, error);
		goto fail_con_open;
	}

	error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
		    "chan_id=%d, error=%d\n", __func__, coninfo->id,
		    consc->attr.chan_id, error);
		goto fail_con_enable;
	}

	ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);

	ch->ni_dev = dev;
	ch->io_dev = iodev;
	ch->con_dev = condev;
	ch->id = consc->attr.chan_id;
	ch->flowid = flowid;
	ch->tx_frames = 0; /* for debug purposes */
	ch->tx_dropped = 0; /* for debug purposes */
	ch->store_sz = 0;
	ch->store_idx = 0;
	ch->recycled_n = 0;
	ch->rxq_n = 0;

	NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
	NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);

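	/*
	 * Pin the cleanup taskqueue thread to the same CPU set as the DPIO
	 * object backing this channel, so that frame processing stays local
	 * to the core that receives the CDAN notifications.
	 */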
	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &ch->cleanup_tq);
	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
	    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);

	error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to set up DMA\n", __func__);
		goto fail_dma_setup;
	}

	mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);

	ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
	    &ch->xmit_mtx);
	if (ch->xmit_br == NULL) {
		device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
		error = ENOMEM;
		goto fail_buf_ring;
	}

	DPAA2_BUF_INIT(&ch->store);

	/* Register the new notification context */
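	/*
	 * The 64-bit QMan context is opaque to the hardware; storing the
	 * pointer to the context structure itself lets the CDAN handler
	 * recover this channel from the notification it receives.
	 */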
	ctx = &ch->ctx;
	ctx->qman_ctx = (uint64_t)ctx;
	ctx->cdan_en = true;
	ctx->fq_chan_id = ch->id;
	ctx->io_dev = ch->io_dev;
	ctx->channel = ch;
	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
	if (error) {
		device_printf(dev, "%s: failed to register CDAN context\n",
		    __func__);
		goto fail_dpcon_notif;
	}

	/* Register DPCON notification within Management Complex */
	notif_cfg.dpio_id = ioinfo->id;
	notif_cfg.prior = 0;
	notif_cfg.qman_ctx = ctx->qman_ctx;
	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
	if (error) {
		device_printf(dev, "%s: failed to register DPCON "
		    "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
		    coninfo->id, consc->attr.chan_id);
		goto fail_dpcon_notif;
	}

	/* Allocate an initial number of Rx buffers and the channel storage */
	error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
	    DPAA2_RX_BUF_SIZE, NULL);
	if (error) {
		device_printf(dev, "%s: failed to seed buffer pool\n",
		    __func__);
		goto fail_dpcon_notif;
	}
	error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
	    BUS_DMA_NOWAIT, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate channel storage\n",
		    __func__);
		goto fail_dpcon_notif;
	}
	ch->store_sz = DPAA2_ETH_STORE_FRAMES;

	/* Prepare queues for the channel */
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
	if (error) {
		device_printf(dev, "%s: failed to prepare TxConf queue: "
		    "error=%d\n", __func__, error);
		goto fail_fq_setup;
	}
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
	if (error) {
		device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
		    __func__, error);
		goto fail_fq_setup;
	}

	if (bootverbose) {
		device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
		    "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
		    consc->attr.prior_num);
	}

	*channel = ch;

	(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));

	return (0);

fail_fq_setup:
	if (ch->store.vaddr != NULL) {
		bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
	}
	if (ch->store.dmat != NULL) {
		bus_dma_tag_destroy(ch->store.dmat);
	}
	ch->store.dmat = NULL;
	ch->store.vaddr = NULL;
	ch->store.paddr = 0;
	ch->store.nseg = 0;
fail_dpcon_notif:
	buf_ring_free(ch->xmit_br, M_DEVBUF);
fail_buf_ring:
	mtx_destroy(&ch->xmit_mtx);
fail_dma_setup:
	/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
	/* 	taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
	/* } */
	/* taskqueue_free(ch->cleanup_tq); */
	(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
	(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_open:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
fail_rc_open:
	return (error);
}

/**
 * @brief Performs initial configuration of a frame queue.
 */
int
dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel */
		fq = &ch->txc_queue;
		fq->chan = ch;
		fq->flowid = ch->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &ch->rx_queues[i];
			fq->chan = ch;
			fq->flowid = ch->flowid;
			fq->tc = (uint8_t)i;
			fq->type = queue_type;

			ch->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface */
		fq = &sc->rxe_queue;
		fq->chan = ch;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}
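
/*
 * NOTE: dpaa2_chan_setup() above prepares only the TxConf and Rx queues;
 * DPAA2_NI_QUEUE_RX_ERR is expected to be configured once by the code that
 * owns the network interface, since the Rx error queue lives in the
 * per-interface softc rather than in the channel.
 */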

/**
 * @brief Obtain the next dequeue response from the channel storage.
 */
int
dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
{
	struct dpaa2_buf *buf = &ch->store;
	struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
	struct dpaa2_dq *msg = &msgs[ch->store_idx];
	int rc = EINPROGRESS;

	ch->store_idx++;

	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
		rc = EALREADY; /* VDQ command is expired */
		ch->store_idx = 0;
		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
			msg = NULL; /* Null response, FD is invalid */
		}
	}
	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
		rc = ENOENT; /* FQ is empty */
		ch->store_idx = 0;
	}

	if (dq != NULL) {
		*dq = msg;
	}

	return (rc);
}
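
/*
 * A hedged consumer-loop sketch; the frame handler below is hypothetical:
 *
 *	struct dpaa2_dq *dq;
 *	int error;
 *
 *	do {
 *		error = dpaa2_chan_next_frame(ch, &dq);
 *		if (dq != NULL)
 *			my_process_frame(ch, dq);
 *	} while (error == EINPROGRESS);
 *
 * EINPROGRESS means more responses are pending in the storage, EALREADY
 * means the VDQ command has expired, and ENOENT means the frame queue is
 * empty.
 */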

static int
dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
    bus_size_t alignment)
{
	int error;

	mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);

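	/*
	 * Three DMA tags are created below: one for Rx buffers (a single
	 * contiguous segment large enough for a jumbo frame), one for Tx
	 * payloads (up to TX_SEG_N page-sized segments), and one for
	 * scatter/gather tables (a single page).
	 */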
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_SEG_MAXSZ,		/* maxsize */
	    RX_SEG_N,			/* nsegments */
	    RX_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->rx_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
		goto fail_rx_tag;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_SEG_MAXSZ,		/* maxsize */
	    TX_SEG_N,			/* nsegments */
	    TX_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->tx_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
		goto fail_tx_tag;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    SGT_SEG_MAXSZ,		/* maxsize */
	    SGT_SEG_N,			/* nsegments */
	    SGT_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->sgt_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
		goto fail_sgt_tag;
	}

	return (0);

fail_sgt_tag:
	bus_dma_tag_destroy(ch->tx_dmat);
fail_tx_tag:
	bus_dma_tag_destroy(ch->rx_dmat);
fail_rx_tag:
	mtx_destroy(&ch->dma_mtx);
	ch->rx_dmat = NULL;
	ch->tx_dmat = NULL;
	ch->sgt_dmat = NULL;

	return (error);
}

/**
 * @brief Allocate DMA-mapped storage to keep responses from the VDQ command.
 */
static int
dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
    int mapflags, bus_size_t alignment)
{
	struct dpaa2_buf *buf = &ch->store;
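	/* Round the requested size up to a multiple of PAGE_SIZE */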
	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    maxsize,			/* maxsize */
	    1,				/* nsegments */
	    maxsize,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &buf->dmat);
	if (error != 0) {
		device_printf(dev, "%s: failed to create DMA tag\n", __func__);
		goto fail_tag;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate storage memory\n",
		    __func__);
		goto fail_map_create;
	}

	buf->paddr = 0;
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
	    dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags);
	if (error != 0) {
		device_printf(dev, "%s: failed to map storage memory\n",
		    __func__);
		goto fail_map_load;
	}

	bus_dmamap_sync(buf->dmat, buf->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	buf->nseg = 1;

	return (0);

fail_map_load:
	bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
fail_map_create:
	bus_dma_tag_destroy(buf->dmat);
fail_tag:
	buf->dmat = NULL;
	buf->vaddr = NULL;
	buf->paddr = 0;
	buf->nseg = 0;

	return (error);
}

/**
 * @brief Release new buffers to the buffer pool if necessary.
 */
static void
dpaa2_chan_bp_task(void *arg, int count)
{
	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
	struct dpaa2_bp_softc *bpsc;
	struct dpaa2_bp_conf bpconf;
	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
	device_t bpdev;
	int error;

	/* There's only one buffer pool for now */
	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bpsc = device_get_softc(bpdev);

	/* Get state of the buffer pool */
	error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
	if (error) {
		device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: "
		    "error=%d\n", __func__, error);
		return;
	}

	/* Double the allocated Rx buffers if fewer than 25% remain free */
	if (bpconf.free_bufn < (buf_num >> 2)) {
		mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
		mtx_lock(&ch->dma_mtx);
		(void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
		    DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
		mtx_unlock(&ch->dma_mtx);

		DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn);
	}
}